How do I read keyboard events from file? - json

I have read this question, which is similar and gets me most of the way.
The code for the answer isn't posted, but I believe I have followed the instructions and managed to get it working -- except after the recording has been saved and reopened.
It works perfectly fine immediately after recording; however, I want to save the data and read it back for later use, every time I run the program, without having to re-record it each time.
import keyboard
import threading
from keyboard import KeyboardEvent
import time
import json

def record(file='record.txt'):
    f = open(file, 'w+')
    keyboard_events = []
    keyboard.start_recording()
    starttime = time.time()
    keyboard.wait('esc')
    keyboard_events = keyboard.stop_recording()
    print(starttime, file=f)
    for kevent in range(0, len(keyboard_events)):
        print(keyboard_events[kevent].to_json(), file=f)
    f.close()
def play(file="record.txt", speed=1):
    f = open(file, 'r')
    lines = f.readlines()
    f.close()
    keyboard_events = []
    for index in range(1, len(lines)):
        keyboard_events.append(keyboard.KeyboardEvent(**json.loads(lines[index])))
    starttime = float(lines[0])
    keyboard_time_interval = keyboard_events[0].time - starttime
    keyboard_time_interval /= speed
    k_thread = threading.Thread(target=lambda: time.sleep(keyboard_time_interval) == keyboard.play(keyboard_events, speed_factor=speed))
    k_thread.start()
    k_thread.join()
I am not especially new to coding, or to Python, but this problem perplexes me. I've tested all the variables, and none of them persist outside of the record function.
(I don't fully understand lambda, threading or **json.loads, but I don't think that's the problem.)
What's going on here?
For extra bonus points, if this is possible to do asynchronously, that'd be amazing. One problem at a time, though.

Just in case anyone else ever has the same problem as me, add this at the start of your code. I have no idea why it works, but it does.
keyboard.start_recording()
temp = keyboard.stop_recording()
You can forget about the temp variable immediately.
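In context, that looks something like this (a minimal sketch, assuming the record() and play() functions from the question and a record.txt saved by an earlier run):
import keyboard

# prime the keyboard module's recording state once at startup;
# the returned events are irrelevant and can be discarded
keyboard.start_recording()
keyboard.stop_recording()

# replaying a recording saved in a previous run now works
play('record.txt', speed=1)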

Related

how to update progress bar in callback?

I need to create a heatmap. Before plotting it, I have to download a lot of data from a database, which takes around 5 minutes, so I'd like to show a progress bar during the download to let me know it is actually in progress.
I googled a lot and fortunately found a website that uses dbc.Progress() and shows how to update the progress bar by connecting tqdm to a file. But I'm still not sure how to do it for my own example. I tried and it doesn't work; could anyone help me with that? Thank you so much for your help.
https://towardsdatascience.com/long-callbacks-in-dash-web-apps-72fd8de25937
Here is my code.
I defined one tab, in which I include the progress bar using dbc.Progress() and the graph:
progress_bar_heatmap = dbc.Progress(value=25, striped=True, animated=True,
                                    children=['25%'], color='success',
                                    style={'height': '20px'},
                                    id="progress_bar_heatmap")
loading_timer_progress = dcc.Interval(id='loading_timer_progress',
                                      interval=1000)
heatmap_graph = dcc.Graph(id="heatmap-graph", **graph_kwargs)
# wrap contour in dcc.Loading's children so we can see the loading signal
heatmap_loading = dcc.Loading(
    id='loading-heatmap',
    type='default',
    children=heatmap_graph  # wrap contour in loading's children
)
dcc.Tab(
    [progress_bar_heatmap, loading_timer_progress, heatmap_loading],
    label=label,
    value='heatmap',
    id='heatmap-tab',
    className="single-tab",
    selected_className="single-tab--selected",
)
In the callback, I copied some code from the above website:
@app.callback(
    [
        Output("heatmap-graph", "figure"),
        Output("progress_bar_dts_heatmap", "value"),
    ],
    [
        Input("plot-dts", "n_clicks"),
        Input('loading_timer_progress', 'n_intervals'),
    ],
    prevent_initial_call=True,  # disable output on the first load
)
def change_plot(n_clicks, n_intervals):
    progress_bar_value = 0
    import sys
    try:
        with open('progress.txt', 'r') as file:
            str_raw = file.read()
        last_line = list(filter(None, str_raw.split('\n')))[-1]
        percent = float(last_line.split('%')[0])
    except:  # no progress file created yet, meaning it is being created
        percent = 0
        std_err_backup = sys.stderr
        file_prog = open('progress.txt', 'w')
        sys.stderr = file_prog
        df = time_consuming_function()
        result_str = f'Long callback triggered by {btn_name}. Result: {x:.2f}'
        file_prog.close()
        sys.stderr = std_err_backup
    finally:  # must run under all circumstances
        text = f'{percent:.0f}%'
        fig = create_fig(df)
Inside the time_consuming_function:
def time_consuming_function():
    download_data_from_oracle()
    # after that, I added the loop below as the website did
    for i in tqdm(range(20)):
        time.sleep(0.5)
    return df
The code above doesn't work, and I'm not sure which part is wrong.
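For reference, the mechanism the linked article relies on is redirecting tqdm's output into a text file and then reading the latest percentage back from that file. Below is a stripped-down sketch of just that part, outside of Dash; the file name progress.txt and the 20-step loop come from the question, everything else is illustrative and may need adapting.
import sys
import time
from tqdm import tqdm

def time_consuming_function():
    # the real database download would go here; the loop only drives tqdm
    for _ in tqdm(range(20)):
        time.sleep(0.5)

def read_progress(path='progress.txt'):
    # tqdm separates its updates with '\r'; grab the latest "NN%" it wrote
    try:
        with open(path) as fh:
            updates = [u for u in fh.read().replace('\r', '\n').split('\n') if '%' in u]
        return float(updates[-1].split('%')[0]) if updates else 0.0
    except FileNotFoundError:
        return 0.0

if __name__ == '__main__':
    std_err_backup = sys.stderr
    with open('progress.txt', 'w') as file_prog:
        sys.stderr = file_prog  # tqdm writes its bar to stderr by default
        time_consuming_function()
    sys.stderr = std_err_backup
    print(f'{read_progress():.0f}%')  # the interval callback would read this value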

Weird KeyError (Python)

So, I have to work with this JSON (from URL):
{'player': {'racing': 25260.154000000017, 'player': 259114.57700000296}, 'farming': {'fishing': 33783.390999999414, 'mining': 29048.60500000002, 'farming': 25334.504000000023}, 'piloting': {'piloting': 25570.18800000001, 'cargos': 3080.713000000036, 'heli': 10433.977000000004}, 'physical': {'strength': 198358.86700000675}, 'business': {'business': 50922.88500000005}, 'trucking': {'mechanic': 2724.5620000000004, 'garbage': 755.642999999997, 'trucking': 223784.99700000713, 'postop': 1411.4190000000006}, 'train': {'bus': 669.1940000000001, 'train': 1363.805999999999}, 'ems': {'fire': 25449.43400000001, 'ems': 13844.628000000012}, 'hunting': {'skill': 4179.033000000316}, 'casino': {'casino': 18545.526000000027}}
It is indeed one line. I am trying to get, for example, racing, which is the first value you see. For this, you need to go into player first, and then you can get to racing. How do I do this?
My current code:
def allthethings():
    # Grab all the skills
    geturl = ("http://server.tycoon.community:30120/status/data/" + str(setting_playerid))
    print(geturl)
    a = requests.get(geturl, headers={"X-Tycoon-Key": setting_apikeyTT}).json()
    jsonconverted = (a["data"]["gaptitudes_v"])
    print(jsonconverted)
    # Convert JSON into many, many variables
    Raw_RACR = jsonconverted['player.racing']
    print(Raw_RACR)
I believe this is all the code that is needed.
Also, this is the error:
KeyError: 'player.racing'
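For context on the error: the parsed JSON is a nested dictionary, not a flat one with dotted keys, so the value has to be reached in two steps. A minimal sketch using a trimmed copy of the structure above:
# trimmed copy of the JSON shown above
jsonconverted = {
    'player': {'racing': 25260.154000000017, 'player': 259114.57700000296},
    'physical': {'strength': 198358.86700000675},
}

# go into the outer 'player' dict first, then read 'racing' from it
Raw_RACR = jsonconverted['player']['racing']
print(Raw_RACR)  # 25260.154000000017

# .get() avoids another KeyError if a category happens to be missing
print(jsonconverted.get('casino', {}).get('casino'))  # None here, because 'casino' was trimmed from this copy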

Trying to define a function that creates lists from files and uses random.choices to choose an element from the weighted lists

I'm trying to define a function that will create lists from multiple text files and print a random element from one of the weighted lists. I've managed to get the function to work with random.choice for a single list.
def test_rollitems():
    my_commons = open('common.txt')
    all_common_lines = my_commons.readlines()
    common = []
    for i in all_common_lines:
        common.append(i)
    y = random.choice(common)
    print(y)
When I tried adding a second list to the function, it wouldn't work, and my program just closes when the function is called.
def Improved_rollitem():
    # create the lists from the files
    my_commons = open('common.txt')
    all_common_lines = my_commons.readlines()
    common = []
    for i in all_common_lines:
        common.append(i)
    my_uncommons = open('uncommon.txt')
    all_uncommon_lines = my_uncommons.readlines()
    uncommon = []
    for i in all_uncommon_lines:
        uncommon.apend(i)
    y = random.choices([common, uncommon], [80, 20])
    print(y)
Can anyone offer any insight into what I'm doing wrong or missing?
Never mind, I figured this out on my own! I was having issues with Geany, so I installed PyCharm and was able to work through the issue. The correct code is:
def Improved_rollitem():
    # create the lists from the files
    my_commons = open('common.txt')
    all_common_lines = my_commons.readlines()
    common = []
    for i in all_common_lines:
        common.append(i)
    my_uncommons = open('uncommon.txt')
    all_uncommon_lines = my_uncommons.readlines()
    uncommon = []
    for i in all_uncommon_lines:
        uncommon.append(i)
    y = random.choices([common, uncommon], [.8, .20])
    if y == [common]:
        for i in [common]:
            print(random.choice(i))
    if y == [uncommon]:
        for i in [uncommon]:
            print(random.choice(i))
If there's a better way to do something like this, it would certainly be cool to know though.
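For reference, one possible tidier variant (just a sketch, assuming the same common.txt and uncommon.txt files): random.choices returns a list containing the picked population, so it can be indexed directly, and the with-blocks close the files automatically.
import random

def improved_rollitem():
    # read each file into a list of lines; the with-blocks close the files
    with open('common.txt') as f:
        common = f.readlines()
    with open('uncommon.txt') as f:
        uncommon = f.readlines()
    # pick one of the two lists with an 80/20 weighting, then one entry from it
    chosen_list = random.choices([common, uncommon], weights=[80, 20])[0]
    print(random.choice(chosen_list))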

How to get dataset into array

I have worked through all the tutorials and searched for "load csv tensorflow", but I just can't get the logic of it all. I'm not a total beginner, but I don't have much time to complete this, and I've been suddenly thrown into TensorFlow, which is unexpectedly difficult.
Let me lay it out:
A very simple CSV file of 184 columns that are all float numbers. A row is simply today's price, three buy signals, and the previous 180 days' prices:
close = tf.placeholder(float, name='close')
signals = tf.placeholder(bool, shape=[3], name='signals')
previous = tf.placeholder(float, shape=[180], name = 'previous')
This article: https://www.tensorflow.org/guide/datasets
It covers loading pretty well. It even has a section on converting to numpy arrays, which is what I need to train and test the net. However, as the author says in the article leading to this page, it is pretty complex. It seems like everything is geared toward data manipulation, whereas we have already normalized our data (nothing has really changed in AI since 1983 in terms of inputs, outputs, and layers).
Here is a way to load it, but it's not into numpy, and there's no example of using the data without manipulating it:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    with open('/BTC1.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            ?????????
            line_count += 1
I need to know how to get the csv file into the
close = tf.placeholder(float, name='close')
signals = tf.placeholder(bool, shape=[3], name='signals')
previous = tf.placeholder(float, shape=[180], name = 'previous')
so that I can follow the tutorials to train and test the net.
Your question isn't entirely clear to me. If I understand correctly (tell me if I'm wrong), you're asking how to feed data into your model? There are several ways to do so.
Use placeholders with feed_dict during the session. This is the basic and easiest one, but it often suffers from training performance issues. For further explanation, check this post.
Use queues. Hard to implement and badly documented; I don't suggest it, because it has been superseded by the third method.
tf.data API.
...
So, to answer your question with the first method:
# get your array outside the session
with open('/BTC1.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    # the question says every column is a float: close, 3 signals, 180 previous prices
    dataset = np.asarray([data for data in csv_reader], dtype=np.float32)
close_col = dataset[:, 0]
signal_cols = dataset[:, 1:4]
previous_cols = dataset[:, 4:]
# let's say you load 100 rows each time for training
batch_size = 100
# define placeholders like you did
...
with tf.Session() as sess:
    ...
    for i in range(number_iter):
        start = i * batch_size
        end = (i + 1) * batch_size
        sess.run(train_operation, feed_dict={close: close_col[start:end],
                                             signals: signal_cols[start:end],
                                             previous: previous_cols[start:end]})
With the third method:
# retrieve your columns like before
...
# let's say you load 100 rows each time for training
batch_size = 100
# construct your input pipeline
c_col, s_col, p_col = wrapper(filename)
batch = tf.data.Dataset.from_tensor_slices((c_col, s_col, p_col))
batch = batch.shuffle(c_col.shape[0]).batch(batch_size)  # mix data --> assemble batches --> prefetch to RAM, ready to inject into the model
iterator = batch.make_initializable_iterator()
iter_init_operation = iterator.initializer
c_it, s_it, p_it = iterator.get_next()  # get-next-batch operation, automatically called at each iteration within the session
# replace your close, signals, previous placeholders in your model with c_it, s_it, p_it when you define the model
...
with tf.Session() as sess:
    # you need to initialize the iterator
    sess.run([tf.global_variables_initializer(), iter_init_operation])
    ...
    for i in range(number_iter):
        sess.run(train_operation)
Good luck!
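As a side note, the third snippet calls a wrapper(filename) helper that isn't shown. Here is one possible sketch of it, reusing the CSV parsing from the first method and the column layout described in the question (one closing price, three signals, 180 previous prices):
import csv
import numpy as np

def wrapper(filename):
    # parse the CSV into a float array and split it into the three column groups
    with open(filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        dataset = np.asarray([row for row in csv_reader], dtype=np.float32)
    close_col = dataset[:, 0]       # today's price
    signal_cols = dataset[:, 1:4]   # three buy signals
    previous_cols = dataset[:, 4:]  # previous 180 days of prices
    return close_col, signal_cols, previous_cols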

How do I rerender HTML PyQt4

I have managed to use the suggested code to render the HTML from a webpage and then parse, find, and use the text as wanted. I'm using PyQt4. However, the webpage I am interested in is updated frequently, and I want to re-render the page and check the updated HTML for new info.
I thus have a loop in my Python script so that I sort of start all over again. However, this makes the program crash. I have searched the net and found out that this is to be expected, but I have not found any suggestion on how to do it correctly. It must be simple, I guess?
import sys

from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4.QtWebKit import *

class Render(QWebPage):
    def __init__(self, url):
        self.app = QApplication(sys.argv)
        QWebPage.__init__(self)
        self.loadFinished.connect(self._loadFinished)
        self.mainFrame().load(QUrl(url))
        self.app.exec_()

    def _loadFinished(self, result):
        self.frame = self.mainFrame()
        self.app.quit()

r = Render(url)
html = r.frame.toHtml()
So when I hit r = Render(url) the second time, it crashes. So I am looking for something like r = Rerender(url).
As you might guess, I am not much of a programmer, and I usually get by by stealing code I barely understand. But this is the first time I can't find an answer, so I thought I should ask a question myself.
I hope my question is clear enough and that someone has the answer.
Simple demo (adapt to taste):
import sys, signal
from PyQt4 import QtCore, QtGui, QtWebKit

class WebPage(QtWebKit.QWebPage):
    def __init__(self, url):
        super(WebPage, self).__init__()
        self.url = url
        self.mainFrame().loadFinished.connect(self.handleLoadFinished)
        self.refresh()

    def refresh(self):
        self.mainFrame().load(QtCore.QUrl(self.url))

    def handleLoadFinished(self):
        print('Loaded:', self.mainFrame().url().toString())
        # do stuff with html ...
        print('Reloading in 3 seconds...\n')
        QtCore.QTimer.singleShot(3000, self.refresh)

if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    app = QtGui.QApplication(sys.argv)
    webpage = WebPage('http://en.wikipedia.org/')
    print('Press Ctrl+C to quit\n')
    sys.exit(app.exec_())
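If the goal is to re-parse the page on every reload, the updated HTML can be pulled inside handleLoadFinished. A sketch of just that method, building on the demo above (parse_html stands in for whatever parsing code the question already has):
class WebPage(QtWebKit.QWebPage):
    ...
    def handleLoadFinished(self):
        html = self.mainFrame().toHtml()              # current HTML of the reloaded page
        parse_html(html)                              # hypothetical: your own parse/find/use code
        QtCore.QTimer.singleShot(3000, self.refresh)  # schedule the next reload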