PyAutoGUI locateCenterOnScreen not working

pyautogui's locateCenterOnScreen is not working on my Mac. I've installed Pillow and pyscreeze, but I'm still getting this error:
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyautogui/init.py", line 175, in wrapper
return wrappedFunction(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyautogui/init.py", line 207, in locateCenterOnScreen
return pyscreeze.locateCenterOnScreen(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 413, in locateCenterOnScreen
coords = locateOnScreen(image, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 373, in locateOnScreen
retVal = locate(image, screenshotIm, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 353, in locate
points = tuple(locateAll(needleImage, haystackImage, **kwargs))
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 262, in _locateAll_python
needleFileObj = open(needleImage, 'rb')
FileNotFoundError: [Errno 2] No such file or directory: 'C://Users/tareq/Desktop/Code/Code of the future/Image recog/images/To pr.png'
My code:
import time
import pyautogui
time.sleep(3)
A = pyautogui.locateCenterOnScreen('C://Users/tareq/Desktop/Code/Code of the future/Image recog/images/To pr.png')
print(A)
print('done')
I've installed Pillow, but the error persists.
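Note that the traceback bottoms out in a plain FileNotFoundError: Python cannot even open the template image, because 'C://Users/tareq/...' is a Windows-style path, while the interpreter in the traceback is the macOS framework Python. A minimal sketch of a more robust version, assuming the images folder sits next to the script (folder and file names are taken from the question):

import time
from pathlib import Path

import pyautogui

# Build the path relative to this script instead of hard-coding a
# Windows-style absolute path, which cannot exist on macOS.
IMAGE = Path(__file__).resolve().parent / "images" / "To pr.png"

time.sleep(3)

# Fail early with a clear message if the template file itself is missing.
if not IMAGE.is_file():
    raise FileNotFoundError(f"Template image not found: {IMAGE}")

# Returns the center point when the image is found on screen; depending on
# the pyautogui version, a miss returns None or raises ImageNotFoundException.
A = pyautogui.locateCenterOnScreen(str(IMAGE))
print(A)
print('done')

Note also that on macOS the terminal or IDE running the script needs Screen Recording permission, or pyautogui's screenshots will not capture the screen contents.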

Related

Op:__inference_predict_function_82024 error in neural network

I am getting this error when I run a simple neural network model on the MNIST dataset.
Here's my model:
customNN = Sequential()
# input layer
customNN.add(Dense(4, activation = "relu",input_shape = (28,28)))
# Hidden layer
customNN.add(Dense(16,activation = "relu"))
customNN.add(Dense(32,activation = "relu"))
customNN.add(Dense(64,activation = "relu"))
customNN.add(Dense(100,activation = "relu"))
customNN.add(Dense(128,activation = "relu"))
# Flatten() collapses the layer output into one dimension
customNN.add(Flatten())
# output layer
customNN.add(Dense(10,activation = "softmax"))
Compiling and fitting on the MNIST dataset completes successfully:
customNN.compile(optimizer="adam", loss= "categorical_crossentropy", metrics=["accuracy"])
customNN.fit(xtrain,ytrain, epochs=10)
import cv2
def input_prepare(img):
    img = np.asarray(img)             # convert to array
    img = cv2.resize(img, (28, 28))   # resize to target shape
    img = cv2.bitwise_not(img)        # [optional] invert so the background is black (flips 1's and 0's)
    img = img / 255                   # normalize
    img = img.reshape(1, 784)         # reshape to the input placeholder shape
    return img
img = cv2.imread('4.jpg')
orig = img.copy() # save for plotting later on
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # gray scaling
img = input_prepare(img)
pred = customNN.predict(img)
plt.imshow(cv2.cvtColor(orig, cv2.COLOR_BGR2RGB))
plt.title(np.argmax(pred, axis=1))
plt.show()
But when I run this code I get the following error:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-29-509e1856bd1e> in <module>
16
17
---> 18 pred = customNN.predict(img)
19 plt.imshow(cv2.cvtColor(orig, cv2.COLOR_BGR2RGB))
20 plt.title(np.argmax(pred, axis=1))
D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
D:\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
51 ctx.ensure_initialized()
52 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 53 inputs, attrs, num_outputs)
54 except core._NotOkStatusException as e:
55 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'sequential/dense/Tensordot/GatherV2_1' defined at (most recent call last):
File "D:\anaconda3\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "D:\anaconda3\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "D:\anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\traitlets\config\application.py", line 1041, in launch_instance
app.start()
File "D:\anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 583, in start
self.io_loop.start()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\platform\asyncio.py", line 215, in start
self.asyncio_loop.run_forever()
File "D:\anaconda3\lib\asyncio\base_events.py", line 541, in run_forever
self._run_once()
File "D:\anaconda3\lib\asyncio\base_events.py", line 1786, in _run_once
handle._run()
File "D:\anaconda3\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\ioloop.py", line 687, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\ioloop.py", line 740, in _run_callback
ret = callback()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 821, in inner
self.ctx_run(self.run)
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 782, in run
yielded = self.gen.send(value)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 361, in process_one
yield gen.maybe_future(dispatch(*args))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 541, in execute_request
user_expressions, allow_stdin,
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 300, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "D:\anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2976, in run_cell
raw_cell, store_history, silent, shell_futures, cell_id
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3030, in _run_cell
return runner(coro)
File "D:\anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 78, in _pseudo_sync_runner
coro.send(None)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3258, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3473, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3553, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-29-509e1856bd1e>", line 18, in <module>
pred = customNN.predict(img)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2350, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2137, in predict_function
return step_function(self, iterator)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2123, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2111, in run_step
outputs = model.predict_step(data)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2079, in predict_step
return self(x, training=False)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 561, in __call__
return super().__call__(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1132, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\sequential.py", line 413, in call
return super().call(inputs, training=training, mask=mask)
File "D:\anaconda3\lib\site-packages\keras\engine\functional.py", line 511, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "D:\anaconda3\lib\site-packages\keras\engine\functional.py", line 668, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1132, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\layers\core\dense.py", line 244, in call
outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
Node: 'sequential/dense/Tensordot/GatherV2_1'
indices[0] = 2 is not in [0, 2)
[[{{node sequential/dense/Tensordot/GatherV2_1}}]] [Op:__inference_predict_function_82024]
I cannot figure out what the problem is. I am a newbie in DL and trying to improve; any help finding the problem is highly appreciated.
Thanks.
I have tried a new image to detect the handwriting, but it still shows an error.
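The shapes are the likely culprit: the model's first Dense layer was built with input_shape=(28, 28), so predict() expects batches shaped (n, 28, 28), but input_prepare returns (1, 784). That mismatch is what surfaces as the Tensordot/GatherV2 error. A minimal sketch of one fix, keeping the question's preprocessing but reshaping to the model's input (alternatively, move Flatten() to the front of the model and keep the 784-vector):

import cv2
import numpy as np

def input_prepare(img):
    img = cv2.resize(img, (28, 28))   # match the model's spatial shape
    img = cv2.bitwise_not(img)        # optional: dark background, like MNIST
    img = img / 255.0                 # normalize to [0, 1]
    return img.reshape(1, 28, 28)     # (batch, 28, 28) to match input_shape=(28, 28)

img = cv2.imread('4.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
pred = customNN.predict(input_prepare(img))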

Dash Plotly error TypeError: Object of type DataFrame is not JSON serializable

Hello, I am working with Dash to build a dashboard.
Below is my code.
I tried to fix the error but was not able to; can anyone look into this?
In Chrome I get "Error loading layout", and the server raises a TypeError.
import dash
import dash_bootstrap_components as dbc
from dash import dcc
import dash_html_components as html
from dash import dash_table
import pandas as pd
import numpy as np

def getData():
    return preprocess()

def back_to_df(dictio):
    return pd.DataFrame.from_dict(dictio)

tblcols = [{"name": i, "id": i} for i in back_to_df(getData()).columns]

app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

body = html.Div([
    html.H1("Live rates"),
    dbc.Row([
        dbc.Col(html.Div([
            dcc.Interval('graph-update', interval=80, n_intervals=0),
            dash_table.DataTable(
                id='table',
                data=getData(),
                columns=tblcols,
                page_size=10,
                style_table={'overflowX': 'auto'},
            )
        ]), width=3)
    ])
])

app.layout = html.Div([body])

@app.callback(
    dash.dependencies.Output('table', 'data'),
    [dash.dependencies.Input('graph-update', 'n_intervals')])
def updateTable(n):
    return getData()

if __name__ == "__main__":
    app.run_server(debug=False, port=8010)
The full traceback is as follows:
Traceback (most recent call last):
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\_compat.py", line 39, in reraise
raise value
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "C:\Users\Admin\anaconda3\lib\site-packages\flask\app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "C:\Users\Admin\anaconda3\lib\site-packages\dash\dash.py", line 569, in serve_layout
to_json(layout),
File "C:\Users\Admin\anaconda3\lib\site-packages\dash\_utils.py", line 20, in to_json
return to_json_plotly(value)
File "C:\Users\Admin\anaconda3\lib\site-packages\plotly\io\_json.py", line 124, in to_json_plotly
return json.dumps(plotly_object, cls=PlotlyJSONEncoder, **opts)
File "C:\Users\Admin\anaconda3\lib\json\__init__.py", line 234, in dumps
return cls(
File "C:\Users\Admin\anaconda3\lib\site-packages\_plotly_utils\utils.py", line 59, in encode
encoded_o = super(PlotlyJSONEncoder, self).encode(o)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\Admin\anaconda3\lib\site-packages\_plotly_utils\utils.py", line 136, in default
return _json.JSONEncoder.default(self, obj)
File "C:\Users\Admin\anaconda3\lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type DataFrame is not JSON serializable
It sounds like the getData function is returning a pandas DataFrame directly. That won't work: everything in the layout (including DataTable's data property) must be JSON-serializable. Convert the frame to a list of records first:
return df.to_dict(orient='records')
That should work.
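For completeness, a minimal sketch of the corrected helper, assuming preprocess() (the question's own function) returns a pandas DataFrame:

import pandas as pd

def getData():
    df = preprocess()   # assumed to return a pandas DataFrame
    # DataTable's data property must be JSON-serializable,
    # i.e. a list of row dicts rather than a DataFrame.
    return df.to_dict(orient='records')

Both the data=getData() in the layout and the callback's return value go through Dash's JSON encoder, so both paths need the records form.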

How To Handle CCXT Binance Intermittent Network Error

I stumbled across an issue that causes the script below to throw an error every so often, roughly every other day on average.
The script runs 24/7, and dozens of similar instances run simultaneously. That seems relevant because, as the error shows, it is thrown for a different asset than the one this instance retrieves.
OS: Windows 10
Python version: 3.9
CCXT version: 1.54.87
import ccxt
import pandas_ta as ta
import config
import schedule
import pandas as pd
from datetime import datetime
import time
import socket

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 250)

exchange = ccxt.binance({
    'apiKey': config.BINANCE_API_KEY,
    'secret': config.BINANCE_API_SECRET,
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future'
    },
})

in_position = False
free_balance = exchange.fetch_free_balance()
used_balance = exchange.fetch_used_balance()
free_usd = free_balance['USDT']
used_usd = used_balance['USDT']
amount = free_usd + used_usd
quantity = 0
new_quantity = 0

def trigger(df):
    ...  # strategy

def algo():
    print(f"Loading data as of {datetime.now().isoformat()}")
    bars = exchange.fetch_ohlcv('BNB/USDT', timeframe='30m', limit=50)
    df = pd.DataFrame(bars, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
    df['time'] = pd.to_datetime(df['time'], unit='ms')
    df.set_index(pd.DatetimeIndex(df['time']), inplace=True)
    trigger(df)

try:
    schedule.every(2).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except ConnectionResetError:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except socket.timeout:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
Traceback (most recent call last):
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 439, in send
resp = conn.urlopen(
File "C:\Users\", line 755, in urlopen
retries = retries.increment(
File "C:\Users\", line 532, in increment
raise six.reraise(type(error), error, _stacktrace)
File "C:\Users\", line 769, in reraise
raise value.with_traceback(tb)
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
urllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 571, in fetch
response = self.session.request(
File "C:\Users\", line 542, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\", line 655, in send
r = adapter.send(request, **kwargs)
File "C:\Users\", line 498, in send
raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\", line 79, in <module>
schedule.run_pending()
File "C:\Users\", line 780, in run_pending
default_scheduler.run_pending()
File "C:\Users\", line 100, in run_pending
self._run_job(job)
File "C:\Users\", line 172, in _run_job
ret = job.run()
File "C:\Users\", line 661, in run
ret = self.job_func()
File "C:\Users\", line 67, in algo
bars = exchange.fetch_ohlcv('ADA/USDT', timeframe='15m', limit=300)
File "C:\Users\", line 1724, in fetch_ohlcv
response = getattr(self, method)(self.extend(request, params))
File "C:\Users\", line 463, in inner
return entry(_self, **inner_kwargs)
File "C:\Users\", line 4119, in request
response = self.fetch2(path, api, method, params, headers, body)
File "C:\Users\", line 486, in fetch2
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
File "C:\Users\", line 623, in fetch
raise NetworkError(details) from e
ccxt.base.errors.NetworkError: binance GET https://fapi.binance.com/fapi/v1/klines?symbol=ADAUSDT&interval=15m&limit=300
I had the same problem: my browser could access the URL fine, but PyCharm ran into a network error. I am using a proxy to access binance.com; my PyCharm proxy setting is manual, and the connection check passes.
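As for the intermittent resets themselves: the try/except in the question wraps the scheduler loop, so the first exception drops execution into a handler whose own while True loop is unprotected (and registers the job a second time). A more robust pattern is to catch ccxt's NetworkError (the type at the bottom of the traceback) inside the job and retry with a short backoff. A minimal sketch, reusing the question's exchange and trigger (the retry counts and delays here are arbitrary choices):

import time
import ccxt
import pandas as pd

def fetch_with_retry(symbol, timeframe, limit, retries=3, backoff=5):
    # ccxt wraps connection resets and timeouts in ccxt.NetworkError,
    # so transient failures can be retried in one place.
    for attempt in range(retries):
        try:
            return exchange.fetch_ohlcv(symbol, timeframe=timeframe, limit=limit)
        except ccxt.NetworkError as err:
            if attempt == retries - 1:
                raise   # give up after the last attempt
            print(f"Network error ({err}); retrying in {backoff}s")
            time.sleep(backoff)

def algo():
    bars = fetch_with_retry('BNB/USDT', '30m', 50)
    df = pd.DataFrame(bars, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
    trigger(df)   # the question's strategy function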

Airflow DAG throws RecursionError when triggered via the web console

I get the following error when I try to trigger a DAG that uses a custom BaseOperator.
Here is the error:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x80 in position 0: invalid start byte
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 69, in inner
return self._run_view(f, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_admin/base.py", line 368, in _run_view
return fn(self, *args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/flask_login/utils.py", line 258, in decorated_view
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 290, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/utils.py", line 337, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/www/views.py", line 1213, in trigger
external_trigger=True
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/dag.py", line 1659, in create_dagrun
session=session)
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 70, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/dag.py", line 1346, in create_dagrun
run.refresh_from_db()
File "/usr/local/lib/python3.7/site-packages/airflow/utils/db.py", line 74, in wrapper
return func(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 109, in refresh_from_db
DR.run_id == self.run_id
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3347, in one
ret = self.one_or_none()
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3316, in one_or_none
ret = list(self)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3389, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 3414, in _execute_and_instances
result = conn.execute(querycontext.statement, self._params)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 982, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1101, in _execute_clauseelement
distilled_params,
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1250, in _execute_context
e, statement, parameters, cursor, context
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1478, in _handle_dbapi_exception
util.reraise(*exc_info)
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/util/compat.py", line 153, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/base.py", line 1246, in _execute_context
cursor, statement, parameters, context
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/engine/default.py", line 588, in do_execute
cursor.execute(statement, parameters)
File "/usr/local/lib/python3.7/site-packages/mysql/connector/cursor_cext.py", line 272, in execute
self._handle_result(result)
File "/usr/local/lib/python3.7/site-packages/mysql/connector/cursor_cext.py", line 163, in _handle_result
self._handle_resultset()
File "/usr/local/lib/python3.7/site-packages/mysql/connector/cursor_cext.py", line 651, in _handle_resultset
self._rows = self._cnx.get_rows()[0]
File "/usr/local/lib/python3.7/site-packages/mysql/connector/connection_cext.py", line 318, in get_rows
else self._cmysql.fetch_row()
SystemError: <method 'fetch_row' of '_mysql_connector.MySQL' objects> returned a result with an error set
My code is as follows:
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults

class TestOperator(BaseOperator):
    template_fields = ('param1',)   # trailing comma: this must be a tuple
    ui_color = '#A7E6A7'

    @apply_defaults
    def __init__(self, param1, *args, **kwargs):
        self.param1 = param1
        super(TestOperator, self).__init__(*args, **kwargs)

    def execute(self, context):
        print('welcome to airflow')

class TestOperatorPlugin(AirflowPlugin):
    name = "TestOperator_plugin"
    operators = [TestOperator]
And here is the DAG:
from TestOperator import TestOperator
from airflow import DAG
from datetime import datetime

prog_args = {
    'depends_on_past': False,
    'param1': 'testOne'
}

testMYDAG = DAG('TestMYDAG', start_date=datetime(2020, 2, 18), description='TestMYDAG',
                default_args=prog_args, schedule_interval=None)

testOp = TestOperator(task_id='test_dag', dag=testMYDAG)
testOp
Team, I resolved the issue by setting the encoding scheme to utf-8 in the airflow.cfg file; it also needs to be appended to the sql_alchemy connection string.
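For reference, a minimal sketch of what that change can look like, assuming the MySQL backend shown in the traceback (host, credentials, and database name are placeholders):

# airflow.cfg
[core]
# append the charset to the SQLAlchemy connection string
sql_alchemy_conn = mysql+mysqlconnector://user:password@localhost:3306/airflow?charset=utf8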

Python errors when using Keras to implement a CNN

I am learning Deep Learning and want to use Python/Keras to implement a CNN, but when I run it from the command line I get some errors.
This is my source code: https://github.com/lijhong/CNN-kereas.git
The error is as follows:
Traceback (most recent call last):
File "/home/ah0818lijhong/CNN-kereas/cnn-kereas.py", line 167, in <module>
model.fit(x_train, y_train,epochs=3)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/models.py", line 845, in fit
initial_epoch=initial_epoch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/training.py", line 1485, in fit
initial_epoch=initial_epoch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/training.py", line 1140, in _fit_loop
outs = f(ins_batch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 2073, in __call__
feed_dict=feed_dict)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 778, in run
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 778, in run
run_metadata_ptr)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 982, in _run
feed_dict_string, options, run_metadata)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1032, in _do_run
target_list, options, run_metadata)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1052, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,868] = 115873 is not in [0, 20001)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/cpu:0"](embedding_1/embeddings/read, _recv_embedding_1_input_0)]]
Caused by op u'embedding_1/Gather', defined at:
File "/home/ah0818lijhong/CNN-kereas/cnn-kereas.py", line 122, in <module>
model_left.add(embedding_layer)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/models.py", line 422, in add
layer(x)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/topology.py", line 554, in __call__
output = self.call(inputs, **kwargs)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/layers/embeddings.py", line 119, in call
out = K.gather(self.embeddings, inputs)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 966, in gather
return tf.gather(reference, indices)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1207, in gather
validate_indices=validate_indices, name=name)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): indices[0,868] = 115873 is not in [0, 20001)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/cpu:0"](embedding_1/embeddings/read, _recv_embedding_1_input_0)]]
I hope someone can help me fix it.
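The message itself points at the cause: the Embedding layer was built with input_dim 20001, but the input sequences contain a word index of 115873, which is outside [0, 20001). One hedged sketch of a workaround (the variable names are assumptions, since I have not inspected the linked repo): either rebuild the embedding with input_dim = max_index + 1, or remap out-of-range indices before training:

import numpy as np

MAX_NB_WORDS = 20001  # the embedding's input_dim, taken from the error message

def clip_oov(sequences, max_words=MAX_NB_WORDS):
    # Map any word index >= input_dim to 0, the conventional padding/OOV slot,
    # so every index is valid for the embedding lookup.
    seqs = np.asarray(sequences)
    seqs[seqs >= max_words] = 0
    return seqs

x_train = clip_oov(x_train)   # x_train: the padded sequence matrix fed to model.fit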