TensorFlow TFRecords read error (CSV-converted data)

I converted my CSV data to the TFRecords format, but I cannot read the resulting file back.
First, I converted the CSV file to a TFRecords file.
My CSV data looks like this:
1,2,3,4,5,1
2,3,4,5,6,2
3,4,5,6,7,3
4,5,6,7,8,4
Columns 1–5 are the features and column 6 is the label.
I converted the CSV file with this code:
import pandas as pd
import tensorflow as tf

name = 'test'
filename = name + '.tfrecords'
print('Writing', filename)

# Load the CSV shown above (the filename 'data.csv' is an assumption).
csv = pd.read_csv('data.csv', header=None).values

writer = tf.python_io.TFRecordWriter(filename)
for row in csv:
    features, label = row[:-1], row[-1]
    print(features)
    print(label)
    example = tf.train.Example()
    # Both fields are written as int64 lists.
    example.features.feature['features'].int64_list.value.extend(features)
    example.features.feature['label'].int64_list.value.append(label)
    writer.write(example.SerializeToString())
writer.close()
This produced a TFRecords file (test.tfrecords).
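To double-check what was actually written, a quick sanity-check sketch using the same TF 1.x API as the rest of this code is to iterate over the records in pure Python:
for record in tf.python_io.tf_record_iterator('test.tfrecords'):
    example = tf.train.Example()
    example.ParseFromString(record)
    print(example)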
My problem is that I cannot read this TFRecords file back.
I tried the following code and got an error message.
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        # Defaults are not specified since both keys are required.
        features={
            'features': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64)
        })
    image = tf.decode_raw(features['features'], tf.int64)
    label = tf.cast(features['label'], tf.int32)
    return image, label

def get_all_records(FILE):
    with tf.Session() as sess:
        filename_queue = tf.train.string_input_producer([FILE])
        image, label = read_and_decode(filename_queue)
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(100):
            example, l = sess.run([image, label])
            print(example, l)
        coord.request_stop()
        coord.join(threads)

get_all_records('test.tfrecords')
This is the error message:
INFO:tensorflow:Error reported to Coordinator: <class 'tensorflow.python.framework.errors_impl.CancelledError'>, Run call was cancelled
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1021 try:
-> 1022 return fn(*args)
1023 except errors.OpError as e:
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1003 feed_dict, fetch_list, target_list,
-> 1004 status, run_metadata)
1005
C:\Program Files\Anaconda3\envs\tf\lib\contextlib.py in __exit__(self, type, value, traceback)
65 try:
---> 66 next(self.gen)
67 except StopIteration:
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\framework\errors_impl.py in raise_exception_on_not_ok_status()
468 compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 469 pywrap_tensorflow.TF_GetCode(status))
470 finally:
InvalidArgumentError: Name: <unknown>, Key: features, Index: 0. Data types don't match. Data type: int64Expected type: string
[[Node: ParseSingleExample/ParseExample/ParseExample = ParseExample[Ndense=2, Nsparse=0, Tdense=[DT_STRING, DT_INT64], dense_shapes=[[], []], sparse_types=[], _device="/job:localhost/replica:0/task:0/cpu:0"](ParseSingleExample/ExpandDims, ParseSingleExample/ParseExample/ParseExample/names, ParseSingleExample/ParseExample/ParseExample/dense_keys_0, ParseSingleExample/ParseExample/ParseExample/dense_keys_1, ParseSingleExample/ParseExample/Const, ParseSingleExample/ParseExample/Const_1)]]
[[Node: ParseSingleExample/ParseExample/ParseExample/_3 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_13_ParseSingleExample/ParseExample/ParseExample", tensor_type=DT_INT64, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-6-0bf78295d664> in <module>()
----> 1 get_all_records('test.tfrecords')
<ipython-input-5-afbcd262993d> in get_all_records(FILE)
8 threads = tf.train.start_queue_runners(coord=coord)
9 for i in range(100):
---> 10 example, l = sess.run([image, label])
11 print (example,l)
12 coord.request_stop()
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
765 try:
766 result = self._run(None, fetches, feed_dict, options_ptr,
--> 767 run_metadata_ptr)
768 if run_metadata:
769 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
963 if final_fetches or final_targets:
964 results = self._do_run(handle, final_targets, final_fetches,
--> 965 feed_dict_string, options, run_metadata)
966 else:
967 results = []
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
1013 if handle is None:
1014 return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1015 target_list, options, run_metadata)
1016 else:
1017 return self._do_call(_prun_fn, self._session, handle, feed_dict,
C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1033 except KeyError:
1034 pass
-> 1035 raise type(e)(node_def, op, message)
1036
1037 def _extend_graph(self):
InvalidArgumentError: Name: <unknown>, Key: features, Index: 0. Data types don't match. Data type: int64Expected type: string
[[Node: ParseSingleExample/ParseExample/ParseExample = ParseExample[Ndense=2, Nsparse=0, Tdense=[DT_STRING, DT_INT64], dense_shapes=[[], []], sparse_types=[], _device="/job:localhost/replica:0/task:0/cpu:0"](ParseSingleExample/ExpandDims, ParseSingleExample/ParseExample/ParseExample/names, ParseSingleExample/ParseExample/ParseExample/dense_keys_0, ParseSingleExample/ParseExample/ParseExample/dense_keys_1, ParseSingleExample/ParseExample/Const, ParseSingleExample/ParseExample/Const_1)]]
[[Node: ParseSingleExample/ParseExample/ParseExample/_3 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_13_ParseSingleExample/ParseExample/ParseExample", tensor_type=DT_INT64, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
Caused by op 'ParseSingleExample/ParseExample/ParseExample', defined at:
File "C:\Program Files\Anaconda3\envs\tf\lib\runpy.py", line 184, in _run_module_as_main
"__main__", mod_spec)
File "C:\Program Files\Anaconda3\envs\tf\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\__main__.py", line 3, in <module>
app.launch_new_instance()
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\traitlets\config\application.py", line 658, in launch_instance
app.start()
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\kernelapp.py", line 474, in start
ioloop.IOLoop.instance().start()
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\zmq\eventloop\ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tornado\ioloop.py", line 887, in start
handler_func(fd_obj, events)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tornado\stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\zmq\eventloop\zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\zmq\eventloop\zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\zmq\eventloop\zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tornado\stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\kernelbase.py", line 276, in dispatcher
return self.dispatch_shell(stream, msg)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\kernelbase.py", line 228, in dispatch_shell
handler(stream, idents, msg)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\kernelbase.py", line 390, in execute_request
user_expressions, allow_stdin)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\ipykernel\zmqshell.py", line 501, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\IPython\core\interactiveshell.py", line 2717, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\IPython\core\interactiveshell.py", line 2827, in run_ast_nodes
if self.run_code(code, result):
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\IPython\core\interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-6-0bf78295d664>", line 1, in <module>
get_all_records('test.tfrecords')
File "<ipython-input-5-afbcd262993d>", line 4, in get_all_records
image, label = read_and_decode(filename_queue)
File "<ipython-input-4-a06d5222e475>", line 9, in read_and_decode
'label': tf.FixedLenFeature([], tf.int64)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\parsing_ops.py", line 595, in parse_single_example
dense_types, dense_defaults, dense_shapes, name)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\parsing_ops.py", line 669, in _parse_single_example_raw
name=name)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\parsing_ops.py", line 544, in _parse_example_raw
name=name)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\ops\gen_parsing_ops.py", line 167, in _parse_example
dense_shapes=dense_shapes, name=name)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 763, in apply_op
op_def=op_def)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\framework\ops.py", line 2395, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Program Files\Anaconda3\envs\tf\lib\site-packages\tensorflow\python\framework\ops.py", line 1264, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): Name: <unknown>, Key: features, Index: 0. Data types don't match. Data type: int64Expected type: string
[[Node: ParseSingleExample/ParseExample/ParseExample = ParseExample[Ndense=2, Nsparse=0, Tdense=[DT_STRING, DT_INT64], dense_shapes=[[], []], sparse_types=[], _device="/job:localhost/replica:0/task:0/cpu:0"](ParseSingleExample/ExpandDims, ParseSingleExample/ParseExample/ParseExample/names, ParseSingleExample/ParseExample/ParseExample/dense_keys_0, ParseSingleExample/ParseExample/ParseExample/dense_keys_1, ParseSingleExample/ParseExample/Const, ParseSingleExample/ParseExample/Const_1)]]
[[Node: ParseSingleExample/ParseExample/ParseExample/_3 = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/cpu:0", send_device_incarnation=1, tensor_name="edge_13_ParseSingleExample/ParseExample/ParseExample", tensor_type=DT_INT64, _device="/job:localhost/replica:0/task:0/gpu:0"]()]]
What is wrong here?

The error occurs because in the program where you wrote the data, you created the "features" feature with type int64:
example.features.feature['features'].int64_list.value.extend(features)
...whereas in the program where you try to read the data, you specify that the "features" feature has type tf.string:
features = tf.parse_single_example(
    ...
    features={
        'features': tf.FixedLenFeature([], tf.string),
        ...
    })
The solution most likely to work is to change the call to tf.parse_single_example() in your reading program to match the true type of the data:
features = tf.parse_single_example(
    serialized_example,
    features={
        'features': tf.FixedLenFeature([5], tf.int64),
        'label': tf.FixedLenFeature([], tf.int64)
    })
Note two details: each record stores five integers under 'features', so the shape must be [5] rather than []; and because the parsed tensor is then already int64, the tf.decode_raw() call must be removed as well (decode_raw only decodes string bytes).
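Putting the pieces together, a corrected read_and_decode might look like this sketch (assuming five int64 features per row, as in the sample data):
def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    parsed = tf.parse_single_example(
        serialized_example,
        features={
            'features': tf.FixedLenFeature([5], tf.int64),
            'label': tf.FixedLenFeature([], tf.int64)
        })
    # No decode_raw here: the parsed 'features' tensor is already int64.
    features = tf.cast(parsed['features'], tf.int32)
    label = tf.cast(parsed['label'], tf.int32)
    return features, label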

Related

Op:__inference_predict_function_82024 error in neural network

I am getting this error when I run a simple neural network model on the MNIST dataset.
Here's my model:
customNN = Sequential()
# input layer
customNN.add(Dense(4, activation="relu", input_shape=(28, 28)))
# hidden layers
customNN.add(Dense(16, activation="relu"))
customNN.add(Dense(32, activation="relu"))
customNN.add(Dense(64, activation="relu"))
customNN.add(Dense(100, activation="relu"))
customNN.add(Dense(128, activation="relu"))
# Flatten collapses the layer's output into one dimension.
customNN.add(Flatten())
# output layer
customNN.add(Dense(10, activation="softmax"))
It compiles and trains successfully on the MNIST dataset:
customNN.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
customNN.fit(xtrain, ytrain, epochs=10)
import cv2

def input_prepare(img):
    img = np.asarray(img)            # convert to array
    img = cv2.resize(img, (28, 28))  # resize to target shape
    img = cv2.bitwise_not(img)       # [optional] invert so the background is black
    img = img / 255                  # normalize
    img = img.reshape(1, 784)        # reshape to the input placeholder shape
    return img

img = cv2.imread('4.jpg')
orig = img.copy()  # save for plotting later on
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale
img = input_prepare(img)

pred = customNN.predict(img)
plt.imshow(cv2.cvtColor(orig, cv2.COLOR_BGR2RGB))
plt.title(np.argmax(pred, axis=1))
plt.show()
But when I run this code I get the following error:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-29-509e1856bd1e> in <module>
16
17
---> 18 pred = customNN.predict(img)
19 plt.imshow(cv2.cvtColor(orig, cv2.COLOR_BGR2RGB))
20 plt.title(np.argmax(pred, axis=1))
D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
D:\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
51 ctx.ensure_initialized()
52 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 53 inputs, attrs, num_outputs)
54 except core._NotOkStatusException as e:
55 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'sequential/dense/Tensordot/GatherV2_1' defined at (most recent call last):
File "D:\anaconda3\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "D:\anaconda3\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "D:\anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\traitlets\config\application.py", line 1041, in launch_instance
app.start()
File "D:\anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 583, in start
self.io_loop.start()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\platform\asyncio.py", line 215, in start
self.asyncio_loop.run_forever()
File "D:\anaconda3\lib\asyncio\base_events.py", line 541, in run_forever
self._run_once()
File "D:\anaconda3\lib\asyncio\base_events.py", line 1786, in _run_once
handle._run()
File "D:\anaconda3\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\ioloop.py", line 687, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\ioloop.py", line 740, in _run_callback
ret = callback()
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 821, in inner
self.ctx_run(self.run)
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 782, in run
yielded = self.gen.send(value)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 361, in process_one
yield gen.maybe_future(dispatch(*args))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 541, in execute_request
user_expressions, allow_stdin,
File "C:\Users\Deadpool\AppData\Roaming\Python\Python37\site-packages\tornado\gen.py", line 234, in wrapper
yielded = ctx_run(next, result)
File "D:\anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 300, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "D:\anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2976, in run_cell
raw_cell, store_history, silent, shell_futures, cell_id
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3030, in _run_cell
return runner(coro)
File "D:\anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 78, in _pseudo_sync_runner
coro.send(None)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3258, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3473, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "D:\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3553, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-29-509e1856bd1e>", line 18, in <module>
pred = customNN.predict(img)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2350, in predict
tmp_batch_outputs = self.predict_function(iterator)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2137, in predict_function
return step_function(self, iterator)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2123, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2111, in run_step
outputs = model.predict_step(data)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 2079, in predict_step
return self(x, training=False)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\training.py", line 561, in __call__
return super().__call__(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1132, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\sequential.py", line 413, in call
return super().call(inputs, training=training, mask=mask)
File "D:\anaconda3\lib\site-packages\keras\engine\functional.py", line 511, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "D:\anaconda3\lib\site-packages\keras\engine\functional.py", line 668, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1132, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "D:\anaconda3\lib\site-packages\keras\layers\core\dense.py", line 244, in call
outputs = tf.tensordot(inputs, self.kernel, [[rank - 1], [0]])
Node: 'sequential/dense/Tensordot/GatherV2_1'
indices[0] = 2 is not in [0, 2)
[[{{node sequential/dense/Tensordot/GatherV2_1}}]] [Op:__inference_predict_function_82024]
I cannot figure out what the problem is. I am a newbie in DL and trying to improve; any help finding the problem is highly appreciated.
Thanks.
I have tried a new image to detect the handwriting, but it shows an error.
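An editorial note, offered as a hedged diagnosis rather than a confirmed fix: the model's first Dense layer was built with input_shape=(28, 28), so at predict time it expects a batch of shape (1, 28, 28), but input_prepare reshapes the image to (1, 784); that rank mismatch is why the Tensordot/GatherV2 op fails looking for axis 2 of a rank-2 tensor. Matching the training shape would look like:
img = img.reshape(1, 28, 28)  # match the (28, 28) input_shape the model was built with
pred = customNN.predict(img)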

Having "make_aware expects a naive datetime" while migrate

I have developed an application with Django.
It works fine on my PC with the SQLite backend, but when I try to go live on a Linux server with a MySQL backend, I get the error below during the first migration.
(env-bulkmailer) [root@localhost bulkmailer]# python3 manage.py migrate
Traceback (most recent call last):
File "/var/www/bulkmailer-folder/bulkmailer/manage.py", line 22, in <module>
main()
File "/var/www/bulkmailer-folder/bulkmailer/manage.py", line 18, in main
execute_from_command_line(sys.argv)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/__init__.py", line 446, in execute_from_command_line
utility.execute()
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/__init__.py", line 440, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/base.py", line 402, in run_from_argv
self.execute(*args, **cmd_options)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/base.py", line 448, in execute
output = self.handle(*args, **options)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/base.py", line 96, in wrapped
res = handle_func(*args, **kwargs)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/core/management/commands/migrate.py", line 114, in handle
executor = MigrationExecutor(connection, self.migration_progress_callback)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/migrations/executor.py", line 18, in __init__
self.loader = MigrationLoader(self.connection)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/migrations/loader.py", line 58, in __init__
self.build_graph()
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/migrations/loader.py", line 235, in build_graph
self.applied_migrations = recorder.applied_migrations()
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/migrations/recorder.py", line 82, in applied_migrations
return {
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/models/query.py", line 394, in __iter__
self._fetch_all()
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/models/query.py", line 1866, in _fetch_all
self._result_cache = list(self._iterable_class(self))
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/models/query.py", line 117, in __iter__
for row in compiler.results_iter(results):
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/models/sql/compiler.py", line 1336, in apply_converters
value = converter(value, expression, connection)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/db/backends/mysql/operations.py", line 331, in convert_datetimefield_value
value = timezone.make_aware(value, self.connection.timezone)
File "/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/utils/timezone.py", line 291, in make_aware
raise ValueError("make_aware expects a naive datetime, got %s" % value)
ValueError: make_aware expects a naive datetime, got 2022-11-20 12:39:18.866299+00:00
In settings:
USE_TZ = True
I have also run mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql -u root mysql, as the Django docs describe.
I am using Django 4.1.3 and MySQL Community 8.0.30.
Thanks in advance.
Ran into the same issue. At some point, Django assumes that the data is timezone-naive without checking. Here's the work-around.
Update the make_aware function that is listed in your stack trace here:
/var/www/bulkmailer-folder/env-bulkmailer/lib64/python3.9/site-packages/django/utils/timezone.py", line 291, in make_aware
Instead of raising an error if the value is already aware, just return the aware value. See the last else statement below.
def make_aware(value, timezone=None, is_dst=NOT_PASSED):
    """Make a naive datetime.datetime in a given time zone aware."""
    if is_dst is NOT_PASSED:
        is_dst = None
    else:
        warnings.warn(
            "The is_dst argument to make_aware(), used by the Trunc() "
            "database functions and QuerySet.datetimes(), is deprecated as it "
            "has no effect with zoneinfo time zones.",
            RemovedInDjango50Warning,
        )
    if timezone is None:
        timezone = get_current_timezone()
    if _is_pytz_zone(timezone):
        # This method is available for pytz time zones.
        return timezone.localize(value, is_dst=is_dst)
    else:
        # Check that we won't overwrite the timezone of an aware datetime.
        if is_aware(value):
            # ADD THIS
            return value
        # REMOVE THE FOLLOWING LINE
        # raise ValueError("make_aware expects a naive datetime, got %s" % value)
        # This may be wrong around DST changes!
        return value.replace(tzinfo=timezone)
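A word of caution before applying this: edits to files under site-packages are silently undone by the next pip upgrade. A hedged alternative, sketched below under the assumption of Django 4.1's make_aware signature, is a runtime monkey-patch applied once at startup (for example near the top of settings.py):
from django.utils import timezone as dj_tz

_original_make_aware = dj_tz.make_aware

def _tolerant_make_aware(value, timezone=None, is_dst=dj_tz.NOT_PASSED):
    # If the value is already aware, return it unchanged instead of raising.
    if dj_tz.is_aware(value):
        return value
    return _original_make_aware(value, timezone, is_dst)

dj_tz.make_aware = _tolerant_make_aware
This works because the MySQL backend looks up timezone.make_aware through the module attribute at call time, as the stack trace shows.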

Pyautogui locateCenterOnScreen not working

pyautogui's locateCenterOnScreen is not working on my Mac. I've installed Pillow and pyscreeze, but I'm still getting this error:
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyautogui/init.py", line 175, in wrapper
return wrappedFunction(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyautogui/init.py", line 207, in locateCenterOnScreen
return pyscreeze.locateCenterOnScreen(*args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 413, in locateCenterOnScreen
coords = locateOnScreen(image, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 373, in locateOnScreen
retVal = locate(image, screenshotIm, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 353, in locate
points = tuple(locateAll(needleImage, haystackImage, **kwargs))
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/pyscreeze/init.py", line 262, in _locateAll_python
needleFileObj = open(needleImage, 'rb')
FileNotFoundError: [Errno 2] No such file or directory: 'C://Users/tareq/Desktop/Code/Code of the future/Image recog/images/To pr.png'
My code:
import time
import pyautogui

time.sleep(3)
A = pyautogui.locateCenterOnScreen('C://Users/tareq/Desktop/Code/Code of the future/Image recog/images/To pr.png')
print(A)
print('done')
I've installed Pillow, but the error persists.
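An editorial note, hedged: the traceback ends in a FileNotFoundError for the Windows-style path 'C://Users/...', which cannot exist on macOS, so pyautogui never reaches the image-matching step. One sketch of a fix (the images folder sitting next to the script is an assumption) is to build a path that actually exists on the Mac:
import os
import pyautogui

# Hypothetical layout: the template image lives in an 'images' folder next to this script.
image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'images', 'To pr.png')
A = pyautogui.locateCenterOnScreen(image_path)
print(A)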

How To Handle CCXT Binance Intermittent Network Error

I stumbled across an issue that causes the script below to throw an error every so often, roughly every other day on average.
The script runs 24/7 and dozens of similar instances run simultaneously. That seems relevant because, as the error shows, it is thrown from another instance (a different asset than the one being retrieved here).
OS: Windows 10
Python version: 3.9
CCXT version: 1.54.87
import ccxt
import pandas_ta as ta
import config
import schedule
import pandas as pd
from datetime import datetime
import time
import socket

pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 250)

exchange = ccxt.binance({
    'apiKey': config.BINANCE_API_KEY,
    'secret': config.BINANCE_API_SECRET,
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future'
    },
})

in_position = False
free_balance = exchange.fetch_free_balance()
used_balance = exchange.fetch_used_balance()
free_usd = free_balance['USDT']
used_usd = used_balance['USDT']
amount = free_usd + used_usd
quantity = 0
new_quantity = 0

def trigger(df):
    ...  # strategy logic (omitted in the original post)

def algo():
    print(f"Loading data as of {datetime.now().isoformat()}")
    bars = exchange.fetch_ohlcv('BNB/USDT', timeframe='30m', limit=50)
    df = pd.DataFrame(bars, columns=['time', 'open', 'high', 'low', 'close', 'volume'])
    df['time'] = pd.to_datetime(df['time'], unit='ms')
    df.set_index(pd.DatetimeIndex(df['time']), inplace=True)
    trigger(df)

try:
    schedule.every(2).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except ConnectionResetError:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
except socket.timeout:
    schedule.every(3).seconds.do(algo)
    while True:
        schedule.run_pending()
        time.sleep(1)
Traceback (most recent call last):
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
ConnectionResetError: [WinError 10054] An existing connection was forcibly closed by the remote host
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 439, in send
resp = conn.urlopen(
File "C:\Users\", line 755, in urlopen
retries = retries.increment(
File "C:\Users\", line 532, in increment
raise six.reraise(type(error), error, _stacktrace)
File "C:\Users\", line 769, in reraise
raise value.with_traceback(tb)
File "C:\Users\", line 699, in urlopen
httplib_response = self._make_request(
File "C:\Users\", line 445, in _make_request
six.raise_from(e, None)
File "<string>", line 3, in raise_from
File "C:\Users\", line 440, in _make_request
httplib_response = conn.getresponse()
File "C:\Users\", line 1349, in getresponse
response.begin()
File "C:\Users\", line 316, in begin
version, status, reason = self._read_status()
File "C:\Users\", line 277, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "C:\Users\", line 704, in readinto
return self._sock.recv_into(b)
File "C:\Users\", line 1241, in recv_into
return self.read(nbytes, buffer)
File "C:\Users\", line 1099, in read
return self._sslobj.read(len, buffer)
urllib3.exceptions.ProtocolError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\", line 571, in fetch
response = self.session.request(
File "C:\Users\", line 542, in request
resp = self.send(prep, **send_kwargs)
File "C:\Users\", line 655, in send
r = adapter.send(request, **kwargs)
File "C:\Users\", line 498, in send
raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\", line 79, in <module>
schedule.run_pending()
File "C:\Users\", line 780, in run_pending
default_scheduler.run_pending()
File "C:\Users\", line 100, in run_pending
self._run_job(job)
File "C:\Users\", line 172, in _run_job
ret = job.run()
File "C:\Users\", line 661, in run
ret = self.job_func()
File "C:\Users\", line 67, in algo
bars = exchange.fetch_ohlcv('ADA/USDT', timeframe='15m', limit=300)
File "C:\Users\", line 1724, in fetch_ohlcv
response = getattr(self, method)(self.extend(request, params))
File "C:\Users\", line 463, in inner
return entry(_self, **inner_kwargs)
File "C:\Users\", line 4119, in request
response = self.fetch2(path, api, method, params, headers, body)
File "C:\Users\", line 486, in fetch2
return self.fetch(request['url'], request['method'], request['headers'], request['body'])
File "C:\Users\", line 623, in fetch
raise NetworkError(details) from e
ccxt.base.errors.NetworkError: binance GET https://fapi.binance.com/fapi/v1/klines?symbol=ADAUSDT&interval=15m&limit=300
I had the same problem: my browser could access the URL fine, but the script run from PyCharm failed with a network error. I am using a proxy to access binance.com; my PyCharm proxy setting is manual and the connection check passes.
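More generally, since these transient failures surface as ccxt.base.errors.NetworkError (visible at the bottom of the traceback), one hedged sketch is to catch that exception inside the scheduled job and retry there, because an exception raised inside a job propagates out of schedule.run_pending() and kills the loop. The helper below is hypothetical, not part of ccxt:
import time
import ccxt

def fetch_ohlcv_with_retry(exchange, symbol, timeframe, limit, retries=3, backoff=5):
    # Retry transient network failures; anything else propagates immediately.
    for attempt in range(retries):
        try:
            return exchange.fetch_ohlcv(symbol, timeframe=timeframe, limit=limit)
        except ccxt.NetworkError as err:
            print(f"NetworkError ({err}), attempt {attempt + 1} of {retries}")
            time.sleep(backoff * (attempt + 1))  # linear backoff between attempts
    raise ccxt.NetworkError(f"giving up on {symbol} after {retries} attempts")
Inside algo(), bars = fetch_ohlcv_with_retry(exchange, 'BNB/USDT', '30m', 50) would then replace the direct fetch_ohlcv call.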

Python errors when using Keras to implement a CNN

I am learning deep learning and want to use Python and Keras to implement a CNN, but when I run it from the command line I get some errors.
This is my source code: https://github.com/lijhong/CNN-kereas.git
The error is this:
Traceback (most recent call last):
File "/home/ah0818lijhong/CNN-kereas/cnn-kereas.py", line 167, in <module>
model.fit(x_train, y_train,epochs=3)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/models.py", line 845, in fit
initial_epoch=initial_epoch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/training.py", line 1485, in fit
initial_epoch=initial_epoch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/training.py", line 1140, in _fit_loop
outs = f(ins_batch)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 2073, in __call__
feed_dict=feed_dict)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 778, in run
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 778, in run
run_metadata_ptr)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 982, in _run
feed_dict_string, options, run_metadata)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1032, in _do_run
target_list, options, run_metadata)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1052, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0,868] = 115873 is not in [0, 20001)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/cpu:0"](embedding_1/embeddings/read, _recv_embedding_1_input_0)]]
Caused by op u'embedding_1/Gather', defined at:
File "/home/ah0818lijhong/CNN-kereas/cnn-kereas.py", line 122, in <module>
model_left.add(embedding_layer)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/models.py", line 422, in add
layer(x)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/engine/topology.py", line 554, in __call__
output = self.call(inputs, **kwargs)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/layers/embeddings.py", line 119, in call
out = K.gather(self.embeddings, inputs)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py", line 966, in gather
return tf.gather(reference, indices)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1207, in gather
validate_indices=validate_indices, name=name)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/home/ah0818lijhong/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): indices[0,868] = 115873 is not in [0, 20001)
[[Node: embedding_1/Gather = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, validate_indices=true, _device="/job:localhost/replica:0/task:0/cpu:0"](embedding_1/embeddings/read, _recv_embedding_1_input_0)]]
I hope someone can help me fix it.
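An editorial note, hedged: the error says a token index in the input (115873) falls outside the Embedding layer's vocabulary range [0, 20001), i.e. the word indices in the data exceed the layer's input_dim. One common fix, sketched here with assumed variable names, is to clamp out-of-range indices to a single out-of-vocabulary bucket before training (alternatively, raise the Embedding layer's input_dim to the true maximum index + 1):
import numpy as np

MAX_NB_WORDS = 20001  # the Embedding input_dim reported in the error message
# Map any index >= input_dim to 0, used here as the out-of-vocabulary bucket.
x_train = np.where(x_train < MAX_NB_WORDS, x_train, 0)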