I've been trying to solve the pong atari with a DQN. I'm using OpenAI gym for the pong environment.
I've made a custom ObservationWrapper but I'm unable to figure out whats the problem with the reset() method I've overriden.
Error:
Traceback (most recent call last):
File "C:\Users\berna\Documents\Pytorch Experiment\Torching the Dead Grass\DeepQLearning\training.py", line 123, in <module>
agent = Agent(env, buffer)
File "C:\Users\berna\Documents\Pytorch Experiment\Torching the Dead Grass\DeepQLearning\training.py", line 56, in __init__
self._reset()
File "C:\Users\berna\Documents\Pytorch Experiment\Torching the Dead Grass\DeepQLearning\training.py", line 59, in _reset
self.state = env.reset()
File "C:\Users\berna\AppData\Local\Programs\Python\Python310\lib\site-packages\gym\core.py", line 379, in reset
obs, info = self.env.reset(**kwargs)
File "C:\Users\berna\Documents\Pytorch Experiment\Torching the Dead Grass\DeepQLearning\wrappers.py", line 106, in reset
return self.observation(self.env.reset())
File "C:\Users\berna\AppData\Local\Programs\Python\Python310\lib\site-packages\gym\core.py", line 379, in reset
obs, info = self.env.reset(**kwargs)
File "C:\Users\berna\AppData\Local\Programs\Python\Python310\lib\site-packages\gym\core.py", line 379, in reset
obs, info = self.env.reset(**kwargs)
ValueError: too many values to unpack (expected 2)
Process finished with exit code 1
and the code:
Agent:
class Agent:
def __init__(self, env, exp_buffer):
self.env = env
self.exp_buffer = exp_buffer
self._reset()
def _reset(self):
self.state = env.reset()
self.total_reward = 0.0
wrapper:
class BufferWrapper(gym.ObservationWrapper):
def __init__(self, env, n_steps, dtype=np.float32):
super(BufferWrapper, self).__init__(env)
self.dtype = dtype
old_space = env.observation_space
self.observation_space = gym.spaces.Box(old_space.low.repeat(n_steps, axis=0),
old_space.high.repeat(n_steps, axis=0), dtype=dtype)
def reset(self):
self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)
return self.observation(self.env.reset())
def observation(self, observation):
self.buffer[:-1] = self.buffer[1:]
self.buffer[-1] = observation
return self.buffer
Can someone helping me understand why I'm receiving that error?
Related
In given below if Medicine table consists medicine_name then query execute fine, but when medicine name doesn't exist on the Corresponding table then error is occured.
Matching Query Doesn't Exists
views.py Code
#api_view(['POST'])
def addPeople(request):
m = People()
m.bp_no = request.POST['bp_no']
m.name = request.POST['name']
m.corporation_name = request.POST['corporation_name']
m.medicine_name = request.POST['medicine_name']
m.no_of_medicine = request.POST['no_of_medicine']
existing = Medicine.objects.get(medicine_name=m.medicine_name).no_of_medicine - int(m.no_of_medicine)
p_key = Medicine.objects.get(medicine_name=m.medicine_name).id
if Medicine.objects.filter(medicine_name=m.medicine_name).exists():
if existing > 0:
m.save()
Medicine.objects.filter(id=p_key).update(no_of_medicine=existing)
return Response({"message": "Successfully Recorded"})
else:
return Response({"message": "Not much Medicine Stored"})
else:
return Response({"message": "Medicine is not Stored"})
models.py
class People(models.Model):
bp_no = models.IntegerField(blank=False,null=False)
name = models.CharField(blank=False,null=False,max_length=200)
corporation_name = models.CharField(blank=False,null=False,max_length=200)
medicine_name = models.CharField(blank=False,null=False,max_length=200)
no_of_medicine = models.IntegerField()
class Medicine(models.Model):
medicine_name = models.CharField(null=False,blank=False,max_length=200)
no_of_medicine = models.IntegerField(null=False,blank=False)
def __str__(self):
return self.medicine_name
Error Traceback: When Medicine table doesn't contains the corresponding filter name then This error will be shown
Internal Server Error: /api/add-people/
Traceback (most recent call last):
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\core\handlers\base.py", line 181, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\views\decorators\csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\views\generic\base.py", line 70, in view
return self.dispatch(request, *args, **kwargs)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\rest_framework\views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\rest_framework\views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\rest_framework\views.py", line 480, in raise_uncaught_exception
raise exc
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\rest_framework\views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\rest_framework\decorators.py", line 50, in handler
return func(*args, **kwargs)
File "E:\django\backend\medirecords\api\views.py", line 37, in addPeople
existing = Medicine.objects.get(medicine_name=m.medicine_name).no_of_medicine - int(m.no_of_medicine)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\db\models\manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "C:\Users\MonirHossain\AppData\Roaming\Python\Python39\site-packages\django\db\models\query.py", line 435, in get
raise self.model.DoesNotExist(
api.models.Medicine.DoesNotExist: Medicine matching query does not exist.
I haven't tested this, but purely in terms of handling objects not found, I have a suggestion you can try to simplify the conditions / flow.
.get(): Wrap in try / except
get(*args, **kwargs) raises Model.DoesNotExist when not found.
When there's a not found, you need to determine how you want to handle it. For example, you can have a default fallback value, or raise an error.
In this case the code would fail if the existing medicine wasn't found, so I will have it raise.
#api_view(["POST"])
def addPeople(request):
medicine_name = request.POST["medicine_name"]
try:
medicine = Medicine.objects.get(medicine_name=medicine_name)
except Medicine.DoesNotExist:
return Response({"message": "Not much Medicine Stored"})
m = People()
m.bp_no = request.POST["bp_no"]
m.name = request.POST["name"]
m.corporation_name = request.POST["corporation_name"]
m.medicine_name = medicine_name
m.no_of_medicine = request.POST["no_of_medicine"]
existing = medicine.no_of_medicine - int(m.no_of_medicine)
p_key = medicine.id
if medicine.exists():
if existing > 0:
m.save()
medicine.num_of_medicine = existing
medicine.save(update_fields=["num_of_medicine"])
return Response({"message": "Successfully Recorded"})
else:
return Response({"message": "Not much Medicine Stored"})
else:
return Response({"message": "Medicine is not Stored"})
Move object check earlier
Since the request relies on Medicine, this is should be caught early as possible.
Uses try/catch as .get() will always raise when object is not found
The alternative is to do .exists() or filter(*args, **kwargs) + .first()
Update dependent model via Model.save() and limit field updated to num_of_medicine via update_fields.
get_object_or_404()
get_object_or_404(klass, *args, **kwargs) can also be used in lieu of the earlier try/catch block:
from django.shortcuts import get_object_or_404
medicine = get_object_or_404(Medicine.objects.all(), medicine_name=m.medicine_name)
existing = medicine.no_of_medicine - int(m.no_of_medicine)
Final note
I'm assuming this is an example, but it also may be good to validate request.POST["no_of_medicine"] in the POST data against a backend source of truth to make sure it's not faked.
I am getting very weird behavior while using PyMySql.
My Connection class is as follows.
class MySqlConnection:
def __init__(self):
self.host = conf.DB_HOST
self.port = conf.DB_PORT
self.user = conf.DB_USER
self.passwd = conf.DB_PASSWD
self.db = conf.DB_DEFAULT
self.connection = None
self.cursor = None
#self.__connect()
def connect(self):
self.connection = pymysql.connect(host=self.host,
user=self.user,
password=self.passwd,
db=self.db)
self.cursor = self.connection.cursor()
And there is a wrapper on top of it, which executes the query.
class QueryExecutor:
def __init__(self, table_name_for_query, table_attributes):
self.conn = MySqlConnection()
self.table_for_query = table_name_for_query
self.table_attributes = table_attributes
def create_connection(self):
self.conn.connect()
def close_connection(self):
self.conn.close()
And then there is a table class.
class SupportedTables:
def __init__(self, table_name, attributes):
self.query_executor = QueryExecutor(table_name, attributes)
def save(self):
'''
Inserts query for saving data in DB.
'''
self.query_executor.create_connection()
value_list = []
for attrib in self.attributes:
if 'AUTO_INCREMENT' in attrib[1]:
continue
col_name = attrib[0]
value_list.append(getattr(self, col_name))
insert_data = [value_list]
if len(value_list) != 0:
self.query_executor.insert_data(data_to_insert=insert_data)
self.query_executor.close_connection()
Now, when I execute tests for SupportedTables and it's derived classes, the code works fine. I do not get any issue.
However, when I try executing the code out of test class, I get an error like:
self.query_executor.create_connection()
"/home/priyesh/db/QueryExecutor_v2.py", line 17,
in create_connection
self.conn.connect() File "/home/priyesh/db/connection_handler.py", line 24, in connect
db=self.db) File "/home/priyesh/virtualenvs/virtualenv_python3_ibsync/lib/python3.6/site-packages/pymysql/__init__.py",
line 94, in Connect
return Connection(*args, **kwargs) File "/home/priyesh/virtualenvs/virtualenv_python3_ibsync/lib/python3.6/site-packages/pymysql/connections.py",
line 327, in init
self.connect() File "/home/priyesh/virtualenvs/virtualenv_python3_ibsync/lib/python3.6/site-packages/pymysql/connections.py",
line 598, in connect
self._request_authentication()
File "/home/priyesh/virtualenvs/virtualenv_python3_ibsync/lib/python3.6/site-packages/pymysql/connections.py",
line 849, in _request_authentication
data += struct.pack('B', len(connect_attrs)) + connect_attrs struct.error: ubyte format requires 0 <= number <= 255
Any idea why this might be happening? The code is pretty simple and works from Test Class but not from Main execution script.
I already defined a loss function in pytorch, but there is an error that I could not find solution. Here is my code:
<code>
class cust_loss(torch.nn.Module):
def __init__(self):
super(cust_loss, self).__init__()
def forward(self, input, target):
predicted_labels = torch.max(input, 1)[1]
minus = torch.max(input, 1)[1] - target
cust_distance = torch.sum(minus*minus).type(torch.FloatTensor)/predicted_labels.size()[0]
return cust_distance
######## within main function ######
criterion = cust_loss()#nn.CrossEntropyLoss()
Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model_conv.parameters()), lr=1e-3, momentum=0.9)
loss = criterion(inputs, labels)
loss.backward()
Unfortunately, I got this error:
Traceback (most recent call last):
File "/home/morteza/PycharmProjects/transfer_learning/test_SkinDetection.py", line 250, in <module>
main(True)
File "/home/morteza/PycharmProjects/transfer_learning/test_SkinDetection.py", line 130, in main
loss.backward()
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/variable.py", line 156, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph, retain_variables)
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/__init__.py", line 98, in backward
variables, grad_variables, retain_graph)
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/function.py", line 91, in apply
return self._forward_cls.backward(self, *args)
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/_functions/basic_ops.py", line 38, in backward
return maybe_unexpand(grad_output, ctx.a_size), maybe_unexpand_or_view(grad_output.neg(), ctx.b_size), None
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/variable.py", line 381, in neg
return Negate.apply(self)
File "/home/morteza/anaconda3/lib/python3.6/site-packages/torch/autograd/_functions/basic_ops.py", line 224, in forward
return i.neg()
AttributeError: 'torch.LongTensor' object has no attribute 'neg'
I could not solve it. I traced the code and compared it with a code that is error free, but I could not solve it. Moreover, I defined my inputs and labels as Variable with "requires_grad=True" parameter.
Please guide me how to solve it.
Thank you.
I am attempting a custom encode, but get an error. The following code sample generates an error:
#!/usr/bin/python3
import json
class Contact:
def __init__(self, first, last):
self.first = first
self.last = last
#property
def full_name(self):
return ("{} {}".format(self.first, self.last))
class ContactEncoder(json.JSONEncoder):
def defualt(self, obj):
if isinstance(obj, Contact):
return {"is_contact": 'T'
,"first": obj.first
,"last": obj.last
,"full_name": obj.full_name}
return super().defualt(obj)
if __name__ == "__main__":
c = Contact("Jay", "Loophole")
print(json.dumps(c.__dict__))
print(json.dumps(c, cls=ContactEncoder))
The error generated is:
{"first": "Jay", "last": "Loophole"}
Traceback (most recent call last):
File "json_dump.py", line 26, in <module>
print(json.dumps(c, cls=ContactEncoder))
File "/usr/lib/python3.5/json/__init__.py", line 237, in dumps
**kw).encode(obj)
File "/usr/lib/python3.5/json/encoder.py", line 198, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python3.5/json/encoder.py", line 256, in iterencode
return _iterencode(o, 0)
File "/usr/lib/python3.5/json/encoder.py", line 179, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: <__main__.Contact object at 0x7ffb3445a400> is not JSON serializable
The default dictionary is successfully displayed, but when a custom encode is passed as a cls parameter, an error occurs.
Any suggestions for the reason for the error?
Here is your updated code after the defUAlt --> defAUlt correction:
import json
class Contact:
def __init__(self, first, last):
self.first = first
self.last = last
#property
def full_name(self):
return ("{} {}".format(self.first, self.last))
class ContactEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Contact):
return {"is_contact": 'T'
,"first": obj.first
,"last": obj.last
,"full_name": obj.full_name}
return super().default(obj)
if __name__ == "__main__":
c = Contact("Jay", "Loophole")
print(json.dumps(c.__dict__))
print(json.dumps(c, cls=ContactEncoder))
You can check it out live on this page.
I have scheduled a few recurring tasks with celery beat for our web app
The app itself is build using pyramid web framework. Using the zopetransaction extension to manage session
In celery, I am using the app as a library. I am redefining session in models with a function.
It works well but once in a while, it raises InvalidRequestError: This session is in 'prepared' state; no further SQL can be emitted within this transaction
I am not sure what is wrong and why it issues these warnings.
Sample code:
in tasks.py
def initialize_async_session():
import sqlalchemy
from webapp.models import Base, set_dbsession, engine
Session = sqlalchemy.orm.scoped_session(
sqlalchemy.orm.sessionmaker(autocommit=True, autoflush=True)
)
Session.configure(bind=engine)
session = Session()
set_dbsession(session)
Base.metadata.bind = engine
return session
#celery.task
def rerun_scheduler():
log.info("Starting pipeline scheduler")
session = initialize_async_session()
webapp.sheduledtask.service.check_for_updates(session)
log.info("Ending pipeline scheduler")
In models.py in webapp
DBSession = scoped_session(sessionmaker(bind=engine, expire_on_commit=False,
extension=ZopeTransactionExtension()))
def set_dbsession(db_session=None):
"""
This function sets the db session
"""
global DBSession
if db_session:
DBSession = db_session
log.info("session changed to {0}".format(db_session))
UPDATE:
traceback:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 54, in new_function
result = f(*args, **kwargs)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 100, in new_function
result = f(*args, **kwargs)
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/data/mongo_service.py", line 1274, in run
self.table_params.set_task_status_as_finished()
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/mem_objects.py", line 33, in set_task_status_as_finished
task = Task.get_by_id(self.task_id)
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/models.py", line 162, in get_by_id
return DBSession.query(cls).filter(cls.id == obj_id).first()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2156, in first
ret = list(self[0:1])
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2023, in __getitem__
return list(res)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2227, in __iter__
return self._execute_and_instances(context)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2240, in _execute_and_instances
close_with_result=True)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2231, in _connection_from_session
**kw)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 777, in connection
close_with_result=close_with_result)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 781, in _connection_for_bind
return self.transaction._connection_for_bind(engine)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 289, in _connection_for_bind
self._assert_is_active()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 217, in _assert_is_active
"This Session's transaction has been rolled back "
InvalidRequestError: This Session's transaction has been rolled back by a nested rollback() call. To begin a new transaction, issue Session.rollback() first.
#########################################################################
[2013-05-30 14:32:57,782: WARNING/PoolWorker-3] Exception in thread Thread-4:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 54, in new_function
result = f(*args, **kwargs)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 100, in new_function
result = f(*args, **kwargs)
File "/home/ranjith/wksp/mvc-service/webapp/webapp/data/mongo_service.py", line 1274, in run
self.table_params.set_task_status_as_finished()
File "/home/ranjith/wksp/mvc-service/webapp/webapp/mem_objects.py", line 33, in set_task_status_as_finished
task = Task.get_by_id(self.task_id)
File "/home/ranjith/wksp/mvc-service/webapp/webapp/models.py", line 166, in get_by_id
return DBSession.query(cls).filter(cls.id == obj_id).first()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2145, in first
ret = list(self[0:1])
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2012, in __getitem__
return list(res)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2216, in __iter__
return self._execute_and_instances(context)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2229, in _execute_and_instances
close_with_result=True)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2220, in _connection_from_session
**kw)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 798, in connection
close_with_result=close_with_result)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 802, in _connection_for_bind
return self.transaction._connection_for_bind(engine)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 281, in _connection_for_bind
self._assert_active()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 181, in _assert_active
"This session is in 'prepared' state; no further "
InvalidRequestError: This session is in 'prepared' state; no further SQL can be emitted within this transaction.
I believe the problem is that you are attempting to use the SQLAlchemy session in your Celery task.
The first thing I recommend doing is creating two separate scoped sessions, one for your Celery application and another one for your web application. Next, I would make sure your Celery database session is only configured once during Celery initialization. You can use the Celery worker_init.connect to make sure it creates the database during Celery startup (http://hynek.me/articles/using-celery-with-pyramid/).
It is very important that your web application does not use the same database session as your Celery application.
Something like this for your tasks.py file:
from celery import Celery
from celery.signals import worker_init
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
Session = sqlalchemy.orm.scoped_session(
sqlalchemy.orm.sessionmaker(autocommit=True, autoflush=True))
#worker_init.connect
def initialize_session():
some_engine = create_engine('database_url')
Session.configure(bind=some_engine)
#celery.task
def rerun_scheduler():
log.info("Starting pipeline scheduler")
webapp.sheduledtask.service.check_for_updates(Session)
log.info("Ending pipeline scheduler")
Cross posting my answer to a very similar stack overflow:
What's the proper way to use SQLAlchemy Sessions with Celery?
This solved the issue for me:
Sqlalchemy pools connections by default in a non-threadsafe manner,
Celery forks processes by default: one or the other needs to be changed.
Turn off Sqlalchemy pooling
Sql Alchemy Docs
from sqlalchemy.pool import NullPool
engine = create_engine(
SQLALCHEMY_DATABASE_URL, poolclass=NullPool
)