Python threaded timer running a function with a passed variable

I'm trying to run a function (f) every x seconds (in my case 60) that closes the active database connection if one exists and, upon completion, opens it again.
I am using threading.Timer, although I'm having trouble passing the connection into the function, and in some situations the function runs repeatedly with nothing else running.
The function needs to return the connection to globals after it completes, and I'm finding it hard to pass the connection to the function and assign the return value globally from within the function, which is how I believe threading.Timer works:
from socketIO_client import SocketIO
import logging
import json
import MySQLdb as mdb
import os
import threading
con = mdb.connect('localhost','username','password','databaseName')
cur = con.cursor()
def f(con):
    if 'con' in globals():
        con.close()
        print ("Connection closed")
    os.system('php -f /home/ubuntu/grab.php')
    con = mdb.connect('localhost','username','password','databaseName')
    cur = con.cursor()
    print ("DB Connection opened")
    con = mdb.connect('localhost','username','password','databaseName')
    cur = con.cursor()
    threading.Timer(60,f,con).start(); ######PROBLEM LINE
    return con
def on_connect():
    print "Connecting to database"
    areas = ['EH','BE']
    socketIO.emit('subscribe_areas', areas)

def on_message(answer):
    print("\nNew message received")
    array = (json.loads(answer))
    print (array)
    runningIdentity = array["value"]
    berthID = array["to"]
    area = array["area"]
    if berthID:
        query = ("SELECT crs FROM signalBerth WHERE signalBerth=\'%s\';"%(berthID))
        cur.execute(("%s")%(query))
        reply = cur.fetchall()
        for row in reply:
            crs = row[0]
        query = "UPDATE service SET lastSeen = \'%s\' WHERE runningIdentity=\'%s"%(crs,runningIdentity)+"\';" #berthID == crs, need to alter
        print (("%s")%(query))
        cur.execute(("%s")%(query))
        con.commit()
        print("affected rows = {}".format(cur.rowcount))
socketIO = SocketIO('http://www.realtimetrains.co.uk', 41280) #opens connection
socketIO.on('connect', on_connect) #sends subscription
socketIO.on('message', on_message) #reads data, creates mysql and executes it
con = f(con) ######FIRST CALL TO FUNCTION
socketIO.wait() #Keeps connection open here
Error:

Traceback (most recent call last):
  File "input.py", line 49, in <module>
    socketIO.wait() #Keeps connection open
  File "build/bdist.linux-x86_64/egg/socketIO_client/__init__.py", line 175, in wait
  File "build/bdist.linux-x86_64/egg/socketIO_client/__init__.py", line 194, in _process_events
  File "build/bdist.linux-x86_64/egg/socketIO_client/__init__.py", line 202, in _process_packet
  File "build/bdist.linux-x86_64/egg/socketIO_client/__init__.py", line 327, in _on_event
  File "input.py", line 36, in on_message
    cur.execute(("%s")%(query))
  File "/usr/lib/python2.7/dist-packages/MySQLdb/cursors.py", line 155, in execute
    charset = db.character_set_name()
_mysql_exceptions.InterfaceError: (0, '')

Exception in thread Thread-1:
Traceback (most recent call last):
  File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
    self.run()
  File "/usr/lib/python2.7/threading.py", line 1082, in run
    self.function(*self.args, **self.kwargs)
TypeError: f() argument after * must be a sequence, not Connection
Perhaps there is a method better suited to my needs; the important bit is that the connection is closed, the function run, and the connection opened again every minute or so. I thought about a cron job, but I'd rather keep my code doing everything.

According to the Timer documentation, its third parameter is args. It must be a sequence, but you pass con on its own instead.
You need to replace your problem line with:
threading.Timer(60, f, (con,)).start()
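For illustration, a minimal sketch of the corrected pattern (the connection details are placeholders, not the original ones); the key point is the one-element tuple (con,), so Timer unpacks exactly one argument:

import threading
import MySQLdb as mdb

def f(con):
    con.close()  # close the current connection before doing the external work
    con = mdb.connect('localhost', 'username', 'password', 'databaseName')
    # reschedule: args must be a sequence, so wrap the connection in a tuple
    threading.Timer(60, f, (con,)).start()
    return con

con = mdb.connect('localhost', 'username', 'password', 'databaseName')
con = f(con)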

Related

Getting error with sql query using python

I am trying to fetch the list of SQL queries that have been running for more than 3600 seconds and kill those IDs using Python. Below is the code:
import json
import mysql.connector
import pymysql

def main():
    # TODO implement
    connection = pymysql.connect(user='', password='',
                                 host='',
                                 port=3306,
                                 database='')
    cursor = connection.cursor() # get the cursor
    # cursor.execute('SHOW PROCESSLIST;')
    # extracted_data = cursor.fetchall();
    # for i in extracted_data:
    #     print(i)
    with connection.cursor() as cursor:
        print(cursor.execute('SHOW PROCESSLIST'))
        for item in cursor.fetchall():
            if item.get('Time') > 3600 and item.get('command') == 'query':
                _id = item.get('Id')
                print('kill %s' % item)
                cursor.execute('kill %s', _id)
    connection.close()

main()
Below is the error I am getting:
"C:\drive c\pyfile\venv\Scripts\python.exe" "C:/drive c/pyfile/sqlnew2.py"
Traceback (most recent call last):
File "C:\drive c\pyfile\sqlnew2.py", line 23, in <module>
main()
File "C:\drive c\pyfile\sqlnew2.py", line 18, in main
if item.get('Time') > 3600 and item.get('command') == 'query':
AttributeError: 'tuple' object has no attribute 'get'
The .fetchall() method returns the rows as tuples, not dictionaries, so you have to access the fields by numerical index, for example item[0], item[1], etc.
As an alternative, if you want to fetch the results as a dictionary, you can use a DictCursor
First import it:
import pymysql.cursors
Then modify the cursor line like this:
with connection.cursor(pymysql.cursors.DictCursor) as cursor:
...
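Putting it together, a hedged sketch of the loop with a DictCursor (the connection credentials are placeholders; note that in SHOW PROCESSLIST output the column names are capitalized, so the keys are 'Id', 'Time', and 'Command', and the command value for a running statement is 'Query'):

import pymysql
import pymysql.cursors

connection = pymysql.connect(user='user', password='password',
                             host='localhost', port=3306,
                             database='db')
with connection.cursor(pymysql.cursors.DictCursor) as cursor:
    cursor.execute('SHOW PROCESSLIST')
    for item in cursor.fetchall():
        # keys mirror the SHOW PROCESSLIST column headers
        if item.get('Time', 0) > 3600 and item.get('Command') == 'Query':
            _id = item.get('Id')
            print('kill %s' % _id)
            cursor.execute('KILL %s', (_id,))
connection.close()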

How to resolve Json decode error in Ubuntu

I am running a Python script on Windows 10.
In the script, I am using the json library.
When I run the same script on Ubuntu 20.04 (running on VMware), I get a JSON decode error.
I don't see this behaviour when I run it on Windows 10.
The following is the error I get when I run the script on Ubuntu:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "uiControl.py", line 83, in getTcpData
self.taskObj = json.loads(data.decode('utf-8'))
File "/usr/lib/python3.8/json/__init__.py", line 357, in loads
return _default_decoder.decode(s)
File "/usr/lib/python3.8/json/decoder.py", line 340, in decode
raise JSONDecodeError("Extra data", s, end)
json.decoder.JSONDecodeError: Extra data: line 5 column 2 (char 73)
In function on_message, I am printing the data received.
The following is the data I receive :
b'{"code":"101","user":"ssmr","evNumber":"TS15EC1100"}'
I call the function addToTaskQueue() to store the received data and then try to parse the data using the function BackendParser():
def on_message(self, client, userdata, msg):
    print(msg.payload)
    self.taskObj = json.loads(msg.payload)
    self.taskObj["commType"] = "mqtt"
    self.taskObj["transactionType"] = "rx"
    taskScheduler.addToTaskQueue(self.taskObj)

def BackendParser(msg):
    if(msg["code"] == "101"):
        Backend.userName = msg["user"]
        Backend.evNumber = msg["evNumber"]
        Backend.evChargeControl = "On"
        if(Backend.requestStatus == ""):
            Backend.requestStatus = "new"

class taskScheduler():
    global qTaskList
    qTaskList = queue.Queue()

    def __init__(self):
        super().__init__()
        self.tcpCon = tcpServerClient("client")
        self.mqttCon = mqttComm()
        print("Initiated Task Scheduler class")

    @staticmethod
    def addToTaskQueue(item):
        if not qTaskList.full():
            #print("Task added")
            qTaskList.put(item)

    def executeFromTaskQueue(self):
        if not qTaskList.empty():
            item = qTaskList.get()
            if("mqtt" == item["commType"]):
                if("tx" == item["transactionType"]):
                    pubTopic = item["topic"]
                    del item["commType"]
                    del item["transactionType"]
                    del item["topic"]
                    self.mqttCon.mqttSend(item, pubTopic)
                elif("rx" == item["transactionType"]):
                    BackendParser(item)
            elif("tcp" == item["commType"]):
                if("tx" == item["transactionType"]):
                    del item["commType"]
                    del item["transactionType"]
                    tcpServerClient.sendTcpData(item)
                elif("rx" == item["transactionType"]):
                    BackendParser(item)
I figured out the error.
I was using the following function, getTcpData, to receive the data.
I tried printing the data as received and noticed that there were \n characters in the received messages. This was not an issue when the script was executed on Windows 10. I have now added a routine to remove the \n characters, and it works fine on Ubuntu.
def getTcpData(self):
    print("Waiting for tcp data")
    while True:
        if(tcpServerClient.clientsocket != None):
            data = tcpServerClient.clientsocket.recv(1024)
            if data:
                print(data)
                self.taskObj = json.loads(data.decode('utf-8'))
                self.taskObj["commType"] = "tcp"
                self.taskObj["transactionType"] = "rx"
                taskScheduler.addToTaskQueue(self.taskObj)
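The cleanup routine itself isn't shown above; as an assumption, a minimal sketch of the idea is to split the received buffer on newlines and parse each piece separately, since a single recv() can deliver several newline-separated JSON messages, which is what the "Extra data" error points to:

import json

def parse_tcp_payload(data):
    # one recv() may carry several newline-separated JSON documents;
    # strip and parse each non-empty line on its own
    objs = []
    for line in data.decode('utf-8').splitlines():
        line = line.strip()
        if line:
            objs.append(json.loads(line))
    return objs

# example: two messages arriving in a single buffer
buf = b'{"code":"101","user":"ssmr","evNumber":"TS15EC1100"}\n{"code":"102"}\n'
for obj in parse_tcp_payload(buf):
    print(obj)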

Incorrect number of parameters in prepared statement

I'm having a heck of a time getting the mysql.connector module to work. I'd really like to find some accurate documentation on it. By hit and miss, I have arrived here.
Traceback (most recent call last):
File "update_civicrm_address.py", line 80, in <module>
cursor.execute(mysql_select_query, address_id)
File "/home/ubuntu/.local/lib/python3.6/site-packages/mysql/connector/cursor.py", line 1210, in execute
msg="Incorrect number of arguments " \
mysql.connector.errors.ProgrammingError: 1210: Incorrect number of arguments executing prepared statement
Here is the program (it's a bit messy because I have tried so many things to get it to work). Aside from the fact that the update is not working at all, what is causing the error? There is only one parameter and it is accounted for.
import sys
import mysql.connector
import csv
import os
from mysql.connector import Error
from mysql.connector import errorcode

#Specify the import file
try:
    inputCSV = 'geocoded_rhode_island_export.csv'
    #Open the file and give it a handle
    csvFile = open(inputCSV, 'r')
    #Create a reader object for the input file
    reader = csv.reader(csvFile, delimiter = ',')
except IOError as e:
    print("The input file ", inputCSV, " was not found", e)
    exit()

try:
    conn = mysql.connector.connect(host='localhost',
                                   database='wordpress',
                                   user='wp_user',
                                   password='secret!',
                                   use_pure=True)
    cursor = conn.cursor(prepared=True)
except mysql.connector.Error as error:
    print("Failed to connect to database: {}".format(error))
    exit()

try:
    record_count = 0
    for row in reader:
        contact_id, address_id, last_name, first_name, middle_name, longitude, latitude = row
        print(row)
        #Update single record now
        print(address_id)
        cursor.execute(
            """
            update civicrm_address
            set
                geo_code_1 = %s,
                geo_code_2 = %s
            where
                id = %s
            and
                location_type_id = %s
            """,
            (longitude, latitude, address_id, 6)
        )
        conn.commit
        print(cursor.rowcount)
        print("Record updated successfully")

        mysql_select_query = """
            select
                id,
                geo_code_1,
                geo_code_2
            from
                civicrm_address
            where
                id = %s
            """
        input = (address_id)
        cursor.execute(mysql_select_query, address_id)
        record = cursor.fetchone()
        print(record)
        record_count = record_count + 1
finally:
    print(record_count, " records updated")
    #closing database connection.
    if(conn.is_connected()):
        conn.close()
        print("connection is closed")
There is an error in the code:
conn.commit
should be
conn.commit()
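That explains why the update never sticks, but the traceback itself comes from the select: mysql.connector expects the parameters as a sequence, and (address_id) without a comma is just a parenthesized string, so the parameter count doesn't line up. A sketch of both fixes:

conn.commit()  # commit is a method; without the parentheses nothing is committed

# pass the parameter as a one-element tuple, not a bare string
cursor.execute(mysql_select_query, (address_id,))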

MySQL python connector IndexError: bytearray index out of range

I'm inserting some data into a database; most of the queries are inserted correctly, but I keep getting at least one random query error.
I'm using Python 3, MySQL 5.6.17 and MySQL python connector 2.1.3 (upgraded after having the same problem with 2.0.2).
The queries are run in a Multiprocessing Pool map_async().
multiprocessing.pool.RemoteTraceback: bytearray index out of range
Traceback (most recent call last):
File "./../../../my-python-script.py", line 930, in insert_into_database
mysql_query(mysql_script, values) # <-- My mysql wrapper function
File "./../../../my-python-script.py", line 297, in mysql_query
for row in results:
File "./../../../mysql/connector/cursor.py", line 450, in _execute_iter
result = next(query_iter)
File "./../../../mysql/connector/connection.py", line 520, in cmd_query_iter
yield self._handle_result(self._send_cmd(ServerCmd.QUERY, statements))
File "./../../../mysql/connector/connection.py", line 405, in _handle_result
self._socket.recv(), self.python_charset)
File "./../../../mysql/connector/protocol.py", line 238, in parse_column
(packet, _) = utils.read_lc_string(packet[4:]) # catalog
File "./../../../mysql/connector/utils.py", line 199, in read_lc_string
if buf[0] == 251: # \xfb
IndexError: bytearray index out of range
The above exception was the direct cause of the following exception:
IndexError: bytearray index out of range
Or sometimes I get (from the "for row in results" line)
File "./../../../mysql/connector/cursor.py", line 450, in _execute_iter
result = next(query_iter)
File "./../../../mysql/connector/connection.py", line 520, in cmd_query_iter
yield self._handle_result(self._send_cmd(ServerCmd.QUERY, statements))
File "./../../../mysql/connector/connection.py", line 384, in _handle_result
elif packet[4] == 0:
IndexError: bytearray index out of range
The above exception was the direct cause of the following exception:
IndexError: bytearray index out of range
My setup is something like
class InsertData:
def __init__(self):
with(multiprocessing.Pool(2) as Pool:
Pool.map_async(self.insert_into_database(),set(1,2,3.....))
Pool.close()
Pool.join()
def insert_into_database(self,values):
# use the values to do some calculations then insert to database
mysql_query(mysql_script, values)
def mysql_query(script, values):
cursor.execute(query, values, multi = True)
And the sql script
'INSERT INTO table1 ( column1 ) VALUES ( "x" ); '
'SET @table1 = LAST_INSERT_ID(); '
'INSERT INTO table2 ( column1, column2 ) VALUES ( "y", @table1 ); '
'SET @table2 = LAST_INSERT_ID(); '
...
I'm currently looking at connection.py and the utils code trying to figure out what's happening, but this is too advanced for me.
https://github.com/mysql/mysql-connector-python/blob/master/lib/mysql/connector/connection.py#L368
https://github.com/mysql/mysql-connector-python/blob/master/lib/mysql/connector/utils.py#L167
In a desperate attempt I've tried setting buffered to True: https://dev.mysql.com/doc/connector-python/en/connector-python-api-mysqlcursorbuffered.html
I need to read up on bytearrays, but I suspect that my query script is causing the problem, because I didn't have this problem (I think?) when I ran the queries one at a time with cursor.execute(query, values, multi = False)
When I give each worker process its own database connection, as described in Accessing a MySQL connection pool from Python multiprocessing, the problem disappears. This makes sense, since a MySQL connection's socket cannot safely be shared across forked worker processes; interleaved reads by two workers would explain the truncated packets the connector is choking on.
Something like
mysql_conn = None

def db_conn():
    global mysql_conn
    mysql_conn = connector.connect(...)

class InsertData:
    def __init__(self):
        with multiprocessing.Pool(2, initializer = db_conn) as pool:
            pool.map_async(self.insert_into_database, {1, 2, 3, ...})
            pool.close()
            pool.join()

    def insert_into_database(self, values):
        # use the values to do some calculations, then insert into the database
        self.mysql_query(mysql_script, values)

    def mysql_query(self, script, values):
        cursor = mysql_conn.cursor()
        cursor.execute(query, values, multi = True)

SQLAlchemy session issues with celery

I have scheduled a few recurring tasks with celery beat for our web app
The app itself is built using the Pyramid web framework, using the ZopeTransactionExtension to manage sessions.
In Celery, I am using the app as a library. I am redefining the session in models with a function.
It works well but once in a while, it raises InvalidRequestError: This session is in 'prepared' state; no further SQL can be emitted within this transaction
I am not sure what is wrong and why it issues these warnings.
Sample code:
in tasks.py
def initialize_async_session():
    import sqlalchemy
    from webapp.models import Base, set_dbsession, engine

    Session = sqlalchemy.orm.scoped_session(
        sqlalchemy.orm.sessionmaker(autocommit=True, autoflush=True)
    )
    Session.configure(bind=engine)
    session = Session()
    set_dbsession(session)
    Base.metadata.bind = engine
    return session

@celery.task
def rerun_scheduler():
    log.info("Starting pipeline scheduler")
    session = initialize_async_session()
    webapp.sheduledtask.service.check_for_updates(session)
    log.info("Ending pipeline scheduler")
In models.py in webapp
DBSession = scoped_session(sessionmaker(bind=engine, expire_on_commit=False,
                                        extension=ZopeTransactionExtension()))

def set_dbsession(db_session=None):
    """
    This function sets the db session
    """
    global DBSession
    if db_session:
        DBSession = db_session
        log.info("session changed to {0}".format(db_session))
UPDATE:
traceback:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 54, in new_function
result = f(*args, **kwargs)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 100, in new_function
result = f(*args, **kwargs)
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/data/mongo_service.py", line 1274, in run
self.table_params.set_task_status_as_finished()
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/mem_objects.py", line 33, in set_task_status_as_finished
task = Task.get_by_id(self.task_id)
File "/home/ubuntu/modwsgi/env/mvc-service/webapp/webapp/models.py", line 162, in get_by_id
return DBSession.query(cls).filter(cls.id == obj_id).first()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2156, in first
ret = list(self[0:1])
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2023, in __getitem__
return list(res)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2227, in __iter__
return self._execute_and_instances(context)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2240, in _execute_and_instances
close_with_result=True)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2231, in _connection_from_session
**kw)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 777, in connection
close_with_result=close_with_result)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 781, in _connection_for_bind
return self.transaction._connection_for_bind(engine)
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 289, in _connection_for_bind
self._assert_is_active()
File "/home/ubuntu/modwsgi/env/local/lib/python2.7/site-packages/SQLAlchemy-0.7.9-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 217, in _assert_is_active
"This Session's transaction has been rolled back "
InvalidRequestError: This Session's transaction has been rolled back by a nested rollback() call. To begin a new transaction, issue Session.rollback() first.
#########################################################################
[2013-05-30 14:32:57,782: WARNING/PoolWorker-3] Exception in thread Thread-4:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 54, in new_function
result = f(*args, **kwargs)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/edgem_common-0.0-py2.7.egg/common/utils.py", line 100, in new_function
result = f(*args, **kwargs)
File "/home/ranjith/wksp/mvc-service/webapp/webapp/data/mongo_service.py", line 1274, in run
self.table_params.set_task_status_as_finished()
File "/home/ranjith/wksp/mvc-service/webapp/webapp/mem_objects.py", line 33, in set_task_status_as_finished
task = Task.get_by_id(self.task_id)
File "/home/ranjith/wksp/mvc-service/webapp/webapp/models.py", line 166, in get_by_id
return DBSession.query(cls).filter(cls.id == obj_id).first()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2145, in first
ret = list(self[0:1])
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2012, in __getitem__
return list(res)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2216, in __iter__
return self._execute_and_instances(context)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2229, in _execute_and_instances
close_with_result=True)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/query.py", line 2220, in _connection_from_session
**kw)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 798, in connection
close_with_result=close_with_result)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 802, in _connection_for_bind
return self.transaction._connection_for_bind(engine)
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 281, in _connection_for_bind
self._assert_active()
File "/home/ranjith/wksp/env/local/lib/python2.7/site-packages/SQLAlchemy-0.8.1-py2.7-linux-x86_64.egg/sqlalchemy/orm/session.py", line 181, in _assert_active
"This session is in 'prepared' state; no further "
InvalidRequestError: This session is in 'prepared' state; no further SQL can be emitted within this transaction.
I believe the problem is that you are attempting to use the SQLAlchemy session in your Celery task.
The first thing I recommend doing is creating two separate scoped sessions, one for your Celery application and another for your web application. Next, I would make sure your Celery database session is only configured once during Celery initialization. You can use the Celery worker_init.connect signal to make sure it creates the database session during Celery startup (http://hynek.me/articles/using-celery-with-pyramid/).
It is very important that your web application does not use the same database session as your Celery application.
Something like this for your tasks.py file:
from celery import Celery
from celery.signals import worker_init
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

Session = scoped_session(
    sessionmaker(autocommit=True, autoflush=True))

@worker_init.connect
def initialize_session(**kwargs):
    some_engine = create_engine('database_url')
    Session.configure(bind=some_engine)

@celery.task
def rerun_scheduler():
    log.info("Starting pipeline scheduler")
    webapp.sheduledtask.service.check_for_updates(Session)
    log.info("Ending pipeline scheduler")
Cross posting my answer to a very similar Stack Overflow question:
What's the proper way to use SQLAlchemy Sessions with Celery?
This solved the issue for me:
SQLAlchemy pools connections by default, and the pool is not safe to share across forked processes; Celery forks worker processes by default, so one or the other needs to be changed.
Turn off SQLAlchemy pooling, per the SQLAlchemy docs:
from sqlalchemy.pool import NullPool
engine = create_engine(
SQLALCHEMY_DATABASE_URL, poolclass=NullPool
)
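For context, a hedged sketch of wiring a NullPool engine into a Celery worker (the broker URL, database URL, and task body are placeholders, not from the original answer); with NullPool every checkout opens a fresh connection, so nothing is inherited across the forked workers:

from celery import Celery
from sqlalchemy import create_engine, text
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import NullPool

app = Celery('tasks', broker='redis://localhost:6379/0')

# no pooled connections survive to be shared by forked worker processes
engine = create_engine('mysql://user:password@localhost/dbname',
                       poolclass=NullPool)
Session = scoped_session(sessionmaker(bind=engine))

@app.task
def example_task():
    session = Session()
    try:
        session.execute(text('SELECT 1'))  # placeholder work
        session.commit()
    finally:
        Session.remove()  # close the connection opened for this task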