Python script DB connection as a pool not working, but a simple connection works (MySQL)

I am writing a script in Python 3 that listens to a tunnel and saves or updates data in MySQL depending on the message received.
I ran into weird behavior: I made a simple connection to MySQL using the pymysql module and everything worked fine, but after some time this simple connection closes.
So I decided to implement a connection pool to MySQL, and here the problem arises. Nothing errors out, but the issue is the following:
My cursor line:

    cursor = yield self._pool.execute(query, list(filters.values()))

prints the cursor result as <tornado_mysql.pools.Pool object at 0x0000019DE5D71F98> and gets stuck like that, not doing anything more.
If I remove yield from the cursor line, it passes that line, and the next line throws an error:

    response = yield c.fetchall()
    AttributeError: 'Future' object has no attribute 'fetchall'
How can I fix the MySQL pool connection so it works properly?
What I tried:
I used a few modules for pool connections; all run into the same issue.
I went back to a simple connection with pymysql and it worked again.
Below is my code:
Python script file:

    import pika
    from tornado.gen import coroutine

    from model import SyncModel

    _model = SyncModel(conf, _server_id)

    @coroutine
    def main():
        credentials = pika.PlainCredentials('user', 'password')
        try:
            cp = pika.ConnectionParameters(
                host='127.0.0.1',
                port=5671,
                credentials=credentials,
                ssl=False,
            )
            connection = pika.BlockingConnection(cp)
            channel = connection.channel()

            @coroutine
            def callback(ch, method, properties, body):
                if 'messageType' in properties.headers:
                    message_type = properties.headers['messageType']
                    if message_type in allowed_message_types:
                        result = proto_file._reflection.ParseMessage(descriptors[message_type], body)
                        if result:
                            result = protobuf_to_dict(result)
                            if message_type == 'MyMessage':
                                yield _model.message_event(data=result)
                    else:
                        print('Message type not in allowed list = ' + str(message_type))
                        print('continue listening...')

            channel.basic_consume(callback, queue='queue', no_ack=True)
            print(' [*] Waiting for messages. To exit press CTRL+C')
            channel.start_consuming()
        except Exception as e:
            print('Could not connect to host 127.0.0.1 on port 5671')
            print(str(e))

    if __name__ == '__main__':
        main()
SyncModel:

    from tornado_mysql import pools
    from tornado.gen import coroutine, Return
    from tornado_mysql.cursors import DictCursor

    class SyncModel(object):
        def __init__(self, conf, server_id):
            self.conf = conf
            servers = [i for i in conf.mysql.servers]
            for s in servers:
                if s['server_id'] == server_id:
                    # s holds all connection data: host, user, port, autocommit, charset, db, password
                    s['cursorclass'] = DictCursor
                    self._pool = pools.Pool(s, max_idle_connections=1, max_recycle_sec=3)

        @coroutine
        def message_event(self, data):
            table_name = 'table_name'
            query = ''
            data = data['message']
            filters = {
                'id': data['id']
            }
            # here the connection fails as described above
            response = yield self.query_select(table_name, self._pool, filters=filters)

        @coroutine
        def query_select(self, table_name, _pool, filters=None):
            if filters is None:
                filters = {}
            combined_filters = ['`%s` = %%s' % i for i in filters.keys()]
            where = 'WHERE ' + ' AND '.join(combined_filters) if combined_filters else ''
            query = """SELECT * FROM `%s` %s""" % (table_name, where)
            c = self._pool.execute(query, list(filters.values()))
            response = yield c.fetchall()
            raise Return({response})
All the code was working with just a simple connection to the database; after I started using the pool, the example stopped working. I will appreciate any help with this issue.
This is a standalone script.
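
For context on the failing lines: in tornado_mysql, Pool.execute() returns a Future that resolves to an already-executed cursor, so the usual pattern is to yield the Future and then fetch from the resulting cursor synchronously. A minimal self-contained sketch (the connection settings are placeholders; note also that the coroutine machinery needs a running Tornado IOLoop to resolve its Futures, which a blocking pika callback would not provide):

    from tornado import gen, ioloop
    from tornado_mysql import pools
    from tornado_mysql.cursors import DictCursor

    # placeholder credentials; replace with real connection settings
    POOL = pools.Pool(
        dict(host='127.0.0.1', port=3306, user='user', passwd='password',
             db='db', cursorclass=DictCursor),
        max_idle_connections=1,
        max_recycle_sec=3)

    @gen.coroutine
    def select_by_id(record_id):
        # yield turns the Future into a finished cursor; fetchall() is synchronous
        cur = yield POOL.execute("SELECT * FROM `table_name` WHERE `id` = %s", (record_id,))
        raise gen.Return(cur.fetchall())

    if __name__ == '__main__':
        rows = ioloop.IOLoop.current().run_sync(lambda: select_by_id(1))
        print(rows)

Also note that raise Return({response}) in query_select would wrap the rows in a set literal; raise Return(response) is probably what was intended.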

The pool connection was not working, so I switched back to pymysql, double-checking the connection.
I would like to post my answer that worked; only this solution worked for me:
before connecting to MySQL, check whether the connection is open, and if not, reconnect:

    if not self.mysql.open:
        self.mysql.ping(reconnect=True)
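
Wrapped as a helper, the same check might look like this (a sketch; the class and method names are placeholders, with self.mysql being a pymysql connection as in the answer above):

    import pymysql

    class Db(object):
        """Minimal wrapper that revives a dropped pymysql connection before each use."""

        def __init__(self, **connect_kwargs):
            self.mysql = pymysql.connect(**connect_kwargs)

        def _ensure_open(self):
            # Connection.open turns False once the server drops the socket;
            # ping(reconnect=True) transparently re-establishes the session.
            if not self.mysql.open:
                self.mysql.ping(reconnect=True)

        def execute(self, query, args=None):
            self._ensure_open()
            with self.mysql.cursor() as cur:
                cur.execute(query, args)
                return cur.fetchall()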

Related

Inserting to MySQL with mysql.connector - good practice/efficiency

I am working on a personal project and was wondering if my solution for inserting data into a MySQL database would be considered "pythonic" and efficient.
I have written a separate class for that, which is called from an object holding a dataframe. From there I call my save() function to write the dataframe to the database.
The script will run once a day: I scrape some data from some websites and save it to my database. So it is important that it really runs through completely, even when I have bad data or temporary connection issues (the script and the database run on different machines).
    import mysql.connector
    # custom logger
    from myLog import logger
    # custom class for formatting the data; a lot of potential errors are handled here
    from myFormat import myFormat
    # insert strings for mysql are stored and referenced here
    import sqlStrings

    class saveSQL:
        def __init__(self):
            self.frmt = myFormat()
            self.host = 'XXX.XXX.XXX.XXX'
            self.user = 'XXXXXXXX'
            self.password = 'XXXXXXXX'
            self.database = 'XXXXXXXX'

        def save(self, payload, type):
            match type:
                case 'First':
                    return self.__first(payload)
                case 'Second':
                    ...
                case _:
                    logger.error('Undefined Input for Type!')

        def __first(self, payload):
            try:
                self.mydb = mysql.connector.connect(host=self.host, user=self.user,
                                                    password=self.password, database=self.database)
                mycursor = self.mydb.cursor()
            except mysql.connector.Error as err:
                logger.error('Couldn\'t establish connection to DB!')
            try:
                tmpList = payload.values.tolist()
            except ValueError:
                logger.error('Value error in converting dataframe to list: %s' % payload)
            try:
                mycursor.executemany(sqlStrings.First, tmpList)
                self.mydb.commit()
                dbWrite = mycursor.rowcount
            except mysql.connector.Error as err:
                logger.error('Error in writing to database: %s' % err)
                dbWrite = 0
                for ele in tmpList:
                    try:
                        mycursor.execute(sqlStrings.First, ele)
                        self.mydb.commit()
                        dbWrite = dbWrite + mycursor.rowcount
                    except mysql.connector.Error as err:
                        logger.error('Error in writing to database: %s \n ele: %s' % (err, ele))
                        continue
                pass
            mycursor.close()
            return dbWrite
Things I am wondering about:
Is the match/case a good option to distinguish between writing to different tables depending on the data?
Are the different try/except blocks really necessary, or are there easier ways of handling potential errors?
Do I really need the pass statement at the end of the for loop?
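
For what it's worth, the batch-then-fallback flow above can also be written with narrower try blocks, the connection handled by contextlib.closing, and no trailing pass (it does nothing there). A sketch under the same assumptions as the question's code (sqlStrings.First and the myLog logger come from the poster's own modules):

    from contextlib import closing

    import mysql.connector

    import sqlStrings
    from myLog import logger

    def write_rows(cfg, rows):
        """Try one executemany; fall back to row-by-row inserts on failure."""
        with closing(mysql.connector.connect(**cfg)) as mydb, \
                closing(mydb.cursor()) as cur:
            try:
                cur.executemany(sqlStrings.First, rows)
                mydb.commit()
                return cur.rowcount
            except mysql.connector.Error:
                logger.exception('Batch insert failed, retrying row by row')
            written = 0
            for row in rows:
                try:
                    cur.execute(sqlStrings.First, row)
                    mydb.commit()
                    written += cur.rowcount
                except mysql.connector.Error as err:
                    logger.error('Error writing row %s: %s', row, err)
            return written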

Proper management of database resources: cursor and connection

I am creating a test Flask API and have created a Database class that I use from my main app. I am using pymysql to access my MySQL DB, but I am having trouble figuring out when to close the cursor and connection. Right now I have:
    import pymysql

    class Database:
        def __init__(self):
            host = '127.0.0.1'
            user = 'root'
            password = ''
            db = 'API'
            self.con = pymysql.connect(host=host, user=user, password=password, db=db,
                                       cursorclass=pymysql.cursors.DictCursor, autocommit=True)
            self.cur = self.con.cursor()

        def getUser(self, id):
            sql = 'SELECT * from users where id = %s'
            self.cur.execute(sql, (id,))
            result = self.cur.fetchall()
            return result

        def getAllUsers(self):
            sql = 'SELECT * from users'
            self.cur.execute(sql)
            result = self.cur.fetchall()
            return result

        def AddUser(self, firstName, lastName, email):
            sql = "INSERT INTO `users` (`firstName`, `lastName`, `email`) VALUES (%s, %s, %s)"
            self.cur.execute(sql, (firstName, lastName, email))
I have tried adding self.cur.close() and self.con.close() after each execution of the cursor in the functions, but then I get an error the next time I call a function, saying the cursor is closed; or, after I do an insert statement, it won't show the new value even though it was inserted correctly into MySQL. How do I know when to close the cursor, and how do I start it back up properly with each call to a method?
This sounds like a great use case for a Python context manager. Context managers allow you to properly manage resources, such as a database connection, by letting you specify how your resource's set-up and tear-down should work.

You can create your own custom context manager in one of two ways: first, by wrapping your database class and implementing the required methods for the context manager: __init__(), __enter__(), and __exit__(); second, by using the @contextmanager decorator on a function definition and creating a generator for your database resource within that function definition. I will show both approaches and let you decide which one you prefer.

The __init__() method is the initialization method for your custom context manager, similar to the initialization method used for custom Python classes. The __enter__() method is the setup code, and the __exit__() method is the teardown code. Both approaches use these methods; the main difference is that the first approach states them explicitly within your class definition, whereas in the second approach all the code up to your generator's yield statement is the initialization and setup code, and all the code after the yield statement is the teardown code.

I would also consider extracting your user-based database actions into a user model class. Something along the lines of:
Custom context manager (class-based approach):

    import pymysql

    class MyDatabase():
        def __init__(self):
            self.host = '127.0.0.1'
            self.user = 'root'
            self.password = ''
            self.db = 'API'
            self.con = None
            self.cur = None

        def __enter__(self):
            # connect to database
            self.con = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                       db=self.db, cursorclass=pymysql.cursors.DictCursor, autocommit=True)
            self.cur = self.con.cursor()
            return self.cur

        def __exit__(self, exc_type, exc_val, traceback):
            # params after self are for dealing with exceptions
            self.con.close()
user.py (refactored):

    # import your custom context manager created in the step above;
    # if you called your custom context manager file my_database.py:
    # from my_database import MyDatabase

    class User:
        def getUser(self, id):
            sql = 'SELECT * from users where id = %s'
            with MyDatabase() as db:
                db.execute(sql, (id,))
                result = db.fetchall()
            return result

        def getAllUsers(self):
            sql = 'SELECT * from users'
            with MyDatabase() as db:
                db.execute(sql)
                result = db.fetchall()
            return result

        def AddUser(self, firstName, lastName, email):
            sql = "INSERT INTO `users` (`firstName`, `lastName`, `email`) VALUES (%s, %s, %s)"
            with MyDatabase() as db:
                db.execute(sql, (firstName, lastName, email))
Context manager (decorator approach):

    from contextlib import contextmanager

    import pymysql

    @contextmanager
    def my_database():
        con = None
        try:
            host = '127.0.0.1'
            user = 'root'
            password = ''
            db = 'API'
            con = pymysql.connect(host=host, user=user, password=password, db=db,
                                  cursorclass=pymysql.cursors.DictCursor, autocommit=True)
            cur = con.cursor()
            yield cur
        finally:
            # guard against the connect call itself failing
            if con is not None:
                con.close()
Then within your User class you could use the context manager by first importing the file and then using it much as before:

    with my_database() as db:
        sql = <whatever SQL statement you wish to execute>
        # db action
        db.execute(sql)

Hopefully that helps!
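
A possible refinement, not part of the answer above: if you drop autocommit=True, __exit__ can commit when the with block succeeds and roll back when it raises. A sketch (MyDatabaseTx is a made-up name, subclassing MyDatabase from the class-based approach):

    class MyDatabaseTx(MyDatabase):
        """Variant that owns the transaction instead of relying on autocommit."""

        def __enter__(self):
            self.con = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                       db=self.db, cursorclass=pymysql.cursors.DictCursor)
            self.cur = self.con.cursor()
            return self.cur

        def __exit__(self, exc_type, exc_val, traceback):
            # exc_type is None when the with block finished without raising
            try:
                if exc_type is None:
                    self.con.commit()
                else:
                    self.con.rollback()
            finally:
                self.con.close()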

Lambda (Python 3.6) PyMySQL EXISTS query always returns 1

I am trying to get a PyMySQL query in Lambda (Python 3.6) to return whether a user exists or not. I pass my Slack user ID into the query; this is what I want to check in MySQL. I can run the same query through MySQL and it returns 0, but for some reason, every time I call this query through Lambda, it tells me the user exists (my database is empty). My query function is this:
    def userExists(user):
        statement = f"SELECT EXISTS(SELECT 1 FROM slackDB.Assets WHERE userID LIKE '%{user}%')Assets"
        tempBool = cursor.execute(statement, args=None)
        conn.commit()
        return tempBool
Here is the full code I am working with:
    ################################
    # Slack Lambda handler.
    ################################
    import sys
    import logging
    import os
    import pymysql
    import urllib.parse
    import urllib.request

    # Grab data from the environment.
    BOT_TOKEN = os.environ["BOT_TOKEN"]
    ASSET_TABLE = os.environ["ASSET_TABLE"]
    REGION_NAME = os.getenv('REGION_NAME', 'us-east-2')
    DB_NAME = "admin"
    DB_PASSWORD = "somepassword"
    DB_DATABASE = "someDB"
    RDS_HOST = "myslackdb.somepseudourl.us-east-2.rds.amazonaws.com"
    port = 3306

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    try:
        conn = pymysql.connect(RDS_HOST, user=DB_NAME, passwd=DB_PASSWORD, db=DB_DATABASE, connect_timeout=5)
        cursor = conn.cursor()
    except:
        logger.error("ERROR: Unexpected error: Could not connect to MySql instance.")
        sys.exit()

    # Define the URL of the targeted Slack API resource.
    SLACK_URL = "https://slack.com/api/chat.postMessage"

    def userExists(user):
        statement = f"SELECT EXISTS(SELECT 1 FROM slackDB.Assets WHERE userID LIKE '%{user}%')Assets"
        tempBool = cursor.execute(statement, args=None)
        conn.commit()
        return tempBool

    def addUser(user):
        statement = f"INSERT INTO `slackDB`.`Assets` (`userID`, `money`) VALUES ('{user}', '1000')"
        tempBool = cursor.execute(statement, args=None)
        conn.commit()
        return tempBool

    def lambda_handler(data, context):
        # Slack challenge answer.
        if "challenge" in data:
            return data["challenge"]

        # Grab the Slack channel data.
        slack_event = data['event']
        slack_userID = slack_event["user"]
        slack_text = slack_event["text"]
        channel_id = slack_event["channel"]
        slack_reply = ""

        # Ignore bot messages.
        if "bot_id" in slack_event:
            slack_reply = ""
        else:
            # Start data sift.
            if slack_text.startswith("!networth"):
                slack_reply = "Your networth is: "
            elif slack_text.startswith("!price"):
                command, asset = slack_text.split()
                slack_reply = f"The price of a(n) {asset} is: "
            elif slack_text.startswith("!addme"):
                if userExists(slack_userID):
                    slack_reply = f"User {slack_userID} already exists"
                else:
                    slack_reply = f"Adding user {slack_userID}"
                    addUser(slack_userID)

        # We need to send back three pieces of information:
        data = urllib.parse.urlencode(
            (
                ("token", BOT_TOKEN),
                ("channel", channel_id),
                ("text", slack_reply)
            )
        )
        data = data.encode("ascii")

        # Construct the HTTP request that will be sent to the Slack API.
        request = urllib.request.Request(
            SLACK_URL,
            data=data,
            method="POST"
        )

        # Add a header mentioning that the text is URL-encoded.
        request.add_header(
            "Content-Type",
            "application/x-www-form-urlencoded"
        )

        # Fire off the request!
        urllib.request.urlopen(request).read()

        # Everything went fine.
        return "200 OK"
I am typing '!addme' in Slack and it always tells me the user exists. I have printed out my query statement, and it is inputting my Slack ID correctly. I have checked my table, and it is completely empty. I have run the query in MySQL and it returns 0.
Does anyone have any ideas? Am I just derping this up on something easy? Any help or hints are much appreciated.
Thanks,
I don't see a fetch from the cursor, just the execute.
The return from execute is the number of rows affected. For DML operations (INSERT/UPDATE/DELETE) that makes sense, but I wouldn't rely on the rows-affected count for a SELECT.
In this case, the SELECT EXISTS query is going to either return a row or throw an error. But the fact that the query returns a row doesn't tell us anything about the value of the Assets column.
From the query, it looks like we want to fetch a row and then determine whether the Assets column contains a 0 or a 1 (or NULL).
After the query execution, try cursor.fetchone() to retrieve the row.
We could also execute a simpler query, and then use a fetch to determine whether a row is returned or not.
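
Put together, the fix described above might look like this (a sketch reusing the question's module-level cursor and conn; the LIKE value is bound as a parameter instead of being interpolated into an f-string):

    def userExists(user):
        # EXISTS(...) always yields exactly one row whose single column
        # holds 0 or 1, which is the value we actually want to return
        statement = "SELECT EXISTS(SELECT 1 FROM slackDB.Assets WHERE userID LIKE %s)"
        cursor.execute(statement, ('%' + user + '%',))
        row = cursor.fetchone()
        return bool(row[0]) if row else False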

Working with coroutines in Python Tornado Web Server

I am working on an autonomous-car implementation for a web browser game with Python 2.x. I use Tornado Web Server to run the game on localhost; I post and receive data from the game in JSON format in the function called "FrameHandler", and I determine what the car's action should be in the "to_dict_faster()" function.
My problem is that I can write the data held in the speed_data variable to a text file at a specific time interval with the help of a coroutine. However, I can't emit JSON data at that same interval, because "FrameHandler" acts like a while True loop and always requests data to dump. What I am trying to do is send the desired actions at a specific interval, just as I write the text file, without changing the flow of the frame handler, because that affects the FPS of the game.
I have been trying to figure out how I can do that for a long time; any help would be great here:
    @gen.coroutine
    def sampler():
        io_loop = tornado.ioloop.IOLoop.current()
        start = time.time()
        while True:
            with open("Sampled_Speed.txt", "a") as text_file:
                text_file.write("%d,%.2f\n" % (speed_data, ((time.time() - start))))
            yield gen.Task(io_loop.add_timeout, io_loop.time() + period)

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.redirect("/static/v2.curves.html")

    class FrameHandler(tornado.web.RequestHandler):
        def post(self):
            global speed_data
            data = json.loads(self.get_arguments("telemetry")[0])
            ar = np.fromstring(base64.decodestring(self.request.body), dtype=np.uint8)
            image = ar.reshape(hp.INPUT_SIZE, hp.INPUT_SIZE, hp.NUM_CHANNELS)
            left, right, faster, slower = data["action"]
            terminal, action, all_data, was_start = (
                data["terminal"],
                Action(left=left, right=right, faster=faster, slower=slower),
                data["all_data"],
                data["was_start"]
            )
            for i in range(len(all_data)):
                data_dict = all_data[i]
                speed_data = data_dict[u'speed']
                position_data = data_dict[u'position']
            result_action = agent.steps(image, 0.1, terminal, was_start, action, all_data)
            if speed_data < 4000:
                self.write(json.dumps(result_action.to_dict_faster()))
            else:
                self.write(json.dumps(result_action.to_dict_constant()))

    def make_app():
        return tornado.web.Application([
            (r"/", MainHandler),
            (r"/frame", FrameHandler),
            (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": static_path})
        ], debug=True)

    if __name__ == "__main__":
        app = make_app()
        if "SERVER_PORT" in os.environ:
            port = int(os.environ["SERVER_PORT"])
        else:
            port = 8880
        print "LISTENING ON PORT: %d" % port
        app.listen(port)
        tornado.ioloop.IOLoop.current().run_sync(sampler)
        tornado.ioloop.IOLoop.current().start()
You can move the file writing to a different thread (using Tornado's run_on_executor, for example), so the Python interpreter will automatically switch from the Sampler to the main thread with FrameHandler on write. But you have to make speed_data thread-safe; I've used the stdlib Queue.Queue as an example:
    import concurrent.futures
    import Queue

    import tornado.web
    from tornado import gen
    from tornado.concurrent import run_on_executor
    from tornado.httpserver import HTTPServer
    from tornado.ioloop import IOLoop
    from tornado.web import Application

    class Handler(tornado.web.RequestHandler):
        @gen.coroutine
        def get(self):
            global speed_data
            speed_data.put("REALLY BIG TEST DATA\n")
            self.finish("OK")

    class Sampler():
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)

        def __init__(self, queue):
            self._q = queue

        @run_on_executor
        def write_sample(self):
            with open("foobar.txt", "w") as f:
                while True:
                    data = self._q.get()
                    f.write(data)

    if __name__ == '__main__':
        application = Application(
            [("/status", Handler)]
        )
        server = HTTPServer(application)
        server.listen(8888)
        speed_data = Queue.Queue()
        smp = Sampler(speed_data)
        IOLoop.current().add_callback(smp.write_sample)
        IOLoop.current().start()
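
If all you need is the fixed-interval sampling from the question, another option (my suggestion, not part of the answer above) is Tornado's PeriodicCallback, which invokes a plain function on the IOLoop every N milliseconds without a dedicated coroutine loop:

    import time

    import tornado.ioloop

    start = time.time()
    speed_data = 0  # updated elsewhere, e.g. by FrameHandler

    def write_sample():
        # append one sample per tick; the request handlers stay untouched
        with open("Sampled_Speed.txt", "a") as f:
            f.write("%d,%.2f\n" % (speed_data, time.time() - start))

    # fire every 1000 ms on the IOLoop
    sampler = tornado.ioloop.PeriodicCallback(write_sample, 1000)
    sampler.start()
    tornado.ioloop.IOLoop.current().start()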

MySQL connection pooling in separate DB class - howto?

I'm writing an application where I've moved all the MySQL connection setup and teardown to a class, initialized within individual function calls with a with statement.
Now that the development is all done, I'm optimizing and would like to set up connection pooling, but I can't for the life of me figure out how: if I initialize the pool when I set up the object in __enter__, won't that set up a new pool for each object?
If I put the pool setup at global level in the module, then how do I ensure I set up the pool before I start creating DB objects?
My DB code looks somewhat like this:
    # Setting up details for connecting to a local MariaDB/MySQL instance.
    # Replace with suitable code/module when porting to cloud/production.
    import sys
    import mysql.connector

    """Module for abstracting database connectivity.
    Import this module and then call run_query(), run_query_vals() or run_query_no_return()."""

    __all__ = ['UseDatabase', 'CredentialsError', 'ConnectionError', 'SQLError']

    class ConnectionError(Exception):
        pass

    class CredentialsError(Exception):
        pass

    class SQLError(Exception):
        pass

    dbconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat', }
    # Just so we remember. This also doubles as default server details while doing unit testing.

    class UseDatabase:
        # myconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat', }
        config = None

        def __init__(self, config: dict):
            self.config = config

        def __enter__(self) -> 'UseDatabase':
            try:
                self.conn = mysql.connector.connect(**self.config)
                self.cursor = self.conn.cursor(dictionary=True)
                return self
            except mysql.connector.InterfaceError as err:
                print('Can\'t connect to Database - is it available? \nError: ', str(err))
                raise ConnectionError(err)
            except mysql.connector.ProgrammingError as err:
                print('Invalid credentials - please check ID/Password. \nError: ', str(err))
                raise CredentialsError(err)
            except mysql.connector.IntegrityError as err:
                print("Error: {}".format(err))
            except Exception as err:
                print('Something else went wrong:', str(err))
                return err

        def __exit__(self, exc_type, exc_value, exc_traceback):
            self.conn.commit()
            self.cursor.close()
            self.conn.close()
            if exc_type is mysql.connector.errors.ProgrammingError:
                print('Error in SQL Code - please check the query. \nError: ', str(exc_type))
                raise SQLError(exc_value)
            elif exc_type:
                print('Something else went wrong\n', str(exc_type))
                raise exc_type(exc_value)

        def run_query(self, query_str) -> 'cursor':
            """Run a query that takes no parameters."""
            self.cursor.execute(query_str, None)
            return self.cursor

        def run_query_vals(self, query_str, tupleval) -> 'cursor':
            # print("\n\n %s " % query_str)
            self.cursor.execute(query_str, tupleval)
            return self.cursor

        def run_query_no_return(self, query_str) -> 'cursor':
            """Run a query without parameters."""
            self.cursor.execute(query_str)
            return self.cursor

    def test():
        # dbconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat', }
        with UseDatabase(dbconfig) as db:
            # result = db.run_query("Select NULL from dual")
            result = db.run_query_vals('Select NULL from dual', None)
            res = result.fetchone()
            if res == {'NULL': None}:
                print("DB Module Test was successful! \n"
                      "Queries return values in dictionaries."
                      "\nTest query 'Select NULL from dual' returned result: %s" % str(res))

    if __name__ == '__main__':
        test()
This has worked for me, but I am not sure it's a perfect solution: for example, trying to do multiple inserts via a for loop results in a 'Failed getting connection; pool exhausted' error. I did not have this problem when I was using a function-based (non-class-based) connection pool. Anyway, to avoid the problem I simply use cursor.executemany in one go.
Hope this helps someone!
    import logging

    from mysql.connector.pooling import MySQLConnectionPool
    from mysql.connector.errors import ProgrammingError, InterfaceError

    from settings import config

    logger = logging.getLogger(__name__)

    class CredentialsError(Exception):
        """Assumed custom exception, as in the UseDatabase module above."""
        pass

    # Database connection pool
    dbconfig = config.dbconfig
    dbconfig_pool = config.dbconfig_pool

    # The following is my 'class DBasePool' content:
    class DBasePool:
        def __init__(self, dbconfig, dbconfig_pool):
            self.dbconfig = dbconfig
            self.pool_name = dbconfig_pool['pool_name']
            self.pool_size = dbconfig_pool['pool_size']
            try:
                self.cnxpool = self.create_pool(pool_name=self.pool_name, pool_size=self.pool_size)
                self.cnx = self.cnxpool.get_connection()
                self.cursor = self.cnx.cursor(buffered=True)
            except InterfaceError as e:
                logger.error(e)
                raise ConnectionError(e)
            except ProgrammingError as e:
                logger.error(e)
                raise CredentialsError(e)
            except Exception as e:
                logger.error(e)
                raise

        def create_pool(self, pool_name, pool_size):
            return MySQLConnectionPool(pool_name=pool_name, pool_size=pool_size, **self.dbconfig)

        def close(self, cnx, cursor):
            cursor.close()
            cnx.close()

        def execute(self, sql, data=None):
            # Get a connection from the connection pool instead of creating one.
            cnx = self.cnxpool.get_connection()
            cursor = cnx.cursor(buffered=True)
            cursor.execute(sql, data)
            if cursor.rowcount:
                cnx.commit()
                rowcount = cursor.rowcount
                self.close(cnx, cursor)
                return rowcount
            else:
                print('Could not insert record(s): {}, {}'.format(sql, data))
                self.close(cnx, cursor)  # return the connection to the pool either way
                return 0
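
For completeness, a hypothetical usage of the class above (assuming settings.config supplies both dicts, e.g. dbconfig_pool = {'pool_name': 'mypool', 'pool_size': 5}; the table and columns are made up):

    pool = DBasePool(dbconfig, dbconfig_pool)
    affected = pool.execute(
        "INSERT INTO stat.samples (label, value) VALUES (%s, %s)",
        ('test', 42))
    print('rows affected: %d' % affected)

As for the original question about where the pool should live: mysql.connector can also manage a shared pool by itself. Passing pool_name (and optionally pool_size) to mysql.connector.connect() creates the named pool on the first call and hands out connections from that same pool on every later call, so no separate module-level setup step is required; closing a pooled connection returns it to the pool rather than dropping it. A sketch of __enter__ from the question's UseDatabase using this (the name 'statpool' is made up):

    def __enter__(self) -> 'UseDatabase':
        # the first call creates the named pool; later calls with the same
        # pool_name borrow a connection from the existing pool
        self.conn = mysql.connector.connect(pool_name='statpool', pool_size=5, **self.config)
        self.cursor = self.conn.cursor(dictionary=True)
        return self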