uwsgi: two MySQL errors some time after starting the application - mysql

I'm using uwsgi for my Python application. I am new to uwsgi.
When I want to run uwsgi in the background I do this:
uwsgi --http 127.0.0.1:1088 --wsgi-file app.py --callable app --master --processes 1 --workers 1 --threads 1 --daemonize=logs.txt
Then after some time, e.g. 10 minutes, when I try to log in to my test account on my live website, I always get a 500 internal error. In the logs.txt file I found this exception:
OperationalError("(_mysql_exceptions.OperationalError) (2006, 'MySQL server has gone away')")
or sometimes this one
StatementError("(sqlalchemy.exc.InvalidRequestError) Can't reconnect until invalid transaction is rolled back",)
What I did:
I found that --lazy-apps or --lazy should solve the problem, but it didn't.
Here is how I used --lazy-apps:
uwsgi --http 127.0.0.1:1065 --wsgi-file app.py --callable app --master --lazy-apps --processes 1 --workers 1 --threads 1 --daemonize=logs.txt
Then I tried to set SQLALCHEMY_POOL_RECYCLE to less than five minutes, like this (but the problem still occurs):
app.config['SQLALCHEMY_POOL_RECYCLE'] = 285
I read that I should disable pooling using NullPool, but to be honest I don't know how to do it. Here (link) is the list of configuration keys, but there is no SQLALCHEMY_POOLCLASS.
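If I understand the Flask-SQLAlchemy docs correctly (this is an assumption on my part; I believe it needs Flask-SQLAlchemy 2.4+), engine options such as the pool class can be forwarded to create_engine() like this sketch:

from sqlalchemy.pool import NullPool

# forwarded verbatim to SQLAlchemy's create_engine();
# pool_pre_ping=True would be another option worth trying here
app.config['SQLALCHEMY_ENGINE_OPTIONS'] = {'poolclass': NullPool}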
Below is my code. How can I solve my problem? Thanks.
from flask import Flask, request, render_template, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager, login_user, logout_user

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://brrr:brrr@localhost/grrr'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_RECYCLE'] = 285
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
from models import *
app.secret_key = 'super secret key'
login_manager = LoginManager()
login_manager.init_app(app)
from models import *
login_manager.login_view = "login"

@login_manager.user_loader
def load_user(user_id):
    return User.query.filter(User.id == int(user_id)).first()

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/success')
def success():
    return render_template('success.html')

@app.route('/login', methods=['GET', 'POST'])
def login():
    error = None
    if request.method == 'POST':
        user = User.query.filter_by(username=request.form['username']).first()
        if user is not None:
            user_pass = request.form['password']
            if bcrypt.check_password_hash(user.password, user_pass):
                login_user(user)
                return redirect(url_for('success'))
            else:
                error = 'error'
        else:
            error = 'error'
    return render_template('login.html', error=error)

@app.route('/signup', methods=['GET', 'POST'])
def signup():
    user_name_error = None
    email_error = None
    if request.method == 'POST':
        user = User(
            username=request.form['username'],
            password=request.form['password']
        )
        db.session.add(user)
        db.session.commit()
        login_user(user)
        return redirect(url_for('success'))
    return render_template('signup.html')

@app.route('/logout')
def logout():
    logout_user()
    return redirect(url_for('index'))

if __name__ == '__main__':
    app.run()

I did the same thing as PizzaPleb did here (link), so I re-initialized my db like this:
db.init_app(app)
I pasted it here:
...
from models import *
login_manager.login_view = "login"
db.init_app(app)  # <- the line I added
...
and I got rid of that exception.
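For reference, the second exception above literally says the invalid transaction must be rolled back first; a request teardown along these lines (a sketch, assuming Flask-SQLAlchemy) would make sure that happens:

# sketch: roll back a failed transaction at the end of each request so the
# pooled connection isn't handed back in an invalid state
@app.teardown_request
def rollback_on_error(exc):
    if exc is not None:
        db.session.rollback()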

Related

check_password_hash always returns False

I am creating a Flask API with login auth, but check_password_hash always returns False and I get an error.
In my app.py I'm trying this:
from flask import request, render_template, session
from werkzeug.security import generate_password_hash, check_password_hash

@app.route("/signup", methods=["GET", "POST"])
def signup():
    if request.method == "POST":
        hashed_pw = generate_password_hash(request.form["password"], method="sha256")
        new_user = Users(username=request.form["username"], password=hashed_pw)
        db.session.add(new_user)
        db.session.commit()
        return "You've registered successfully."
    return render_template("signup.html")

@app.route("/login", methods=["GET", "POST"])
def login():
    if request.method == "POST":
        user = Users.query.filter_by(username=request.form["username"]).first()
        if user and check_password_hash(user.password, request.form["password"]):
            session['username'] = user.username
            return "You are logged in"
        else:
            return "Your credentials are invalid, check and try again."
    return render_template("login.html")
When I print user.password and request.form["password"], this is what they return:
pass -> sha256$SSC4jjZIE3Wm6l7v$74e78b19ddfa3ad62963c93f34d9c6cd93b67e47b4e42e896a726d79
pass -> 1
First, make sure that request.form["password"] is returning the password that the user typed.
I don't know how you are hashing the password. Anyway, a simple way to do it is with Python's passlib; sha256_crypt has no known weaknesses.
from passlib.hash import sha256_crypt
To save the hash:
hashed_password = sha256_crypt.hash("ThePassword")
To check the password:
sha256_crypt.verify("password from the form", hashed_password)
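Putting the two together, a quick round trip (assuming passlib is installed, e.g. pip install passlib):

from passlib.hash import sha256_crypt

hashed_password = sha256_crypt.hash("ThePassword")
print(sha256_crypt.verify("ThePassword", hashed_password))    # True
print(sha256_crypt.verify("WrongPassword", hashed_password))  # False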

connection in a different thread can't read tables created in another thread?

In a testing suite I have a fixture that drops all the tables in an engine and then creates all the tables fresh. After this fixture logic, my test case runs, using the newly created tables.
The fixture and the test case are run in the MainThread, while the database consumer is a web application server run in another thread.
However, I keep getting: sqlite3.OperationalError: no such table: ***
I've checked that they are using the same in-memory engine, but different connections (this is correct). And I've checked that the fixture does run before the consumer thread starts running.
What could be the possible cause?
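One suspect I can think of (an assumption, taken from SQLAlchemy's SQLite notes, not verified against my setup): each new connection to sqlite:///:memory: opens its own brand-new empty database, so another thread's connection would never see the tables. A sketch of the usual workaround, sharing a single connection across threads via StaticPool:

from sqlalchemy import create_engine
from sqlalchemy.pool import StaticPool

engine = create_engine(
    "sqlite:///:memory:",
    connect_args={"check_same_thread": False},  # allow use from other threads
    poolclass=StaticPool,                       # reuse the one underlying connection
)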
My code is as below:
import os
import pytest
import cherrypy
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

class DAL:
    def __init__(self,
                 path="database",
                 filename=None,
                 conn_string=None,
                 echo=False):
        if filename is None and conn_string is None:
            conn_string = "sqlite:///:memory:"
        elif conn_string is not None:
            conn_string = conn_string
        else:
            conn_string = f'sqlite:///{os.path.abspath(path)}/{filename}'
        self.conn_string = conn_string
        engine = create_engine(conn_string, echo=echo)
        Session_Factory = sessionmaker(bind=engine)
        self.Session = sqlalchemy.orm.scoped_session(Session_Factory)
    def __str__(self):
        return f"<DAL>object: {self.conn_string}, at {hex(id(self))}"

    def get_a_dbsession(self):
        opened_db = self.Session()
        return opened_db

    def __enter__(self):
        return self.get_a_dbsession()

    def __exit__(self, exception_type, exception_value, exception_traceback):
        opened_db = self.Session()
        try:
            opened_db.commit()
        except:
            opened_db.rollback()
            raise
        else:
            opened_db.close()
        finally:
            self.Session.remove()
        if exception_type is not None:
            raise exception_type

    def create_schema(self):
        SchemaBase.metadata.create_all(self.Session().connection().engine)
class SAEnginePlugin(cherrypy.process.plugins.SimplePlugin):
    def __init__(self, bus, dal):
        """
        The plugin is registered to the CherryPy engine.
        """
        cherrypy.process.plugins.SimplePlugin.__init__(self, bus)
        self.dal = dal

    def start(self):
        self.bus.subscribe("bind-session", self.bind)

    def stop(self):
        self.bus.unsubscribe("bind-session", self.bind)
        if self.dal:
            del self.dal

    def bind(self):
        """
        Whenever this plugin receives the 'bind-session' message, it applies
        this method and binds the received session to the engine.
        """
        # self.dal.Session.configure(bind=self.dal.engine)
        session = self.dal.get_a_dbsession()
        return session

class SATool(cherrypy.Tool):
    def __init__(self):
        """
        This tool binds a session to the engine each time
        a request starts and commits/rolls back whenever
        the request terminates.
        """
        cherrypy.Tool.__init__(self,
                               'on_start_resource',
                               self.bind_session,
                               priority=20)

    def _setup(self):
        cherrypy.Tool._setup(self)
        cherrypy.request.hooks.attach('on_end_resource',
                                      self.close_session,
                                      priority=80)

    def bind_session(self):
        """
        Attaches a session to the request's scope by requesting
        the SA plugin to bind a session to the SA engine.
        """
        session = cherrypy.engine.publish('bind-session').pop()
        cherrypy.request.db = session

    def close_session(self):
        """
        Commits the current transaction or rolls back if an error occurs.
        In all cases, the current session is unbound and therefore
        not usable any longer.
        """
        if not hasattr(cherrypy.request, 'db'):
            return
        try:
            cherrypy.request.db.commit()
        except:
            cherrypy.request.db.rollback()
            raise
        finally:
            cherrypy.request.db.close()
            cherrypy.request.db = None

# Register the SQLAlchemy tool
cherrypy.tools.db = SATool()
class UnitServer:
    ...

    @cherrypy.expose
    @cherrypy.tools.json_in()
    def list_filtered_entries(self):
        ...
        queryOBJ = cherrypy.request.db.query(classmodel_obj)
        ...

############# main module code below ############:

# mocking 'db':
dal = database.DAL()

# configure cherrypy:
SAEnginePlugin(cherrypy.engine, dal).subscribe()

@pytest.fixture(autouse=True)  # automatically run before every test case
def mocked_dal(request):
    # first, clean the database by dropping all tables:
    database.SchemaBase.metadata.drop_all(dal.Session().connection().engine)
    # second, create the schema from blank:
    dal.create_schema()
    # third, insert some dummy data record:
    ...
    db.commit()
class TestMyUnitServer(cherrypy.test.helper.CPWebCase):
    @staticmethod
    def setup_server():
        ...
        server_app = UnitServer()
        cherrypy.tree.mount(server_app, '', {'/': {'tools.db.on': True}})

    def test_list_filtered_entries_allentries(self):
        ...
        self.getPage('/list_filtered_entries',
                     headers=[("Accept", "application/json"),
                              ('Content-type', 'application/json'),
                              ('Content-Length',
                               str(len(json.dumps(query_params)))),
                              ("Connection", "keep-alive"),
                              ("Cache-Control", "max-age=0")],
                     body=serialized_query_params,
                     method="POST")
        self.assertStatus('200 OK')

FastAPI unittesting not overriding get_db

I'm just trying to get FastAPI unit tests working with SQLAlchemy, but I'm having trouble testing with objects created in the database. I have the following setup, as per the docs.
main.py
routes = [
    APIRoute('/games/', views.games_list, name='index', response_class=HTMLResponse),
]
settings = Settings()
app = FastAPI(debug=settings.debug, routes=routes)
views.py
# Dependency
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

async def games_list(request: Request, db: Session = Depends(get_db)):
    settings = Settings()
    games = db.query(Game).all()
    return settings.templates.TemplateResponse('games/list.jinja', {'request': request, 'games': games})
database.py
def prepare_database(settings):
    engine = create_engine(settings.database_url)
    Base.metadata.create_all(engine)
    return engine

engine = prepare_database(delete_existing=False, settings=Settings())
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
conftest.py
@pytest.fixture
def db_conn():
    session = SessionLocalTesting()
    try:
        yield session
    finally:
        session.close()

@pytest.fixture
def cli(db_conn):
    def override_get_db():
        session = SessionLocalTesting()
        try:
            yield session
        finally:
            session.close()
    app.dependency_overrides[get_db] = override_get_db
    with TestClient(app) as client:
        yield client
test file
def test_games_list(cli, factory, db_conn):
    factory.create_game()
    # This will return the game I have created with my factory, definitely in the test db.
    print(db_conn.query(Game.name).all())
    r = cli.get('/games/')
    assert 'DnD Game' in r.content.decode()
My issue is that I can't get the objects from the test db in the view. If I print db.bind.database.url in the view, the test database is not being used, so it's trying to get items from the real database, not the test one.
So it looks like get_db is not getting overridden, though I'm not sure why.
Thanks in advance
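One thing worth checking (an assumption on my side, not verified): FastAPI's dependency_overrides dict is keyed by function identity, so the override only takes effect if the key is the very same get_db object that views.py passes to Depends. A sketch:

# conftest.py - hypothetical fix: use the exact function object given to Depends()
from views import get_db
from main import app

app.dependency_overrides[get_db] = override_get_db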
I think you forgot to specify the scope of the fixture. Create the DB connection and override the DB dependency through @pytest.fixture; pytest fixtures take a scope argument, e.g. pytest.fixture(scope='session'), and there is a module-level scope as well as several others (see the pytest docs on fixture scopes).
Here is an example that should help you use a test DB instead of the real one:
import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy_utils import create_database, drop_database
from app.main import app
from db.base import Base
from db.settings import get_database
from settings.config import (
    TEST_DATABASE_URI as SQLALCHEMY_DATABASE_URL,
)

# SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)

def override_get_db():
    """ Override """
    try:
        db = Session(autocommit=False, autoflush=False, bind=engine)
        yield db
    finally:
        db.close()

@pytest.fixture(scope="session", autouse=True)
def create_db():
    """ creating db model in database """
    create_database(SQLALCHEMY_DATABASE_URL)
    print("\n" + "\x1b[6;30;42m" + "Creating test database." + "\x1b[0m")
    Base.metadata.create_all(bind=engine)
    app.dependency_overrides[get_database] = override_get_db
    yield 1
    drop_database(SQLALCHEMY_DATABASE_URL)
    print("\n" + "\x1b[6;30;42m" + "Delete test database." + "\x1b[0m")

@pytest.fixture()
def get_db_session():
    """ Getting session for db transaction """
    session = Session(autocommit=False, autoflush=False, bind=engine)
    yield session
    session.close()

@pytest.fixture()
def client():
    """ Getting testclient of app """
    with TestClient(app) as client:
        yield client
Let me know if you still face the problem.

Python script DB connection as Pool not working, but simple connection is working

I am writing a script in Python 3 that listens to the tunnel and saves and updates data in MySQL depending on the message received.
I ran into weird behavior: I made a simple connection to MySQL using the pymysql module and everything worked fine, but after some time this simple connection closes.
So I decided to implement a pool connection to MySQL, and here the problem arises. There are no errors, but the issue is the following:
My cursor line:
cursor = yield self._pool.execute(query, list(filters.values()))
gives a cursor result of tornado_mysql.pools.Pool object at 0x0000019DE5D71F98
and it gets stuck like that, not doing anything more.
If I remove the yield from the cursor line, it gets past that line and the next line throws an error:
response = yield c.fetchall()
AttributeError: 'Future' object has no attribute 'fetchall'
How can I fix the MySQL pool connection to work properly?
What I tried:
I used a few modules for the pool connection; all run into the same issue.
I went back to the simple connection with pymysql and it worked again.
Below is my code:
python script file
import pika
from model import SyncModel
from tornado.gen import coroutine

_model = SyncModel(conf, _server_id)

@coroutine
def main():
    credentials = pika.PlainCredentials('user', 'password')
    try:
        cp = pika.ConnectionParameters(
            host='127.0.0.1',
            port=5671,
            credentials=credentials,
            ssl=False,
        )
        connection = pika.BlockingConnection(cp)
        channel = connection.channel()

        @coroutine
        def callback(ch, method, properties, body):
            if 'messageType' in properties.headers:
                message_type = properties.headers['messageType']
                if message_type in allowed_message_types:
                    result = ptoto_file._reflection.ParseMessage(descriptors[message_type], body)
                    if result:
                        result = protobuf_to_dict(result)
                        if message_type == 'MyMessage':
                            yield _model.message_event(data=result)
                else:
                    print('Message type not in allowed list = ' + str(message_type))
                print('continue listening...')

        channel.basic_consume(callback, queue='queue', no_ack=True)
        print(' [*] Waiting for messages. To exit press CTRL+C')
        channel.start_consuming()
    except Exception as e:
        print('Could not connect to host 127.0.0.1 on port 5671')
        print(str(e))

if __name__ == '__main__':
    main()
SyncModel
from tornado_mysql import pools
from tornado.gen import coroutine, Return
from tornado_mysql.cursors import DictCursor

class SyncModel(object):
    def __init__(self, conf, server_id):
        self.conf = conf
        servers = [i for i in conf.mysql.servers]
        for s in servers:
            if s['server_id'] == server_id:
                # s holds all the data: host, user, port, autocommit, charset, db, password
                s['cursorclass'] = DictCursor
                self._pool = pools.Pool(s, max_idle_connections=1, max_recycle_sec=3)

    @coroutine
    def message_event(self, data):
        table_name = 'table_name'
        query = ''
        data = data['message']
        filters = {
            'id': data['id']
        }
        # here the connection fails as described above
        response = yield self.query_select(table_name, self._pool, filters=filters)

    @coroutine
    def query_select(self, table_name, _pool, filters=None):
        if filters is None:
            filters = {}
        combined_filters = ['`%s` = %%s' % i for i in filters.keys()]
        where = 'WHERE ' + ' AND '.join(combined_filters) if combined_filters else ''
        query = """SELECT * FROM `%s` %s""" % (table_name, where)
        c = self._pool.execute(query, list(filters.values()))
        response = yield c.fetchall()
        raise Return({response})
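For what it's worth, my reading of the tornado_mysql examples (an assumption on my part) is that Pool.execute() returns a Future that resolves to a cursor, so the yield belongs on the execute() call itself, and fetchall() is then a plain synchronous call:

# sketch of the usage I believe tornado_mysql expects
cur = yield self._pool.execute(query, list(filters.values()))
response = cur.fetchall()  # no yield: fetchall() runs on the resolved cursor
raise Return(response)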
All the code was working with just a simple connection to the database; after I started to use the pool, the example stopped working. I will appreciate any help with this issue.
This is a standalone script.
The pool connection was not working, so I switched back to pymysql, double-checking the connection.
I would like to post my answer that worked; only this solution worked for me.
Before connecting to MySQL, check if the connection is open, and if not, reconnect:
if not self.mysql.open:
    self.mysql.ping(reconnect=True)
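As a slightly fuller sketch of that idea (assuming a plain pymysql connection; the connect() arguments are placeholders):

import pymysql

def ensure_open(conn):
    # Connection.open goes False once the server drops the link
    # (e.g. after wait_timeout); ping(reconnect=True) reopens it
    if not conn.open:
        conn.ping(reconnect=True)
    return conn

conn = pymysql.connect(host='127.0.0.1', user='user', password='password', db='db')
# ... later, before each query:
cursor = ensure_open(conn).cursor()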

MySQL connection pooling in separate DB class - howto?

I'm writing an application where I've moved all the MySQL connection setup and teardown into a class, initializing it within individual function calls with a with statement.
Now that the development is all done, I'm optimizing and would like to set up connection pooling - but I can't for the life of me figure out how. If I initialize the pool when I set up the object in __enter__, won't that set up a new pool for each object?
If I put the pool setup at module level, then how do I ensure I set up the pool before I start creating DB objects? A sketch of what I have in mind follows.
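To make the question concrete, here is roughly what I imagine the module-level version would look like (a sketch only; pool_name and pool_size are placeholder values):

# module-level pool: created once at import time, before any UseDatabase object
from mysql.connector.pooling import MySQLConnectionPool

dbconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat'}
_pool = MySQLConnectionPool(pool_name='app_pool', pool_size=5, **dbconfig)

class UseDatabase:
    def __enter__(self):
        self.conn = _pool.get_connection()   # borrow from the shared pool
        self.cursor = self.conn.cursor(dictionary=True)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.conn.commit()
        self.cursor.close()
        self.conn.close()                    # hands the connection back to the pool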
My DB code looks somewhat like this:
# Setting up details for connecting to a local MariaDB/MySQL instance
# replace with suitable code/module when porting to cloud/production
import sys
import mysql.connector

"""Module for abstracting database connectivity
Import this module and then call run_query(), run_query_vals() or run_query_no_return() """

__all__ = ['UseDatabase', 'CredentialsError', 'ConnectionError', 'SQLError']

class ConnectionError(Exception):
    pass

class CredentialsError(Exception):
    pass

class SQLError(Exception):
    pass

dbconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat'}
# Just so we remember. This also doubles as default server details while doing unit testing.

class UseDatabase:
    # myconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat', }
    config = None

    def __init__(self, config: dict):
        self.config = config

    def __enter__(self) -> 'UseDatabase':
        try:
            self.conn = mysql.connector.connect(**self.config)
            self.cursor = self.conn.cursor(dictionary=True)
            return self
        except mysql.connector.InterfaceError as err:
            print('Can\'t connect to Database - is it available? \nError: ', str(err))
            raise ConnectionError(err)
        except mysql.connector.ProgrammingError as err:
            print('Invalid credentials - please check ID/Password. \nError: ', str(err))
            raise CredentialsError(err)
        except mysql.connector.IntegrityError as err:
            print("Error: {}".format(err))
        except Exception as err:
            print('Something else went wrong:', str(err))
            return err

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.conn.commit()
        self.cursor.close()
        self.conn.close()
        if exc_type is mysql.connector.errors.ProgrammingError:
            print('Error in SQL Code - please check the query. \nError: ', str(exc_type))
            raise SQLError(exc_value)
        elif exc_type:
            print('Something else went wrong\n', str(exc_type))
            raise exc_type(exc_value)

    def run_query(self, query_str) -> 'cursor':
        """query function that takes """
        self.cursor.execute(query_str, None)
        return self.cursor

    def run_query_vals(self, query_str, tupleval) -> 'cursor':
        # print("\n\n %s " % query_str)
        self.cursor.execute(query_str, tupleval)
        return self.cursor

    def run_query_no_return(self, query_str) -> 'cursor':
        """query function that takes """
        self.cursor.execute(query_str)
        return self.cursor

def test():
    # dbconfig = {'host': '127.0.0.1', 'user': 'statdev', 'password': 'statdev', 'database': 'stat', }
    with UseDatabase(dbconfig) as db:
        # result = db.run_query("Select NULL from dual")
        result = db.run_query_vals('Select NULL from dual', None)
        res = result.fetchone()
        if res == {'NULL': None}:
            print("DB Module Test was successful! \n"
                  "Queries return values in dictionaries."
                  "\nTest query \'Select NULL from dual\' returned result: %s" % str(res))

if __name__ == '__main__':
    test()
This has worked for me, but I am not sure it's a perfect solution: for example, doing multiple inserts via a for loop results in a 'Failed getting connection; pool exhausted' error. I did not have this problem when I was using a function-based (non-class-based) connection pool. Anyway, to avoid this problem I simply use cursor.executemany in one go (see the sketch after the code below).
Hope this helps someone!
import logging

from mysql.connector.pooling import MySQLConnectionPool
from mysql.connector.errors import ProgrammingError, InterfaceError
from settings import config

logger = logging.getLogger(__name__)

# Database connection pool
dbconfig = config.dbconfig
dbconfig_pool = config.dbconfig_pool

# The following is my 'class DBasePool' content
# (ConnectionError/CredentialsError are the custom exceptions from the question's module):
class DBasePool:
    def __init__(self, dbconfig, dbconfig_pool):
        self.dbconfig = dbconfig
        self.pool_name = dbconfig_pool['pool_name']
        self.pool_size = dbconfig_pool['pool_size']
        try:
            self.cnxpool = self.create_pool(pool_name=self.pool_name, pool_size=self.pool_size)
            self.cnx = self.cnxpool.get_connection()
            self.cursor = self.cnx.cursor(buffered=True)
        except InterfaceError as e:
            logger.error(e)
            raise ConnectionError(e)
        except ProgrammingError as e:
            logger.error(e)
            raise CredentialsError(e)
        except Exception as e:
            logger.error(e)
            raise

    def create_pool(self, pool_name, pool_size):
        return MySQLConnectionPool(pool_name=pool_name, pool_size=pool_size, **self.dbconfig)

    def close(self, cnx, cursor):
        cursor.close()
        cnx.close()

    def execute(self, sql, data=None):
        # Get connection from connection pool instead of creating one
        cnx = self.cnxpool.get_connection()
        cursor = cnx.cursor(buffered=True)
        cursor.execute(sql, data)
        if cursor.rowcount:
            cnx.commit()
            rowcount = cursor.rowcount
            self.close(cnx, cursor)
            return rowcount
        else:
            print('Could not insert record(s): {}, {}'.format(sql, data))
            return 0
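For the batch-insert case mentioned above, a hypothetical execute_many() method in the same class might look like this (untested sketch):

    def execute_many(self, sql, rows):
        # one borrowed connection and one executemany() call for the whole batch
        cnx = self.cnxpool.get_connection()
        cursor = cnx.cursor(buffered=True)
        cursor.executemany(sql, rows)
        cnx.commit()
        rowcount = cursor.rowcount
        self.close(cnx, cursor)
        return rowcount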