Can't create MySQL Boolean Field Using flask-sqlalchemy & flask-migrate

I have a working Flask app with a few models. The User model is as follows...
class User(UserMixin, db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(128), index=True, unique=True)
    password_hash = db.Column(db.String(128))
    first_name = db.Column(db.String(32))
    last_name = db.Column(db.String(32))
    bio = db.Column(db.String(255))
    patterns = db.relationship('Pattern', backref='user', lazy='dynamic')
Now I want to add a new boolean column to the model. I'm utilizing MySQL as a database. I have tried the following...
invited = db.Column(db.Boolean, default=0)
but when I run flask db migrate I get the following...
INFO [alembic.runtime.migration] Context impl MySQLImpl.
I also tried
from sqlalchemy import BOOLEAN # also with Boolean
...
invited = db.Column(BOOLEAN, default=0) # also with Boolean
but I get the same error. Reading the MySQL documentation, I found that MySQL doesn't have a boolean type; it uses TINYINT instead. But reading this GitHub thread, I understand that the Boolean class is rendered as TINYINT depending on the dialect. So I did the following...
from sqlalchemy.dialects.mysql import BOOLEAN
and still I get the same error when I run flask db migrate. It seems like Alembic can't see the changes in the model.
Is there a way to create a boolean field in MySQL using flask-migrate and flask-sqlalchemy?

Try the below; I'm assuming Flask-Migrate is not recognizing db.Column(BOOLEAN, default=0).
a_boolean_field = db.Column(db.Boolean(), default=False)
I've just tested it and the above works; Flask-Migrate was able to detect it.

You can do it like so:
from sqlalchemy import Boolean, Column
invited = Column(Boolean, nullable=False, default=False)
this should work with MySQL.
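For reference, a minimal sketch of what the autogenerated migration for such a column could look like (table name and nullability are assumptions; on MySQL, sa.Boolean() is rendered as TINYINT(1)):
from alembic import op
import sqlalchemy as sa

def upgrade():
    # MySQL renders sa.Boolean() as TINYINT(1)
    op.add_column('user', sa.Column('invited', sa.Boolean(), nullable=True))

def downgrade():
    op.drop_column('user', 'invited')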

Related

In SQLAlchemy, how should I specify that the relationship field is required?

I have a model that depends on some fields of another model. These fields should be present when the record is created, but I do not see a way to enforce that at the database level:
class Study(db.Model):
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    type = db.Column(Enum(StudyTypeChoices), nullable=False)
    owner_id = db.Column(UUID(as_uuid=True), db.ForeignKey('owner.id'), nullable=False)
    participants = db.relationship('Participant', lazy=True, cascade='save-update, merge, delete')
How can I make sure that 'participants' is provided when the Study record gets created (similar to what happens with the 'type' field)? I know I can put a wrapper around it to make sure of that, but I am wondering if there is a neater way of doing it with sqlalchemy.
Edit: This is the definition of the Participant model
class Participant(UserBase):
    id = db.Column(UUID(as_uuid=True), db.ForeignKey("user_base.id"), primary_key=True)
    study_id = db.Column(UUID(as_uuid=True), db.ForeignKey('study.id'))
You can listen to before_flush events and prevent flushes containing studies without participants, for instance by raising an exception.
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    for instance in session.new:  # might want to inspect session.dirty as well
        if isinstance(instance, Study) and (
            instance.participants is None or instance.participants == []
        ):
            raise ValueError(
                f"Study {instance} cannot have {instance.participants} participants."
            )
This only checks for new studies; you might want to check session.dirty as well for updated studies, as sketched after the full demo below.
Full demo:
from sqlalchemy import Column, ForeignKey, Integer, create_engine, event
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Study(Base):
    __tablename__ = "study"
    id = Column(Integer, primary_key=True)
    participants = relationship("Participant", uselist=True, back_populates="study")

class Participant(Base):
    __tablename__ = "participant"
    id = Column(Integer, primary_key=True)
    study_id = Column(Integer, ForeignKey("study.id"), nullable=True)
    study = relationship("Study", back_populates="participants")

@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    for instance in session.new:  # might want to inspect session.dirty as well
        if isinstance(instance, Study) and (
            instance.participants is None or instance.participants == []
        ):
            raise ValueError(
                f"Study {instance} cannot have {instance.participants} participants."
            )

engine = create_engine("sqlite://", future=True, echo=True)
Base.metadata.create_all(engine)

s1 = Study()
p1_1 = Participant()
p1_2 = Participant()
s1.participants.extend([p1_1, p1_2])

s2 = Study()

with Session(bind=engine) as session:
    session.add(s1)
    session.commit()  # OK

with Session(bind=engine) as session:
    session.add(s2)
    session.commit()  # ValueError
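To also guard updated studies, the same listener can scan session.dirty in addition to session.new. A minimal sketch (replacing the listener above; not part of the original answer):
@event.listens_for(Session, "before_flush")
def before_flush(session, flush_context, instances):
    # check both newly added and modified objects in this flush
    for instance in list(session.new) + list(session.dirty):
        if isinstance(instance, Study) and not instance.participants:
            raise ValueError(f"Study {instance} must have participants.")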

Is it possible to query using raw SQL and get objects back?

In Hibernate it's possible to query using raw SQL and get entities (objects) back. Something like: createSQLQuery(sql).addEntity(User.class).list().
Is it possible to do the same in sqlalchemy?
As @Ilja notes via a link in a comment to the question, it is possible to do what you describe using .from_statement(), as described in the documentation:
from sqlalchemy import Column, create_engine, Integer, select, String, text
from sqlalchemy.orm import declarative_base, Session

engine = create_engine("sqlite://")
Base = declarative_base()

class Person(Base):
    __tablename__ = "person"
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)

    def __repr__(self):
        return f"<Person(id={self.id}, name='{self.name}')>"

Base.metadata.create_all(engine)

# sample data
with Session(engine) as session, session.begin():
    session.add_all(
        [Person(name="Adam"), Person(name="Alicia"), Person(name="Brandon")]
    )

# test
with Session(engine) as session, session.begin():
    sql = "SELECT id FROM person WHERE name LIKE 'A%'"
    results = session.scalars(select(Person).from_statement(text(sql))).all()
    print(results)
    # [<Person(id=1, name='Adam')>, <Person(id=2, name='Alicia')>]
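For completeness, with the legacy 1.x-style Query API the same technique is spelled like this (assuming the Person model and text clause above):
results = session.query(Person).from_statement(text(sql)).all()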
When using the EntityManager (in JPA/Hibernate) you can try:
entityManager.createNativeQuery("select some native query", User.class)
According to the API:
public Query createNativeQuery(String sqlString, Class resultClass);

sqlalchemy relationship select from other table instead of insert

I'm having difficulties with relationships. I have users and roles and have defined a model and schema for each.
The problem is when I try to add a new user with a previously defined role (I have its ID and name): SQLAlchemy tries to update/insert the role table with the values the user specifies. But I only want to select from roles and set the result as the user's role, not update the role table (if the role is not found, return an error).
What I want to achieve is how to stop SQLAlchemy from updating related tables with values that the user specifies.
Here are my models:
class User(db.Model):
    """user model
    """
    __tablename__ = 'user'
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    username = db.Column(db.String(40), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)
    role_id = db.Column(UUID(as_uuid=True), db.ForeignKey('role.id'), nullable=False)

class Role(db.Model):
    """role model
    """
    __tablename__ = 'role'
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)
    name = db.Column(db.String(40), unique=True, nullable=False)
    perm_add = db.Column(db.Boolean, default=False)
    perm_edit = db.Column(db.Boolean, default=False)
    perm_del = db.Column(db.Boolean, default=False)
here is the schema that I defined:
class UserSchema(ma.SQLAlchemyAutoSchema):
    password = ma.String(load_only=True, required=True)
    email = ma.String(required=True)
    role = fields.Nested("RoleSchema", only=("id", "name"), required=True)

    class Meta:
        model = User
        sqla_session = db.session
        load_instance = True
And I grab the user input, which is validated by the schema, and commit it to the DB.
schema = UserSchema()
user = schema.load(request.json)
db.session.add(user)
try:
    db.session.commit()
The point is that I cannot control what happens with the role name or ID here, as it seems they are already changed by the schema before being applied to the DB (I mean from request.json).
In my example, I am using the additional webargs library. It facilitates validation on the server side and enables clean notation. Since webargs is based on marshmallow anyway, I think the addition makes sense.
I have based myself on your specifications. Depending on what you intend to do further, you may need to make adjustments.
I added a relationship to the user model to make the role easier to use.
class User(db.Model):
    """user model"""
    # ...
    # The role is mapped by sqlalchemy using the foreign key
    # as an object and can be reached via a virtual relationship.
    role = db.relationship('Role')
I have allowed the foreign key as a load-only input field in the schema and limited the nested role schema to output. The email is mapped to the username.
class RoleSchema(ma.SQLAlchemyAutoSchema):
    class Meta:
        model = Role
        load_instance = True

class UserSchema(ma.SQLAlchemyAutoSchema):
    # The entry of the email is converted into a username.
    username = ma.String(required=True, data_key='email')
    password = ma.String(required=True, load_only=True)
    # The foreign key is only used here for loading.
    # (The models above use UUID primary keys, so a UUID field fits better than an integer.)
    role_id = ma.UUID(required=True, load_only=True)
    # The role is dumped with a query.
    role = ma.Nested("RoleSchema", only=("id", "name"), dump_only=True)

    class Meta:
        model = User
        load_instance = True
        include_relationships = True
It is now possible to query the role from the database and react if it does not exist. The database table for the roles is no longer updated automatically.
from flask import abort, jsonify
from sqlalchemy.exc import SQLAlchemyError
from webargs.flaskparser import use_args, use_kwargs

# A schema-compliant input is expected as JSON
# and passed as a parameter to the function.
@blueprint.route('/users/', methods=['POST'])
@use_args(UserSchema(), location='json')
def user_new(user):
    # The role is queried from the database and assigned to the user object.
    # If not available, 404 Not Found is returned.
    user_role = Role.query.get_or_404(user.role_id)
    user.role = user_role
    # Now the user can be added to the database.
    db.session.add(user)
    try:
        db.session.commit()
    except SQLAlchemyError as exc:
        # If an error occurs while adding to the database,
        # 422 Unprocessable Entity is returned.
        db.session.rollback()
        abort(422)
    # Upon successful completion, the new user is returned
    # with the status code 201 Created.
    user_schema = UserSchema()
    user_data = user_schema.dump(user)
    return jsonify(data=user_data), 201
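A hypothetical request against this endpoint, using Flask's test client (app, existing_role, and the values are assumptions, not part of the original answer):
client = app.test_client()
response = client.post('/users/', json={
    'email': 'alice@example.com',      # mapped to username via data_key
    'password': 'secret',
    'role_id': str(existing_role.id),  # must reference an existing role
})
print(response.status_code)  # 201 if the role exists, 404 otherwise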

Dialect-specific table and column names in sqlalchemy while reusing an existing ORM mapping

I use sqlalchemy's ORM library to interface with one of our application's databases. On one host, the database backend of an application instance is PostgreSQL, while on another, we use MySQL for historical reasons.
The schemas (and relationships) are identical on both backends, but the table and column names in PostgreSQL are lowercase, while on MySQL they are uppercase. Is there any way I can adapt my schema in sqlalchemy such that I don't need to duplicate code?
The following is an example of my schema. Hint: the application is a Confluence wiki ;)
from sqlalchemy import (
    BigInteger,
    Column,
    DateTime,
    String,
)
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Space(Base):
    __tablename__ = 'spaces'
    spaceid = Column(BigInteger, primary_key=True)
    spacename = Column(String(255))
    spacekey = Column(String(255), nullable=False, unique=True)
    lowerspacekey = Column(String(255), nullable=False, index=True)
    spacedescid = Column(BigInteger, index=True)
    homepage = Column(BigInteger, index=True)
    creator = Column(String(255), index=True)
    creationdate = Column(DateTime, index=True)
    lastmodifier = Column(String(255), index=True)
    lastmoddate = Column(DateTime)
    spacetype = Column(String(255))
    spacestatus = Column(String(255), index=True)
Thanks to @rfkortekaas' suggestion, I managed to solve the issue by automapping the schema and subsequently overriding the naming scheme for table and column names.
The documentation of sqlalchemy has two very useful pages:
Overriding Naming Schemes
Intercepting Column Definitions
For posterity's sake:
from sqlalchemy import create_engine, event
from sqlalchemy.ext.automap import automap_base

from .utilities import to_pascal_case, to_snake_case

AutomapBase = automap_base()

@event.listens_for(AutomapBase.metadata, "column_reflect")
def column_reflect(inspector, table, column_info):
    # expose the reflected column under a snake_case attribute key
    column_info['key'] = to_snake_case(column_info['name'])

def classname_for_table(base, table_name, table):
    return to_pascal_case(table_name)

if __name__ == '__main__':
    dbcs = 'postgresql://username:password@127.0.0.1:3306/database'
    automap_engine = create_engine(dbcs, future=True, echo=verbose > 2)  # verbose is defined elsewhere
    AutomapBase.prepare(
        autoload_with=automap_engine,
        classname_for_table=classname_for_table,
    )
    # Access the automapped classes with AutomapBase.classes.<class_name>
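The .utilities helpers are not shown in the answer; given the uppercase names in the Confluence MySQL schema, minimal hypothetical implementations might look like:
def to_snake_case(name):
    # 'SPACEID' -> 'spaceid'; mixed-case names would need a real converter
    return name.lower()

def to_pascal_case(name):
    # 'OS_USERS' -> 'OsUsers'
    return ''.join(part.capitalize() for part in name.lower().split('_'))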

How to create postgresql's sequences in Alembic

I'm using alembic to maintain my tables. At the same time, I update my models using the declarative way.
This is one of the alembic table definitions:
op.create_table(
    'groups',
    Column('id', Integer, Sequence('group_id_seq'), primary_key=True),
    Column('name', Unicode(50)),
    Column('description', Unicode(250)),
)
And the model is like the following:
class Group(Base):
    __tablename__ = 'groups'
    id = Column(Integer, Sequence('group_id_seq'), primary_key=True)
    name = Column(Unicode(50))
    description = Column(Unicode(250))

    def __init__(self, name, description):
        self.description = description
        self.name = name
As you can see, I'm using the Sequence both in the alembic migration and in the declarative model.
But I have noticed that when using PostgreSQL (v9.1) no sequences are created by alembic, and so the models fail to create instances since they will use the nextval(<sequence name>) clause.
So, how can I create my alembic migrations so that the sequences are truly generated in postgresql?
Just add the following to your model:
field_seq = Sequence('groups_field_seq')
field = Column(Integer, field_seq, server_default=field_seq.next_value())
And add the following to your migration file (before creating the table):
from sqlalchemy.schema import Sequence, CreateSequence
op.execute(CreateSequence(Sequence('groups_field_seq')))
Found a hint at https://bitbucket.org/zzzeek/alembic/issue/60/autogenerate-for-sequences-as-well-as#comment-4100402
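For the downgrade path, sqlalchemy.schema also provides a DropSequence construct; a minimal sketch mirroring the snippet above:
from sqlalchemy.schema import Sequence, DropSequence

op.execute(DropSequence(Sequence('groups_field_seq')))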
Following the CreateSequence found in the previous link, I still have to jump through several hoops to make my migrations work on SQLite and PostgreSQL. Currently I have:
from alembic import op
from sqlalchemy.schema import CreateSequence, Sequence

def dialect_supports_sequences():
    return op._proxy.migration_context.dialect.supports_sequences

def create_seq(name):
    if dialect_supports_sequences():
        op.execute(CreateSequence(Sequence(name)))
And then I call create_seq whenever I need it.
Is this the best practice?
Not sure if I got your question right but as nobody else chose to answer, here is how I get perfectly normal ids:
Alembic:
op.create_table(
    'event',
    sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
    # ...
)
The class:
class Event(SQLADeclarativeBase):
    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
I ran into this same issue recently and here is how I solved it.
op.execute("create sequence SEQUENCE_NAME")
I ran the above statement inside the upgrade function, and the following inside the downgrade function, respectively.
op.execute("drop sequence SEQUENCE_NAME")