Alembic attempts to recreate all tables in the Base class on every migration - sqlalchemy

In my env.py I have set my target_metadata to Base.metadata, which I import from models.py. I have a fresh database with a schema named basic in which I want to create the tables, and I set up my models.py like this:
from datetime import datetime

from sqlalchemy import Column, DateTime, Integer, MetaData, String
from sqlalchemy.orm import declarative_base

Base = declarative_base(metadata=MetaData(schema='basic'))


class User(Base):
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow)
I run alembic revision --autogenerate -m "Create user model" and then alembic upgrade heads. Everything works as expected and I have the table user in my database under the schema basic.
Now I want to add a table country. I add it to my models.py which now looks like this:
from datetime import datetime

from sqlalchemy import Column, DateTime, Integer, MetaData, String
from sqlalchemy.orm import declarative_base

Base = declarative_base(metadata=MetaData(schema='basic'))


class User(Base):
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow)


class Country(Base):
    __tablename__ = 'country'

    id = Column(Integer, primary_key=True)
    country = Column(String, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow)
I run alembic revision --autogenerate -m "Create country model", which creates a new version file that looks like this:
"""Create country model
Revision ID: 0eef32919b0d
Revises: 2da4668d1069
Create Date: 2023-01-19 15:39:08.778274
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0eef32919b0d'
down_revision = '2da4668d1069'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('country',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('country', sa.String(), nullable=False),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
schema='basic'
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('created_on', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
schema='basic'
)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user', schema='basic')
op.drop_table('country', schema='basic')
# ### end Alembic commands ###
Why does it also try to create the table user again? Running this gives an error that the object basic.user already exists. How can I fix this so that it looks at the current state of the database and only wants to create the table country?
Setting the option include_schemas=True (which is suggested in this thread: Alembic - sqlalchemy does not detect existing tables) helps, but then it includes all schemas, and I only want it to be aware of this single schema.

I only want it to be aware of this single schema.
Then you also need to use include_name=, like so:
def run_migrations_online():
    # …

    def include_name(name, type_, parent_names):
        if type_ == "schema":
            # note this will not include the default schema
            return name in ["basic"]
        else:
            return True

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            include_schemas=True,
            include_name=include_name
        )
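Some background on why user shows up again at all: with include_schemas left at its default of False, autogenerate only reflects tables from the connection's default schema. Your metadata places everything in basic, so the reflected default-schema tables never match basic.user, and Alembic concludes the table still needs to be created. With include_schemas=True the reflection is schema-qualified and the diff comes out right; the include_name hook (added in Alembic 1.5) then limits which schemas take part in the comparison.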

Related

Alembic Revision Autogenerate Didn't Recognize The Default Value Change

I was testing Alembic.
Initially I created a model something like this:
from main import Base
from sqlalchemy import Column, BigInteger, SmallInteger, String, Sequence, ForeignKey


class Users(Base):
    __tablename__ = "users"

    id = Column(BigInteger, Sequence("user_id_seq"),
                primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))
    email = Column(String(255))
    password = Column(String(60), nullable=True)
Then I created the revision in Alembic and it worked absolutely fine; I got the result properly.
Then I added the user types table, after which my models looked like this:
from main import Base
from sqlalchemy import Column, BigInteger, SmallInteger, String, Sequence, ForeignKey


class Users(Base):
    __tablename__ = "users"

    id = Column(BigInteger, Sequence("user_id_seq"),
                primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))
    email = Column(String(255))
    password = Column(String(60), nullable=True)
    user_type = Column(SmallInteger, ForeignKey(
        "user_types.id", name="fk_user_type"))


class UserTypes(Base):
    __tablename__ = "user_types"

    id = Column(SmallInteger, Sequence("user_types_id_seq"),
                primary_key=True)
    type = Column(String(20))
I created the revision for this and that also worked.
But then I decided to make the user_type default value 1, so I made a small change in the Users model and added the default:
user_type = Column(SmallInteger, ForeignKey(
    "user_types.id", name="fk_user_type"), default=1)
Ideally, creating a migration now should pick up this change, but it gave me a blank file:
"""Made Default Value Of user_type 1
Revision ID: 054b79123431
Revises: 84bc1adb3e66
Create Date: 2022-12-28 17:20:06.757224
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '054b79123431'
down_revision = '84bc1adb3e66'
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
I also tried adding compare_server_default=True to the context.configure calls in both the offline and online migration functions, as suggested in an answer I found on the internet about the same issue, but that didn't work either. Here is the link.
So if anyone knows the solution to this, please tell me; I would really be thankful!
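No answer is recorded above, but the behavior has a standard explanation: default=1 is a client-side Python default. SQLAlchemy applies it at INSERT time and it never becomes part of the table's DDL, so there is no schema difference for autogenerate to detect, and compare_server_default=True has nothing to compare. A server-side default, by contrast, is part of the DDL and is detected. A minimal sketch of the distinction, using the same column as in the question:

import sqlalchemy as sa
from sqlalchemy import Column, SmallInteger, ForeignKey

# default=1 lives only in Python and produces no DDL, so autogenerate is blind to it.
# server_default is rendered into CREATE TABLE / ALTER TABLE, and with
# compare_server_default=True in env.py, autogenerate will diff changes to it.
user_type = Column(
    SmallInteger,
    ForeignKey("user_types.id", name="fk_user_type"),
    server_default=sa.text("1"),
)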

SQLAlchemy Table classes and imports

I started a project using PostgreSQL and SQLAlchemy. Since I'm not an experienced programmer (I just started using classes) and I'm also quite new to databases, I've noticed some workflows I don't really understand.
What I understand of classes up till now is the following workflow:
# filename.py
class ClassName():
    def __init__(self):
        ...  # do something

    def some_function(self, var1, var2):
        ...  # do something with parameters
---------------------------------------
# main.py
from filename import ClassName

par1 = ...
par2 = ...
a = ClassName()
b = a.some_function(par1, par2)
Now I am creating tables from classes:
# base.py
from sqlalchemy.orm import declarative_base

Base = declarative_base()

# tables.py
from base import Base
from sqlalchemy import Column
from sqlalchemy import Integer, String


class A(Base):
    __tablename__ = "a"

    a_id = Column(Integer, primary_key=True)
    a_column = Column(String(30))


class B(Base):
    __tablename__ = "b"

    b_id = Column(Integer, primary_key=True)
    b_column = Column(String(30))
and
import typing

from base import Base
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy.orm import sessionmaker
from tables import A, B

metadata_obj = MetaData()


def create_tables(engine):
    session = sessionmaker()
    session.configure(bind=engine)
    Base.metadata.create_all(bind=engine)
    a = Table("a", metadata_obj, autoload_with=engine)
    b = Table("b", metadata_obj, autoload_with=engine)
    return (a, b)  # not sure return is needed


if __name__ == "__main__":
    username = "username"
    password = "AtPasswordHere!"
    dbname = "dbname"
    url = "postgresql://" + username + ":" + password + "@localhost/" + dbname
    engine = create_engine(url, echo=True, future=True)
    a, b = create_tables(engine)
Everything works fine in that it creates table a and table b in the database. The point I don't understand is the following:
Both my IDE (pyflakes) and LGTM complain 'Tables. ... imported but not used'. (EDIT: I understand why it complains, in the sense that this is not the normal class flow. The question is more about why it is not the normal class workflow.)
Is this normal behavior for this use case? I only see examples that make use of the above workflow.
Are there better methods to achieve the same results (but without the warnings)?
If this is the normal behavior: is there an explanation for it? I didn't read it anywhere.
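No answer is recorded here, but the warning has a standard explanation: a declarative class registers its Table on Base.metadata as a side effect of the class body running at import time. Base.metadata.create_all() then reads that registry, so A and B never need to be referenced by name; the import exists purely for its side effect. A sketch of a variant that keeps the linters quiet, assuming the same base.py and tables.py as above (the noqa marker is flake8 convention):

# Importing the module rather than the names still runs the class bodies,
# which registers tables "a" and "b" on Base.metadata.
import tables  # noqa: F401  (imported only for its registration side effect)

from base import Base


def create_tables(engine):
    # create_all() reads every table registered on Base.metadata,
    # so the model classes never need to be referenced here.
    Base.metadata.create_all(bind=engine)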

How to backfill an incrementing id using alembic in postgres

I have a Flask app that is backed by a Postgres database using Flask-SQLAlchemy. I've been using Miguel Grinberg's Flask-Migrate to handle migrations, although I've come to realize that since it is a wrapper on top of Alembic, I'm best served by asking questions framed in terms of Alembic.
The problem is that I have an association table that I forgot to add a unique id to.
Here is my class for the table with the new column. But I have some records in my database, so trying to run the default migration script of course gives me the "column cannot contain nullable values" error.
class HouseTurns(db.Model):
    __tablename__ = 'house_turns'
    __table_args__ = {'extend_existing': True}

    id = db.Column(db.Integer, primary_key=True)  # the new column I want to add
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), primary_key=True)
    house_id = db.Column(db.Integer, db.ForeignKey("houses.id"), primary_key=True)
    movie_id = db.Column(db.Integer, db.ForeignKey("movies.id"), primary_key=True)
    created_at = db.Column(db.DateTime, default=db.func.current_timestamp())

    user = db.relationship(User, lazy="joined")
    house = db.relationship(House, lazy="joined")
    movie = db.relationship(Movie, lazy="joined")
And here's the migration script generated by Alembic:
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=False))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('house_turns', 'id')
    # ### end Alembic commands ###
I am really at a loss for how to write a migration that backfills the ids for the existing records with unique values. They don't necessarily need to be unique ids, just incrementing integers.
In the upgrade method you need to alter the add_column statement so that the column is initially created with nullable=True; then you can backfill the column, and finally alter the column back to nullable=False.
That is, this line:
op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=False))
becomes:
op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=True))
You can then add statements to backfill your column. For example, following the SO question 'How to update with incrementing value' gives you something along the following lines (untested):
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Make the id initially nullable
    op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=True))
    # create the temporary sequence - will be dropped at the end of this session
    op.execute('create temporary sequence sequence_house_turns')
    # set the start value of the sequence to 1
    op.execute("select setval('sequence_house_turns', 1)")
    # update the existing rows
    op.execute("update house_turns set id=nextval('sequence_house_turns') where id is null")
    # Make the id required
    op.alter_column('house_turns', 'id', nullable=False)
    # ### end Alembic commands ###
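One caveat the answer leaves open (this addition is mine, and just as untested): the backfill gives existing rows ids, but nothing hands out ids to future inserts, because the column has no default attached. Assuming PostgreSQL, one sketch is to attach a permanent sequence owned by the column and advance it past the backfilled values; the name house_turns_id_seq here is just a conventional choice:

# Assumption: future rows should keep auto-incrementing after the backfill.
op.execute("create sequence if not exists house_turns_id_seq owned by house_turns.id")
# advance the sequence past the highest backfilled id (coalesce guards the empty-table case)
op.execute("select setval('house_turns_id_seq', coalesce((select max(id) from house_turns), 1))")
# make the sequence the column's server-side default
op.alter_column('house_turns', 'id',
                server_default=sa.text("nextval('house_turns_id_seq')"))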

Alembic autogenerate empty migration file

I'm trying to connect the alembic library to the databases and sqlalchemy libraries. As a guide, I am using this example: link
My projects file:
db.py
from databases import Database
from sqlalchemy import MetaData, create_engine

DATABASE_URL = "postgresql://....@localhost:5432/db"

engine = create_engine(DATABASE_URL)
metadata = MetaData()
database = Database(DATABASE_URL)
models.py
from sqlalchemy import Table, Column, Integer, String, DateTime
from sqlalchemy.sql import func

from db import metadata

notes = Table(
    "notes",
    metadata,
    Column("id", Integer, primary_key=True),
    Column("title", String(50)),
    Column("description", String(50)),
    Column("created_date", DateTime, default=func.now(), nullable=False),
)
env.py (alembic settings)
from db import DATABASE_URL, metadata

....

# add new
target_metadata = metadata

...

# change
def run_migrations_online():
    config.set_main_option('sqlalchemy.url', str(DATABASE_URL))

    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )
When I run
alembic revision --autogenerate -m 'Add notes table'
the following content is created in the new file under migrations/versions:
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
I suppose it may be related to the use of the target_metadata = metadata variable. Everything seems to be set up according to the instructions, but the migrations do not work as expected.
If anyone has a similar problem: all you have to do is import the tables from models.py into the env.py file before the metadata object is used.
env.py
...
from models import notes
from db import DATABASE_URL, metadata
...
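The reason this works (standard SQLAlchemy behavior, noted here for context): Table("notes", metadata, ...) attaches the table to the metadata object as a side effect of models.py being imported. If env.py never imports that module, target_metadata is an empty MetaData, autogenerate has nothing to compare against the database, and the migration comes out empty.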

Error when doing delete and commit with sqlalchemy

I get an error I don't understand when I do a session commit after a deletion like this (in a shell with the Flask app context, or anywhere while running the app):
>>> from app.extensions import db
>>> from app.models.user import User
>>> user = User.query.all()[0]
>>> db.session.delete(user)
>>> db.session.commit()
File "/Users/hugo/Dropbox/lahey/api/.venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 962, in module
    % (self._il_path, self._il_addtl))
ImportError: importlater.resolve_all() hasn't been called (this is sqlalchemy.orm strategy_options)
My model for the object I try to delete looks like this:
import datetime

from sqlalchemy_utils.types.password import PasswordType
from sqlalchemy_utils import force_auto_coercion

from app.extensions import db

# Setup coercion of passwords
force_auto_coercion()


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(PasswordType(schemes=['pbkdf2_sha512']), nullable=False)
    name = db.Column(db.String(256))
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
Deleting objects of other models works fine. Could this somehow be because I'm using the PasswordType column from sqlalchemy_utils?
If you are using Flask, the docstring says you are configuring the Column incorrectly:
Lazy configuration of the type with Flask config:

import flask
from sqlalchemy_utils import PasswordType, force_auto_coercion

force_auto_coercion()


class User(db.Model):
    __tablename__ = 'user'

    password = db.Column(
        PasswordType(
            # The returned dictionary is forwarded to the CryptContext
            onload=lambda **kwargs: dict(
                schemes=flask.current_app.config['PASSWORD_SCHEMES'],
                **kwargs
            ),
        ),
        unique=False,
        nullable=False,
    )
I've worked out what caused this. I've been using the package sqlalchemy_bulk_lazy_loader, which had a bug (strategy_options was not imported in a correct way). The issue is now fixed in the package.
See the full sqlalchemy mailing list thread for the details.