Alembic Revision Autogenerate Didn't Recognize the Default Value Change - sqlalchemy

I was testing Alembic.
Initially I created a model something like this:
from main import Base
from sqlalchemy import Column, BigInteger, SmallInteger, String, Sequence, ForeignKey
class Users(Base):
    """Initial version of the users model (before user_type was added)."""
    __tablename__ = "users"
    # BigInteger surrogate key backed by an explicit sequence.
    id = Column(BigInteger, Sequence("user_id_seq"),
                primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))
    email = Column(String(255))
    # nullable=True is already the default for non-PK columns; kept explicit.
    password = Column(String(60), nullable=True)
Then I created the revision in alembic and it worked absolutely fine and I got the result properly.
Then I added the user types table and then my models looked like this,
from main import Base
from sqlalchemy import Column, BigInteger, SmallInteger, String, Sequence, ForeignKey
class Users(Base):
    """Second version of the users model, now with a user_type FK."""
    __tablename__ = "users"
    id = Column(BigInteger, Sequence("user_id_seq"),
                primary_key=True)
    first_name = Column(String(50))
    last_name = Column(String(50))
    email = Column(String(255))
    password = Column(String(60), nullable=True)
    # FK with an explicit constraint name so migrations can reference it.
    user_type = Column(SmallInteger, ForeignKey(
        "user_types.id", name="fk_user_type"))
class UserTypes(Base):
    """Lookup table referenced by Users.user_type."""
    __tablename__ = "user_types"
    id = Column(SmallInteger, Sequence("user_types_id_seq"),
                primary_key=True)
    type = Column(String(20))
Now I created the revision for this and obviously that also worked.
But then I thought: let's make the user_type default value 1. So I made a small change in the Users model and added a default value of 1:
user_type = Column(SmallInteger, ForeignKey(
"user_types.id", name="fk_user_type"), default=1)
Now, ideally, creating a migration should detect this change. But it gave me a blank file:
"""Made Default Value Of user_type 1
Revision ID: 054b79123431
Revises: 84bc1adb3e66
Create Date: 2022-12-28 17:20:06.757224
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '054b79123431'
down_revision = '84bc1adb3e66'
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Apply this revision.

    NOTE(review): autogenerate produced no operations because
    Column(default=...) is a Python-side default applied at INSERT time;
    it never exists in the database schema, so there is nothing for the
    autogenerate comparison to detect (only server_default is compared,
    and then only with compare_server_default=True).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert this revision (no-op, mirroring the empty upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
I also tried adding compare_server_default=True to the context.configure() calls in both the offline and online migration functions, as suggested in an answer I found online about this same issue, but that also didn't work. Here is the link.
So if anyone knows the solution for this please tell me I would really be thankful to you!

Related

Alembic attempts to recreate all tables in the Base class on every migration

In my env.py I have set my target_metadata to Base.metadata which I import from models.py. I have a fresh database with a schema named basic that I want to use to create the tables and setup my models.py like this:
from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, MetaData, String
from sqlalchemy.orm import declarative_base
Base = declarative_base(metadata=MetaData(schema='basic'))
class User(Base):
    """User model; lives in the 'basic' schema set on Base's MetaData."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    # Python-side default evaluated at insert time, not a server default.
    created_on = Column(DateTime, default=datetime.utcnow)
I run alembic revision --autogenerate -m"Create user model" and run alembic upgrade heads. Everything works as expected and I have table user in my database under the schema basic.
Now I want to add a table country. I add it to my models.py which now looks like this:
from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, MetaData, String
from sqlalchemy.orm import declarative_base
Base = declarative_base(metadata=MetaData(schema='basic'))
class User(Base):
    """User model (unchanged from the first migration)."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow)
class Country(Base):
    """Newly added model; only this table should appear in the migration."""
    __tablename__ = 'country'
    id = Column(Integer, primary_key=True)
    country = Column(String, nullable=False)
    created_on = Column(DateTime, default=datetime.utcnow)
I run alembic revision --autogenerate -m"Create country model" which creates a new versions file that looks like this:
"""Create country model
Revision ID: 0eef32919b0d
Revises: 2da4668d1069
Create Date: 2023-01-19 15:39:08.778274
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0eef32919b0d'
down_revision = '2da4668d1069'
branch_labels = None
depends_on = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('country',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('country', sa.String(), nullable=False),
        sa.Column('created_on', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        schema='basic'
    )
    # NOTE(review): 'user' already exists in the DB — autogenerate re-emits
    # it because, without include_schemas=True, reflection does not look at
    # tables under the non-default 'basic' schema, so it believes both
    # tables are missing. Running this fails with "basic.user already exists".
    op.create_table('user',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('name', sa.String(), nullable=False),
        sa.Column('created_on', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        schema='basic'
    )
    # ### end Alembic commands ###
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Drops both tables — mirrors the (incorrect) double create above.
    op.drop_table('user', schema='basic')
    op.drop_table('country', schema='basic')
    # ### end Alembic commands ###
Why does it also try to create the table user again? Running this will give an error that the object basic.user already exists. How can I fix this so that it looks at the current state of the db and only wants to create the table country?
Setting the option include_schemas=True (which is suggested in this thread: Alembic - sqlalchemy does not detect existing tables) helps but then includes all schemas and I only want it to be aware of this single schema.
I only want it to be aware of this single schema.
Then you also need to use include_name=, like so:
def run_migrations_online():
    # … (engine/connectable setup elided in the original answer)
    def include_name(name, type_, parent_names):
        """Restrict autogenerate reflection to the 'basic' schema only."""
        if type_ == "schema":
            # note this will not include the default schema
            return name in ["basic"]
        else:
            return True
    with connectable.connect() as connection:
        context.configure(
            connection=connection, target_metadata=target_metadata,
            # include_schemas makes reflection schema-aware; include_name
            # then filters which schemas are considered.
            include_schemas=True,
            include_name=include_name
        )

How to backfill an incrementing id using alembic in postgres

I have a Flask app that is backed by a Postgres database using flask-sqlalchemy. I've been using Miguel Grinberg's Flask-Migrate to handle migrations, although I've come to realize that since it is a wrapper on top of Alembic, I'm best served by asking questions framed in Alembic.
The problem is that I have an association table that I forgot to add a unique id to.
Here is my class for the table with the new column. But I have some records in my database, so trying to run the default migration script of course gives me the "column cannot contain nullable values" error.
class HouseTurns(db.Model):
    """Association table linking a user, house and movie for one 'turn'."""
    __tablename__ = 'house_turns'
    __table_args__ = {'extend_existing': True}
    # Fixed: the original used a C-style `//` comment, which Python parses
    # as floor division and raises a SyntaxError; Python comments use `#`.
    id = db.Column(db.Integer, primary_key=True)  # the new column I want to add
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), primary_key=True)
    house_id = db.Column(db.Integer, db.ForeignKey("houses.id"), primary_key=True)
    movie_id = db.Column(db.Integer, db.ForeignKey("movies.id"), primary_key=True)
    created_at = db.Column(db.DateTime, default=db.func.current_timestamp())
    user = db.relationship(User, lazy="joined")
    house = db.relationship(House, lazy="joined")
    movie = db.relationship(Movie, lazy="joined")
And here's the migration script generated by alembic
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Fails on a populated table: existing rows have no value for the new
    # NOT NULL column (hence the "column cannot contain null values" error).
    op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('house_turns', 'id')
    # ### end Alembic commands ###
I am really at a loss for how to write a migration that backfills the ids for the existing records with unique values. They don't necessarily need to be unique ids, just incrementing integers.
In the upgrade method you need to alter the add_column statement so that the column is initially created with nullable=True, then you can backfill the column, and then alter the column back to nullable=False.
That is, this line:
op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=False))
becomes:
op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=True))
You can then add statements to backfill your column. For example, looking at this SO question 'How to update with incrementing value', would give you something along the following lines (untested):
def upgrade():
    """Add house_turns.id, backfill existing rows, then make it NOT NULL.

    Fixes two syntax errors in the original snippet:
    - the setval() call was missing its closing parenthesis;
    - the UPDATE statement used unescaped single quotes inside a
      single-quoted Python string.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Make the id initially nullable so the column can be added to a
    # table that already contains rows.
    op.add_column('house_turns', sa.Column('id', sa.Integer(), nullable=True))
    # create the temporary sequence - will be dropped at the end of this session
    op.execute('create temporary sequence sequence_house_turns')
    # start the sequence at 1 (is_called=false => first nextval() returns 1)
    op.execute("select setval('sequence_house_turns', 1, false)")
    # update the existing rows with incrementing ids
    op.execute("update house_turns set id=nextval('sequence_house_turns') where id is null")
    # Make the id required
    op.alter_column('house_turns', 'id', nullable=False)
    # ### end Alembic commands ###

Why is deleted data still shown in Sqlalchemy Flask?

I am working on a task to remove the added pages to the postgresql database using sqlalchemy.
The page is getting deleted from the server but fails to get deleted from the database.
here's the function which deletes the page:
def delete_page(self, page_id, application):
    """Remove a page from in-memory structures and (attempt to) delete its DB rows.

    NOTE(review): indentation was reconstructed from a flattened paste —
    confirm the try-block really sits inside the `if` before relying on this.
    """
    # assume the page is removed from every campaign until proven otherwise
    removed_from_everyone = True
    # for each campaign_id
    for campaign_id in self.bandit_campaigns:
        if page_id in self.bandit_pages[campaign_id].keys():
            # If the page is active
            if self.bandit_pages[campaign_id][page_id]:
                removed_from_everyone = False
    # check if the page exists and if it's not used by anyone
    if page_id in self.structure.keys() and removed_from_everyone:
        del self.structure[page_id]
        # for all the campaigns
        for campaign_id in self.bandit_campaigns:
            # puts it in the new structure
            del self.bandit_pages[campaign_id][page_id]
        application.logger.info(f'page_id: {page_id}')
        application.logger.info(f'type page_id: {type(page_id)}')
        # remove arm
        self.remove_arm(page_id)
        application.logger.info(f'pages: {self.pages}')
        # Backup of the situation, in this case save only the pages
        # pickle.dump(self.structure, open('structure.pickle', "wb"))
        # this one store the last know status
        # pickle.dump(self.bandit_pages, open('bandit_pages.pickle', "wb"))
        try:
            # NOTE(review): filter_by() returns a Query object, not a model
            # instance; db.session.delete() expects an instance, so these
            # calls raise and the rollback below undoes everything — the
            # likely reason rows were never deleted. Also `campaign_id`
            # here is whatever value the loop above left behind.
            pg = Structure.query.filter_by(page_url=page_id)
            db.session.delete(pg)
            bp = Bandit_pages.query.filter_by(campaign_id=campaign_id)
            db.session.delete(bp)
            db.session.commit()
        except Exception as e:
            print("exception in new page deletion", e)
            db.session.rollback()
here's the code of Structure and Bandit_page tables creation:
class Structure(db.Model):
    """Maps an arm id to its page URL."""
    __tablename__ = 'structure'
    arm_id = db.Column(db.Integer, primary_key=True)
    page_url = db.Column(db.String())

    def __init__(self, arm_id, page_url):
        self.arm_id = arm_id
        self.page_url = page_url
class Bandit_pages(db.Model):
    """Per-campaign page status; composite PK over (campaign_id, arm_id)."""
    __tablename__ = 'bandit_pages'
    campaign_id = db.Column(db.String())
    arm_id = db.Column(db.Integer)
    status = db.Column(db.Boolean, default=False)
    __table_args__ = (
        PrimaryKeyConstraint('campaign_id', 'arm_id'),
        {},)

    def __init__(self, campaign_id, arm_id, status):
        self.campaign_id = campaign_id
        self.arm_id = arm_id
        self.status = status
I tried a way to delete them by using a for loop and then deleting it but that didn't help.
Also the function to add pages is similar to the delete page function,so I am unclear where I am making a mistake. Please help me out. Thanks!
You Can Try these Commands ->
db.session.delete(me)
db.session.commit()
Your code is complicated: you use the same loop a few times with the same variable name.
As a result of this you try to remove only the last value from self.bandit_campaigns:
bp = Bandit_pages.query.filter_by(campaign_id=campaign_id)
db.session.delete(bp)
db.session.commit()
Also, I'm not sure that the keys of self.bandit_campaigns are your string ids; check this too.
For deleting all elements from self.bandit_campaigns u can use:
db.session.query(Bandit_pages).filter(Bandit_pages.campaign_id.in_(your_list_of_values)).delete()
So I solved the question by myself. It turned out to be a trivial mistake that had been overlooked.
try:
    # Delete via Query.delete() — session.delete() only accepts mapped
    # instances, not Query objects (the original mistake).
    Structure.query.filter_by(page_url='ml3', arm_id=1).delete()  # .all()
    #db.session.delete(pg)
    Bandit_pages.query.filter_by(campaign_id='96879533', arm_id=1).delete()  # .all()
    #db.session.delete(bp)
    db.session.commit()
    # expire_all() so already-loaded objects are refreshed on next access
    db.session.expire_all()
except Exception as e:
    print("exception in new page deletion", e)
    db.session.rollback()
this piece of code(I changed a few lines) worked fine.

How to make the id auto increasing by 2 in the Model of Django?

The auto_increment_increment setting in MySQL is global, so it cannot be applied to one specific table. Is it possible instead to make the id auto-increment by 2 in a Django model?
models.py
class Video(models.Model):
    """Video model; Django adds an implicit AutoField `id` primary key."""
    # Fixed: the original read `model.CharField`, which raises a NameError —
    # the imported module is `models`.
    name = models.CharField(max_length=100, default='')
    upload_time = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.name
What should I do? Thanks for ur help.
You could do it by overriding the save() method of your model:
from django.db.models import Max, F
class Video(models.Model):
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=100, default='')
    upload_time = models.DateTimeField(blank=True, null=True)

    def save(self, *args, **kwargs):
        """On first save, assign an id two greater than the current maximum.

        NOTE(review): not concurrency-safe — two simultaneous inserts can
        read the same max and collide; wrap in a transaction with
        select_for_update() if that matters.
        """
        if not self.pk:
            # renamed from `max` so the builtin is not shadowed
            max_id = Video.objects.aggregate(max=Max(F('id')))['max']
            self.id = max_id + 2 if max_id else 1  # if the DB is empty
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name
The correct way is to change your MySQL server settings. Check this out: auto_increment_increment
Possible Solutions:
Assume I have a Model of Customer.
# Example (fixed): the original had a stray unbalanced parenthesis and a
# trailing space inside the order_by() field name.
# NOTE(review): this default is evaluated ONCE at class definition and
# crashes on an empty table (.last() returns None) — prefer a callable.
primay_key_id = models.IntegerField(
    default=(Customer.objects.order_by('primay_key_id').last().primay_key_id + 2),
    primary_key=True)
or
from django.db import transaction
#Uncomment Lines for Django version less than 2.0
def save(self, *args, **kwargs):
    """Get the last id from the database and increment it before the first save.

    Fixes versus the original:
    - `[0]` raised IndexError on an empty table; `.first()` returns None.
    - the id was reassigned on EVERY save (including updates); now only
      when the instance has no primary key yet.
    - `*args, **kwargs` are forwarded so Django's save options
      (update_fields, using, ...) still work.
    """
    #with transaction.atomic():
    #top = Customer.objects.select_for_update(nowait=True).order_by('-customer_customerid')[0] #Ensures Lock on Database
    if not self.pk:
        top = Customer.objects.order_by('-id').first()
        self.id = top.id + 1 if top else 1
    super(Customer, self).save(*args, **kwargs)
The Above Code would not have a Concurrency Issue for Django 2.0 as:
As of Django 2.0, related rows are locked by default (not sure what the behaviour was before) and the rows to lock can be specified in the same style as select_related using the of parameter!
For Lower Versions, you need to be atomic!
or
from django.db import transaction
def increment():
    """Return the next id: 1 for an empty table, otherwise last id + 2.

    Fixed: the original's `if length <= 0: return 1` also returned 1 when
    exactly ONE row existed (len(ids)-1 == 0), colliding with that row's id.
    """
    with transaction.atomic():
        ids = Customer.objects.all()
        if not ids:  # handle empty table only
            return 1
        return ids[len(ids) - 1].customer_customerid + 2
or
from django.db import transaction
def increment():
    """Return the next id (last id + 2, or 1 when empty) under a row lock.

    Fixed: the original returned the Customer *object* itself ([0]), not an
    id, and raised IndexError on an empty table.
    """
    with transaction.atomic():
        top = Customer.objects.select_for_update(nowait=True).order_by('-customer_customerid').first()
        return top.customer_customerid + 2 if top else 1  # Ensures Atomic Approach!
Then set the primary key in your model to an IntegerField and assign primary_key_field=increment() for every new entry, like this:
and then in your Models.py
set the Primary_Key to:
# Fixed: `import increment()` is not valid Python syntax, and
# `default=increment()` would be evaluated only ONCE at class definition.
# Pass the callable itself so Django re-evaluates it for every new row.
from myapp.utils import increment  # adjust to wherever increment() is defined
primay_key_id = models.IntegerField(default=increment, primary_key=True)

sqlalchemy ORM: how to declare a table class that has a composite (multi-column) primary key?

I'm a newbie in Sqlalchemy.
I have a table with multiple key columns, including USERNAME.
This is what I've done in my model.
Model:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# NOTE(review): 'root#localhost' looks like a mangled 'root@localhost'
# (user@host) — verify the URI; also no password is supplied here.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root#localhost/admin'
db = SQLAlchemy(app)
class RADUSAGE(db.Model):
    """Model with NO primary_key column — this is exactly why the mapper
    raises "could not assemble any primary key columns"; the ORM requires
    at least one primary-key column per mapped table."""
    __tablename__ = 'RADUSAGE'
    USERNAME = db.Column(db.String(513))
    AGE = db.Column(db.Integer)

    def __init__(self, USERNAME, AGE):
        self.USERNAME = USERNAME
        self.AGE = AGE

    def __repr__(self):
        return '<RADUSAGE %r>' % self.USERNAME
But I got an error:
File "/Users/admin/rad_env/lib/python2.7/site-packages/sqlalchemy/ext/declarative/base.py", line 530, in map
**self.mapper_args
File "<string>", line 2, in mapper
File "/Users/admin/rad_env/lib/python2.7/site-packages/sqlalchemy/orm/mapper.py", line 677, in __init__
self._configure_pks()
File "/Users/admin/rad_env/lib/python2.7/site-packages/sqlalchemy/orm/mapper.py", line 1277, in _configure_pks
(self, self.mapped_table.description))
sqlalchemy.exc.ArgumentError: Mapper Mapper|RADUSAGE|RADUSAGE could not assemble any primary key columns for mapped table 'RADUSAGE'
How can I declare this table class that contains multiple key in sqlalchemy? Hope someone can help me. Thanks in advance.
You should be able to specify primary_key=true in your mapping for each PK column:
# Inside the model class: mark BOTH columns primary_key=True to form a
# composite primary key, satisfying the mapper's requirement.
USERNAME = db.Column(db.String(513), primary_key=True)
AGE = db.Column(db.Integer, primary_key=True)
Did you try that?