Multiple databases with the same table name in Flask-SQLAlchemy

I have multiple databases, such as order_db1, order_db2, and order_db3.
They all contain a table named order, but the data in order_db1.order differs from the data in order_db2.order:
order_db1.order
| id | amount |
|----|--------|
| 1  | 100    |
order_db2.order
| id | amount |
|----|--------|
| 1  | 200    |
How can I access multiple databases that share the same table name in Flask-SQLAlchemy?
I tried it like this, using SQLALCHEMY_BINDS:
# -*- coding: utf-8 -*-
from urllib.parse import quote_plus

from flask import Flask
from flask_sqlalchemy import SQLAlchemy


class Application(Flask):
    def __init__(self, import_name):
        super(Application, self).__init__(import_name)


app = Application(__name__)
# mysql_user, mysql_pwd and mysql_host are defined elsewhere
app.config['SQLALCHEMY_BINDS'] = {
    'order_db1': 'mysql://%s:%s@%s/order_db1' % (mysql_user, quote_plus(mysql_pwd), mysql_host),
    'order_db2': 'mysql://%s:%s@%s/order_db2' % (mysql_user, quote_plus(mysql_pwd), mysql_host),
}
db = SQLAlchemy(app)


class Order1(db.Model):
    __bind_key__ = 'order_db1'
    __tablename__ = 'order'
    id = db.Column(db.Integer, primary_key=True)  # a primary key is required for query.get()
    amount = db.Column(db.Integer)


class Order2(db.Model):
    __bind_key__ = 'order_db2'
    __tablename__ = 'order'
    id = db.Column(db.Integer, primary_key=True)
    amount = db.Column(db.Integer)


q1 = Order1.query.get(1)
q2 = Order2.query.get(1)
It told me:
sqlalchemy.exc.InvalidRequestError: Table 'order' is already defined for this MetaData instance. Specify 'extend_existing=True' to redefine options and columns on an existing Table object.
But if I add __table_args__ = {'extend_existing': True}:
class Order1(db.Model):
    __bind_key__ = 'order_db1'
    __tablename__ = 'order'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    amount = db.Column(db.Integer)


class Order2(db.Model):
    __bind_key__ = 'order_db2'
    __tablename__ = 'order'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    amount = db.Column(db.Integer)


q1 = Order1.query.get(1)
q2 = Order2.query.get(1)
then the data in q1 is the same as in q2: both return id=1, amount=200.
I want it to be:
q1: id=1, amount=100
q2: id=1, amount=200

There are known issues with binds and identical table names: all models of a single SQLAlchemy() instance share one MetaData object, and a table name can only be registered once per MetaData. Adding extend_existing just makes both classes map to the very same Table (and therefore the same bind), which is why q1 and q2 return the same row. See this discussion and this possible workaround.
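One workaround that is sometimes suggested for MySQL, sketched below as an assumption on my part rather than the solution linked above: because MySQL "databases" are schemas on the same server, you can keep a single SQLAlchemy instance (and a single default bind pointing at the server) and qualify each model with a schema, so the two order tables no longer collide inside one MetaData.
# Sketch (assumption): one default bind, schema-qualified models instead of SQLALCHEMY_BINDS.
# The connection user must be allowed to read both databases.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://%s:%s@%s/order_db1' % (mysql_user, quote_plus(mysql_pwd), mysql_host)


class Order1(db.Model):
    __tablename__ = 'order'
    __table_args__ = {'schema': 'order_db1'}   # maps to order_db1.`order`
    id = db.Column(db.Integer, primary_key=True)
    amount = db.Column(db.Integer)


class Order2(db.Model):
    __tablename__ = 'order'
    __table_args__ = {'schema': 'order_db2'}   # maps to order_db2.`order`
    id = db.Column(db.Integer, primary_key=True)
    amount = db.Column(db.Integer)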
An alternative workaround is to use two SQLAlchemy instances, each with its own MetaData, which may or may not be acceptable for your use case. See the single-file example below using SQLite.
import random

from flask import Flask, render_template_string
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_BINDS'] = {
    'order_db1': 'sqlite:///order_db1.sqlite',
    'order_db2': 'sqlite:///order_db2.sqlite',
}

db1 = SQLAlchemy(app)
db2 = SQLAlchemy(app)


class Order1(db1.Model):
    __bind_key__ = 'order_db1'
    __tablename__ = 'order'
    id = db1.Column(db1.Integer, primary_key=True)
    amount = db1.Column(db1.Integer)


class Order2(db2.Model):
    __bind_key__ = 'order_db2'
    __tablename__ = 'order'
    id = db2.Column(db2.Integer, primary_key=True)
    amount = db2.Column(db2.Integer)


_html_template = '''
<p>Order 1 count: {{ order_1_count }}</p>
<p>Order 2 count: {{ order_2_count }}</p>
'''


@app.route('/')
def index():
    order_1_count = Order1.query.count()
    order_2_count = Order2.query.count()
    return render_template_string(_html_template,
                                  order_1_count=order_1_count,
                                  order_2_count=order_2_count)


@app.before_first_request
def build_sample_db():
    db1.drop_all()
    db2.drop_all()
    db1.create_all()
    db2.create_all()
    # Create 100 records in db1
    db1.session.add_all([Order1(amount=random.randint(0, 100)) for _ in range(100)])
    db1.session.commit()
    # Create 1000 records in db2
    db2.session.add_all([Order2(amount=random.randint(1000, 10000)) for _ in range(1000)])
    db2.session.commit()


if __name__ == '__main__':
    app.run()
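With two separate SQLAlchemy instances, each model class has its own MetaData, so the identical table name no longer collides and each query goes to its own bind. Applied back to the original question, a quick check might look like this (a sketch, assuming the sample data above has been created):
with app.app_context():
    build_sample_db()               # create and populate both SQLite files
    q1 = Order1.query.get(1)        # row from order_db1.order
    q2 = Order2.query.get(1)        # row from order_db2.order
    print(q1.amount, q2.amount)     # two different values, one per database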

Related

When I use SQLAlchemy to query all, why does it not return specific values, and how do I deal with it?

The following is my code and the database table data:
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine('mssql+pymssql://sa:12345678@XXXX:1433/YYYY')
Base = declarative_base(engine)


class User(Base):
    __tablename__ = 'Products'
    Id = Column(String(20), primary_key=True)
    ProductName = Column(String(20))
    ProductDesc = Column(String(50))
    CreationTime = Column(String(20))
    ProductCategory = Column(String(50))


def test():
    db_session = sessionmaker(bind=engine)
    session = db_session()
    user = session.query(User).filter(User.Id == 5).all()
    print(user)
Query results: [<__main__.User object at 0x7fd56b265400>]
I want it to return the specific values of all the rows that meet the filter conditions.
So, what went wrong?
The query is returning mapped User objects, which is expected; print() only shows their default repr. To have it show the column values, give the model (the Products table mapped above) a __repr__, for example via a shared base class:
class BaseModel(object):
    __abstract__ = True

    def __repr__(self):
        # Render every mapped column as name=value.
        fmt = u'[{}]'
        attrs = (
            (k, str(getattr(self, k)).replace(' ', ''))
            for k in self.__mapper__.columns.keys()
        )
        sattrs = ','.join('{}={!r}'.format(*x) for x in attrs)
        return fmt.format(sattrs)


Base = declarative_base(cls=BaseModel)
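Alternatively, if you only want plain values rather than mapped objects, query specific columns; each result row is then a tuple-like object of values. This is a small sketch against the same Products model, not part of the original answer:
def test_values():
    db_session = sessionmaker(bind=engine)
    session = db_session()
    rows = (session.query(User.Id, User.ProductName, User.ProductDesc)
            .filter(User.Id == 5)
            .all())
    for row in rows:
        # row behaves like a named tuple: (Id, ProductName, ProductDesc)
        print(row.Id, row.ProductName, row.ProductDesc)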

Querying Flask-SQLAlchemy through two table joins - Join not working

I'm trying to join two tables.
Here are my tables, simplified:
class Spots(db.Model):
    __tablename__ = "spots"
    geobaseid = db.Column(db.Integer, primary_key=True, autoincrement=True)
    spot = db.Column(db.String(50))
    url_slug = db.Column(db.String(50))
    region = db.Column(db.String(50))
    country = db.Column(db.String(50))
    chop = db.Column(db.Integer)
    small_wave = db.Column(db.Integer)
    flat = db.Column(db.Integer)
    big_wave = db.Column(db.Integer)
    west_direction = db.Column(db.Integer)
    southwest_direction = db.Column(db.Integer)
    amount = db.Column(db.Integer)
    forcast = db.relationship('Forcast_short')


class Forcast_short(db.Model):
    __tablename__ = "forcast_short"
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    date = db.Column(db.Date)
    geobaseid = db.Column(db.Integer, db.ForeignKey('spots.geobaseid'))
    spot = db.Column(db.String(50))
    region = db.Column(db.String(50))
    country = db.Column(db.String(50))
    latitude = db.Column(db.Numeric(10, 8))
    longitude = db.Column(db.Numeric(10, 8))
    deg = db.Column(db.Numeric(65, 1))
    mps = db.Column(db.Numeric(65, 1))
Here's my query, which is not working:
forcast_query = (Forcast_short.query
                 .join(Spots, Spots.geobaseid == Forcast_short.geobaseid)
                 .filter(Forcast_short.region == region)
                 .all())
What am I doing wrong? When I run the query I only get results from Forcast_short, with and without the filter.
<tbody>
  {% for row in forcast_query %}
  <tr>
    <td>{{ row.spot }}</td>
    <td>{{ row.mps }}</td>
    <td>{{ row.url_slug }}</td>
  </tr>
  {% endfor %}
</tbody>
This query works in MySQL Workbench:
SELECT *
FROM (SELECT * FROM sc_db2.forcast_short) a
JOIN (SELECT * FROM sc_db2.spots) b
  ON a.geobaseid = b.geobaseid;
The way you set up your models is quite confusing. In keeping with the SQLAlchemy documentation on Relationship Patterns, which I advise you to read, here is how to set up a one-to-many relationship:
class Parent(Base):
    __tablename__ = 'parent'
    id = Column(Integer, primary_key=True)
    children = relationship("Child")


class Child(Base):
    __tablename__ = 'child'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parent.id'))
This relationship places a foreign key on the child table referencing the parent. relationship() is then specified on the parent, as referencing a collection of items represented by the child.
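For example (a minimal sketch, assuming a configured session; not part of the original answer), appending a Child to parent.children and committing is enough for SQLAlchemy to fill in the child's parent_id on flush:
parent = Parent()
parent.children.append(Child())
session.add(parent)
session.commit()
# child.parent_id now holds parent.id; no manual assignment or join is needed.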
ABOUT YOUR QUERY:
After harmonizing your models, your query should look like this:
forcast_query = (Forcast_short.query
                 .join(Spots, Spots.geobaseid == Forcast_short.geobaseid)
                 .filter(Forcast_short.region == region)
                 .filter(Forcast_short.date == date_1)
                 .all())
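Note that this query still returns only Forcast_short objects; the join only constrains which rows match. If you also need columns from Spots (such as url_slug) in the same result rows, one option is to query both entities, which yields (Forcast_short, Spots) tuples. This is a sketch assuming the models above, not part of the original answer:
rows = (db.session.query(Forcast_short, Spots)
        .join(Spots, Spots.geobaseid == Forcast_short.geobaseid)
        .filter(Forcast_short.region == region)
        .all())
for forecast, spot in rows:
    # Each row carries one object per entity in the query.
    print(forecast.mps, spot.url_slug)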
Here's a shorter example which illustrates a one-to-many relationship between spots and forecasts (which I think is what you're trying to do):
from app import db


class Spot(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    geobaseid = db.Column(db.Integer)
    forecasts = db.relationship('Forecast', backref='spot', lazy='dynamic')


class Forecast(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    region = db.Column(db.String(50))
    spot_id = db.Column(db.Integer, db.ForeignKey('spot.id'))
The db object is set up in the app package initialiser, following Miguel Grinberg's pattern:
from flask import Flask
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)

from app import models
Note that having set up the foreign key relationship, you don't need an explicit join to access the parent's fields from the child object - SQLAlchemy allows you to just use dot notation:
>>> from app import db
>>> from app.models import Spot, Forecast
>>> for spot in Spot.query.all():
...     print(spot.id, spot.geobaseid)
...
1 1
2 2
>>> for forecast in Forecast.query.all():
...     print(forecast.id, forecast.region, forecast.spot_id, forecast.spot.geobaseid)
...
1 Scotland 2 2
2 England 2 2

Creating a self-referencing M2M relationship in SQLAlchemy (+Flask)

While trying to learn Flask, I am building a simple Twitter clone. This would include the ability for a User to follow other Users. I am trying to set up a relational database through SQLAlchemy to allow this.
I figured I would need a self-referencing many-to-many relationship on the User. Following the SQLAlchemy documentation, I arrived at:
# imports omitted
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///twitclone.db'
db = SQLAlchemy(app)
Base = declarative_base()

user_to_user = Table("user_to_user", Base.metadata,
    Column("follower_id", Integer, ForeignKey("user.id"), primary_key=True),
    Column("followed_id", Integer, ForeignKey("user.id"), primary_key=True)
)


class User(db.Model):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=False)
    handle = Column(String, unique=True)
    password = Column(String, unique=False)
    children = relationship("tweet")
    following = relationship("user",
                             secondary=user_to_user,
                             primaryjoin=id == user_to_user.c.follower_id,
                             secondaryjoin=id == user_to_user.c.followed_id,
                             backref="followed_by"
                             )


# Tweet class goes here
db.create_all()

if __name__ == "__main__":
    app.run()
Running this code results in the database being created without any error messages. However, the association table connecting a user to a user is simply missing. This is the definition of the User table:
CREATE TABLE user (
    id INTEGER NOT NULL,
    name VARCHAR,
    handle VARCHAR,
    password VARCHAR,
    PRIMARY KEY (id),
    UNIQUE (handle)
)
Why does SQLAlchemy not create the self-referential relationship for the User?
note: I am new to both Flask and SQLAlchemy and could be missing something obvious here.
Ok, it seems I mixed up two different styles of using SQLAlchemy with Flask: SQLAlchemy's own declarative extension and the Flask-SQLAlchemy extension. Both are similar in capabilities, the difference being that the Flask extension adds some goodies such as session handling. Since db.create_all() only creates the tables registered on db.metadata, while my user_to_user table was attached to the separate declarative Base.metadata, the association table was never created. This is how I rewrote my code to use Flask-SQLAlchemy exclusively:
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///kwek.db'
db = SQLAlchemy(app)

# Table to handle the self-referencing many-to-many relationship for the User class:
# the first column holds the user who follows, the second the user who is being followed.
user_to_user = db.Table('user_to_user',
    db.Column("follower_id", db.Integer, db.ForeignKey("user.id"), primary_key=True),
    db.Column("followed_id", db.Integer, db.ForeignKey("user.id"), primary_key=True)
)


class User(db.Model):
    __tablename__ = 'user'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=False)
    handle = db.Column(db.String(16), unique=True)
    password = db.Column(db.String, unique=False)
    kweks = db.relationship("Kwek", lazy="dynamic")
    following = db.relationship("User",
                                secondary=user_to_user,
                                primaryjoin=id == user_to_user.c.follower_id,
                                secondaryjoin=id == user_to_user.c.followed_id,
                                backref="followed_by"
                                )

    def __repr__(self):
        return '<User %r>' % self.name


class Kwek(db.Model):
    __tablename__ = 'kwek'
    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(140), unique=False)
    post_date = db.Column(db.DateTime, default=datetime.now)  # pass the callable, not datetime.now()
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return '<Kwek %r>' % self.content


if __name__ == "__main__":
    app.run()
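A quick usage sketch (my addition, not part of the original answer) showing how the self-referencing relationship behaves once the tables exist:
with app.app_context():               # app context needed on recent Flask-SQLAlchemy versions
    db.create_all()
    alice = User(name="Alice", handle="alice", password="secret")
    bob = User(name="Bob", handle="bob", password="secret")
    alice.following.append(bob)       # Alice follows Bob
    db.session.add_all([alice, bob])
    db.session.commit()

    print(alice.following)            # [<User 'Bob'>]
    print(bob.followed_by)            # [<User 'Alice'>]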

SQLAlchemy insert many-to-one entries

Sorry if this is a newbie question, but the documentation on the many-to-one relationship doesn't seem to cover this. I have been looking for something similar to this (under the "How to Insert / Add Data to Your Tables" section); however, in the example shown the insertion is always unique.
Basically, I want to populate my database with data located on my local machine. For the sake of simplicity, I have reduced it to the minimal working example (MWE) shown below. It involves two tables, Price and Currency, implemented in the declarative style.
model.py
from sqlalchemy import Column, Integer, String
from sqlalchemy import Float, BigInteger, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class Currency(Base):
    __tablename__ = 'Currency'
    id = Column(Integer, primary_key=True)
    unit = Column(String(16), unique=True)

    def __init__(self, unit):
        self.unit = unit


class Price(Base):
    __tablename__ = 'Price'
    id = Column(BigInteger, primary_key=True)
    currency_id = Column(Integer, ForeignKey("Currency.id"), nullable=False)
    currency = relationship("Currency", backref="Currency.id")
    hour1 = Column(Float)
    hour2 = Column(Float)

    def __init__(self, hour1, hour2):
        self.hour1 = hour1
        self.hour2 = hour2
Currently, I am populating the database using the following code:
script.py
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from model import *

engine = create_engine('sqlite:///example.db', echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
session = db_session()

Base.metadata.create_all(engine)

oPrice = Price(2.5, 2.5)
oPrice.currency = Currency("EUR")
session.add(oPrice)

tPrice = Price(5.5, 1.5)
tPrice.currency = Currency("EUR")
session.add(tPrice)

session.commit()
This raises an error, because the second, separate Currency("EUR") object triggers another INSERT into Currency even though unit must be unique:
sqlalchemy.exc.IntegrityError: (IntegrityError) column unit is not unique u'INSERT INTO "Currency" (unit) VALUES (?)' ('EUR',)
What is the best strategy for populating my database so that the Currency.id and Price.currency_id mapping stays correct? Should the model classes check for uniqueness before they are initialized, and do I do that in association with the other table?
I'd second what Antti has suggested: since currencies have standard codes like 'INR', 'USD', etc., you can make the currency code the primary key.
Or, in case you want to keep the numeric primary key, one of the options is:
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/UniqueObject
Edit: adding an example based on the recipe in the link above (the one with the class decorator).
database.py
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker

engine = create_engine('sqlite:///example.db', echo=True)
db_session = scoped_session(sessionmaker(autocommit=False,
                                         autoflush=False,
                                         bind=engine))
model.py
from sqlalchemy import Column, Integer, String
from sqlalchemy import Float, BigInteger, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.declarative import declarative_base
from database import db_session

Base = declarative_base()


def _unique(session, cls, hashfunc, queryfunc, constructor, arg, kw):
    cache = getattr(session, '_unique_cache', None)
    if cache is None:
        session._unique_cache = cache = {}
    key = (cls, hashfunc(*arg, **kw))
    if key in cache:
        return cache[key]
    else:
        with session.no_autoflush:
            q = session.query(cls)
            q = queryfunc(q, *arg, **kw)
            obj = q.first()
            if not obj:
                obj = constructor(*arg, **kw)
                session.add(obj)
        cache[key] = obj
        return obj


def unique_constructor(scoped_session, hashfunc, queryfunc):
    def decorate(cls):
        def _null_init(self, *arg, **kw):
            pass

        def __new__(cls, bases, *arg, **kw):
            # no-op __new__(), called
            # by the loading procedure
            if not arg and not kw:
                return object.__new__(cls)
            session = scoped_session()

            def constructor(*arg, **kw):
                obj = object.__new__(cls)
                obj._init(*arg, **kw)
                return obj

            return _unique(
                session,
                cls,
                hashfunc,
                queryfunc,
                constructor,
                arg, kw
            )

        # note: cls must be already mapped for this part to work
        cls._init = cls.__init__
        cls.__init__ = _null_init
        cls.__new__ = classmethod(__new__)
        return cls
    return decorate


@unique_constructor(
    db_session,
    lambda unit: unit,
    lambda query, unit: query.filter(Currency.unit == unit)
)
class Currency(Base):
    __tablename__ = 'Currency'
    id = Column(Integer, primary_key=True)
    unit = Column(String(16), unique=True)

    def __init__(self, unit):
        self.unit = unit


class Price(Base):
    __tablename__ = 'Price'
    id = Column(BigInteger, primary_key=True)
    currency_id = Column(Integer, ForeignKey("Currency.id"), nullable=False)
    currency = relationship("Currency", backref="Currency.id")
    hour1 = Column(Float)
    hour2 = Column(Float)

    def __init__(self, hour1, hour2):
        self.hour1 = hour1
        self.hour2 = hour2
script.py:
from model import *
from database import engine, db_session as session

Base.metadata.create_all(engine)

oPrice = Price(2.5, 2.5)
oPrice.currency = Currency("EUR")
session.add(oPrice)

tPrice = Price(5.5, 1.5)
tPrice.currency = Currency("EUR")
session.add(tPrice)

session.commit()
The best and simplest solution is to use the currency codes as the primary keys in Currency, and as foreign keys in Price. Then you can write
price.currency_id = "EUR"
This also makes your database tables more readable: you won't see 28342 but 'GBP'.
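If you want to keep the surrogate integer key but avoid the full unique-object recipe, a simpler get-or-create helper also works. This is a sketch of a common pattern, not taken from the original answers:
def get_or_create_currency(session, unit):
    # Reuse an existing Currency with this unit, creating it only if missing.
    currency = session.query(Currency).filter_by(unit=unit).first()
    if currency is None:
        currency = Currency(unit)
        session.add(currency)
    return currency


eur = get_or_create_currency(session, "EUR")

oPrice = Price(2.5, 2.5)
oPrice.currency = eur
tPrice = Price(5.5, 1.5)
tPrice.currency = eur

session.add_all([oPrice, tPrice])
session.commit()   # only one Currency row is inserted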

sqlalchemy - how to convert query with subquery into relationship

In the code below I want to replace all_holdings in Account with a property called holdings that returns the desired_holdings, i.e. the holdings representing the latest known quantity, which can change over time. I'm having trouble figuring out how to construct the call to relationship().
In addition, I'd appreciate any comments on the appropriateness of the pattern (keeping historic data in a single table and using a max-date subquery to get the most recent rows), as well as on better alternatives or improvements to the query.
from sqlalchemy import Column, Integer, String, Date, DateTime, REAL, ForeignKey, func
from sqlalchemy.orm import relationship, aliased
from sqlalchemy.sql.operators import and_, eq
from sqlalchemy.ext.declarative import declarative_base
from db import session
import datetime
import string

Base = declarative_base()


class MySQLSettings(object):
    __table_args__ = {'mysql_engine': 'InnoDB'}


class Account(MySQLSettings, Base):
    __tablename__ = 'account'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    all_holdings = relationship('Holding', backref='account')

    def desired_holdings(self):
        max_date_subq = session.query(Holding.account_id.label('account_id'),
                                      Holding.stock_id.label('stock_id'),
                                      func.max(Holding.as_of).label('max_as_of')). \
            group_by(Holding.account_id, Holding.stock_id).subquery()
        desired_query = session.query(Holding). \
            join(Account, Account.id == self.id).join(max_date_subq). \
            filter(max_date_subq.c.account_id == self.id). \
            filter(Holding.as_of == max_date_subq.c.max_as_of). \
            filter(Holding.account_id == max_date_subq.c.account_id). \
            filter(Holding.stock_id == max_date_subq.c.stock_id)
        return desired_query.all()

    def __init__(self, name):
        self.name = name


class Stock(MySQLSettings, Base):
    __tablename__ = 'stock'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))

    def __init__(self, name):
        self.name = name


class Holding(MySQLSettings, Base):
    __tablename__ = 'holding'
    id = Column(Integer, primary_key=True)
    account_id = Column(Integer, ForeignKey('account.id'), nullable=False)
    stock_id = Column(Integer, ForeignKey('stock.id'), nullable=False)
    quantity = Column(REAL)
    as_of = Column(Date)
    stock = relationship('Stock')

    def __str__(self):
        return "Holding(%f, '%s' '%s')" % (self.quantity, self.stock.name, str(self.as_of))

    def __init__(self, account, stock, quantity, as_of):
        self.account_id = account.id
        self.stock_id = stock.id
        self.quantity = quantity
        self.as_of = as_of


if __name__ == "__main__":
    ibm = Stock('ibm')
    session.add(ibm)
    account = Account('a')
    session.add(account)
    session.flush()

    session.add_all([Holding(account, ibm, 100, datetime.date(2001, 1, 1)),
                     Holding(account, ibm, 200, datetime.date(2001, 1, 3)),
                     Holding(account, ibm, 300, datetime.date(2001, 1, 5))])
    session.commit()

    print "All holdings by relation:\n\t", \
        string.join([str(h) for h in account.all_holdings], "\n\t")
    print "Desired holdings query:\n\t", \
        string.join([str(h) for h in account.desired_holdings()], "\n\t")
The results when run are:
All holdings by relation:
Holding(100.000000, 'ibm' '2001-01-01')
Holding(200.000000, 'ibm' '2001-01-03')
Holding(300.000000, 'ibm' '2001-01-05')
Desired holdings query:
Holding(300.000000, 'ibm' '2001-01-05')
The following answer was provided by Michael Bayer after I posted to the sqlalchemy Google group:
The desired_holdings() query is pretty complicated and I'm not seeing a win by trying to get relationship() to do it. relationship() is oriented towards maintaining the persistence between two classes, not as much a reporting technique (and anything with max()/group_by in it is referring to reporting).
I would stick @property on top of desired_holdings, use object_session(self) to get at "session", and be done.
See more information on query-enabled properties.
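Putting that advice together, the property could look roughly like this (a sketch of the suggested approach, using object_session(self) instead of the module-level session; not code from the original thread):
from sqlalchemy import and_
from sqlalchemy.orm import object_session


class Account(MySQLSettings, Base):
    # ... columns and all_holdings as defined above ...

    @property
    def desired_holdings(self):
        session = object_session(self)
        max_date_subq = (session.query(Holding.account_id.label('account_id'),
                                       Holding.stock_id.label('stock_id'),
                                       func.max(Holding.as_of).label('max_as_of'))
                         .group_by(Holding.account_id, Holding.stock_id)
                         .subquery())
        return (session.query(Holding)
                .join(max_date_subq,
                      and_(Holding.account_id == max_date_subq.c.account_id,
                           Holding.stock_id == max_date_subq.c.stock_id,
                           Holding.as_of == max_date_subq.c.max_as_of))
                .filter(Holding.account_id == self.id)
                .all())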