Working with Generator Context Manager in fastapi db session - sqlalchemy

I am using a context manager with my FastAPI session; my setup is below:
from sqlmodel import create_engine, Session
from app.core.config import settings
engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True)
SessionLocal = Session(autocommit=False, autoflush=False, bind=engine)
which is then consumed by:
from contextlib import contextmanager
from sqlmodel import Session
from app.db.session import SessionLocal
@contextmanager
def get_session():
    with SessionLocal as session:
        yield session
which is then used by an endpoint:
@router.post("/")
def create_book(
    *,
    db: Session = Depends(get_session),
    book_in: models.BookCreate,
    current_user: models.User = Depends(get_current_active_user),
) -> models.BookCreate:
    """
    Create new book.
    """
    # print(current_user.id)
    book = crud.book.create_with_owner(db=db, obj_in=book_in, owner_id=current_user.id)
    return book
With the setup above I expected everything to work, but that is not the case; instead, I am getting the error below:
backend-1 | File "/app/app/api/api_v1/endpoints/book.py", line 42, in create_book
backend-1 | book = crud.book.create_with_owner(db=db, obj_in=book_in, owner_id=current_user.id)
backend-1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
backend-1 | File "/app/app/crud/crud_book.py", line 23, in create_with_owner
backend-1 | db.add(db_obj)
backend-1 | ^^^^^^
backend-1 | AttributeError: '_GeneratorContextManager' object has no attribute 'add'
Below are the details from crud_book.py:
class CRUDBook(CRUDBase[Book, BookCreate, BookUpdate]):
    def create_with_owner(
        self, db: Session, *, obj_in: BookCreate, owner_id: UUID
    ) -> Book:
        # with get_session() as db:
        obj_in_data = jsonable_encoder(obj_in)
        obj_in_data = dict(obj_in)
        print(db)
        # TODO: Check if owner_id is none
        db_obj = self.model(**obj_in_data, owner_id=owner_id)
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj
When I introduce another with context inside crud_book.py, the session does work, but that comes at a cost: the session only exists while the book object is being created in the DB and is closed right after, so I end up with the error below:
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlmodel/main.py", line 597, in validate
backend-1 | return cls.from_orm(value)
backend-1 | ^^^^^^^^^^^^^^^^^^^
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlmodel/main.py", line 552, in from_orm
backend-1 | values, fields_set, validation_error = validate_model(cls, obj)
backend-1 | ^^^^^^^^^^^^^^^^^^^^^^^^
backend-1 | File "pydantic/main.py", line 1056, in pydantic.main.validate_model
backend-1 | File "pydantic/utils.py", line 441, in pydantic.utils.GetterDict.get
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/attributes.py", line 482, in __get__
backend-1 | return self.impl.get(state, dict_)
backend-1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/attributes.py", line 942, in get
backend-1 | value = self._fire_loader_callables(state, key, passive)
backend-1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/attributes.py", line 978, in _fire_loader_callables
backend-1 | return self.callable_(state, passive)
backend-1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
backend-1 | File "/usr/local/lib/python3.11/site-packages/sqlalchemy/orm/strategies.py", line 863, in _load_for_state
backend-1 | raise orm_exc.DetachedInstanceError(
backend-1 | sqlalchemy.orm.exc.DetachedInstanceError: Parent instance <Book at 0x7fc8a3087b90> is not bound to a Session; lazy load operation of attribute 'authors' cannot proceed (Background on this error at: https://sqlalche.me/e/14/bhk3)
Just in case, below are the model details that are expected during BookCreate:
import datetime
from uuid import uuid4, UUID
from typing import TYPE_CHECKING
from sqlmodel import SQLModel, Field, Relationship
# if TYPE_CHECKING:
from .genre import Genre # noqa: F401
from .author import Author # noqa: F401
from .narrator import Narrator # noqa: F401
from .publisher import Publisher # noqa: F401
from .user import User, UserRead # noqa: F401
from .book_genre_link import BookGenreLink # noqa: F401
from .book_author_link import BookAuthorLink # noqa: F401
from .book_narrator_link import BookNarratorLink # noqa: F401
from .book_publisher_link import BookPublisherLink # noqa: F401
class BookBase(SQLModel):
    title: str = Field(index=True)
    subtitle: str | None = Field(default=None, index=True)
    description: str = Field(index=True)
    runtime: datetime.time | None = Field(default=None)
    rating: float | None = Field(default=None)
    published_date: datetime.date | None = Field(default=None)
    cover_image: str | None = Field(default=None)


class Book(BookBase, table=True):
    id: UUID = Field(
        default_factory=uuid4,
        primary_key=True,
        index=True,
        nullable=False,
    )
    owner: list["User"] = Relationship(back_populates="books")
    owner_id: UUID | None = Field(default=None, foreign_key="user.id")
    authors: list["Author"] = Relationship(
        back_populates="books", link_model=BookAuthorLink
    )
    publishers: list["Publisher"] = Relationship(
        back_populates="books", link_model=BookPublisherLink
    )
    genres: list["Genre"] = Relationship(
        back_populates="books", link_model=BookGenreLink
    )
    narrators: list["Narrator"] = Relationship(
        back_populates="books", link_model=BookNarratorLink
    )
    # creation_date: datetime = Field(default=datetime.utcnow())
    # update_date: datetime = Field(default=datetime.utcnow())


class BookCreate(BookBase):
    authors: list["Author"] = []
    publishers: list["Publisher"] = []
    genres: list["Genre"] = []
    narrators: list["Narrator"] = []
Why do I need multiple context managers to handle the session, or where have I gone wrong?

For some reason, FastAPI dependencies should be generators, not context managers (it's mentioned in the documentation). What I usually do is:
from contextlib import contextmanager

def get_db():
    ...  # a plain generator that yields the session (see the sketch below)

db_context = contextmanager(get_db)
Then I can use Depends(get_db) in my FastAPI endpoints, and with db_context() as db: in other places (e.g. Celery tasks, CRON jobs, etc.).
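Applied to the setup in the question, a minimal sketch could look like this (it assumes the engine created in app/db/session.py; session_context is just an illustrative name). The two key changes are that the dependency stays a plain generator, with no @contextmanager decorator, and that a fresh Session is opened per request instead of reusing a single SessionLocal instance:
from contextlib import contextmanager

from sqlmodel import Session
from app.db.session import engine


def get_session():
    # FastAPI treats a yield dependency as a context manager itself,
    # so the function is used directly with Depends(get_session)
    with Session(engine) as session:
        yield session


# for places outside FastAPI (Celery tasks, CRON jobs, scripts, ...)
session_context = contextmanager(get_session)
In the endpoint this is still consumed as db: Session = Depends(get_session), and elsewhere as with session_context() as db: .... Because the session now stays open for the whole request, the response-model validation that triggered the DetachedInstanceError should also run while the session is still attached.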

Related

fastapi + sqlalchemy + pydantic → how to process relations

Using FastAPI I can't fetch all the data. For example, I have this response body (but the items list is always empty):
[
    {
        "email": "mmm@gmail.com",
        "id": 1,
        "is_active": true,
        "items": []
    },
    {
        "email": "ee@gmail.com",
        "id": 2,
        "is_active": true,
        "items": []
    }
]
The endpoints are, for example:
@app.get("/users/", response_model=list[schemas.User])
async def read_users(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
    users = crud.get_users(db, skip=skip, limit=limit)
    return users


@app.get("/users/{user_id}", response_model=schemas.User)
async def read_user(user_id: int, db: Session = Depends(get_db)):
    db_user = crud.get_user(db, user_id=user_id)
    if db_user is None:
        raise HTTPException(status_code=404, detail="User not found")
    return db_user
I have the following schemas (database is PostgreSQL):
from pydantic import BaseModel


class ItemBase(BaseModel):
    title: str
    description: str | None = None


class ItemCreate(ItemBase):
    pass


class Item(ItemBase):
    id: int
    owner: int

    class Config:
        orm_mode = True


class UserBase(BaseModel):
    email: str


class UserCreate(UserBase):
    password: str


class User(UserBase):
    id: int
    is_active: bool
    items: list[Item] = []

    class Config:
        orm_mode = True
The models are (table users and table items):
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship

from .database import Base


class User(Base):
    __tablename__ = "users"
    __table_args__ = {'schema': 'socialcampaigns'}

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    hashed_password = Column(String)
    is_active = Column(Boolean, default=True)
    items_rel = relationship("Item", back_populates="owner_id_rel")


class Item(Base):
    __tablename__ = "items"
    __table_args__ = {'schema': 'socialcampaigns'}

    id = Column(Integer, primary_key=True, index=True)
    title = Column(String, index=True)
    description = Column(String, index=True)
    owner = Column(Integer, ForeignKey("socialcampaigns.users.id"))
    owner_id_rel = relationship("User", back_populates="items_rel")
The CRUD code is:
from sqlalchemy.orm import Session

from . import models, schemas


def get_user(db: Session, user_id: int):
    return db.query(models.User).filter(models.User.id == user_id).first()


def get_users(db: Session, skip: int = 0, limit: int = 100):
    return db.query(models.User).offset(skip).limit(limit).all()
How can I solve this? How do I view the list of Items when viewing users?
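A likely cause, going only by the code shown: with orm_mode, Pydantic looks up attributes by the schema's field names, so schemas.User.items is read as user.items on the ORM object; since the relationship is named items_rel, nothing is found and the default empty list is used. A sketch of the models with the relationships renamed to match the schemas (owner_rel is just an illustrative name):
class User(Base):
    __tablename__ = "users"
    __table_args__ = {'schema': 'socialcampaigns'}

    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    hashed_password = Column(String)
    is_active = Column(Boolean, default=True)
    # named "items" so it matches schemas.User.items
    items = relationship("Item", back_populates="owner_rel")


class Item(Base):
    __tablename__ = "items"
    __table_args__ = {'schema': 'socialcampaigns'}

    id = Column(Integer, primary_key=True, index=True)
    title = Column(String, index=True)
    description = Column(String, index=True)
    owner = Column(Integer, ForeignKey("socialcampaigns.users.id"))
    owner_rel = relationship("User", back_populates="items")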

Django: how to filter multiple JSONField data?

I'm using Django with Postgres. I was able to add multiple filters in my views, but my question is: is it possible to filter the same JSONField several times with different values?
For example, I can filter localhost:127.0.0.1:products?areaOfUse=residential.
Is there any possibility to get the result of /products?areaOfUse=residential&areaOfUse=test, i.e. to query two different JSON values at once?
Here are my views:
class SubcategoriesProductsAPI(APIView):
    # @cache_control(must_revalidate=True, max_age=3600)
    def get(self, request, subCategoryId=None, pk=None):
        try:
            filters = {}
            design = self.request.query_params.get('design', None)
            dimension = self.request.query_params.get('dimension', None)
            collectionName = self.request.query_params.get('collectionName', None)
            material = self.request.query_params.get('material', None)
            min_price = self.request.query_params.get('min_price', None)
            max_price = self.request.query_params.get('max_price', None)
            page = self.request.query_params.get('page', None)
            wearLayer = self.request.query_params.get('wearLayer', None)
            areaOfUse = self.request.query_params.getlist('areaOfUse', None)
            productType = self.request.query_params.get('type', None)
            installationMethod = self.request.query_params.get('installationMethod', None)
            format_type = self.request.query_params.get('format_type', None)
            wearLayer = self.request.query_params.get('wearLayer', None)
            levelOfUse = self.request.query_params.get('levelOfUse', None)
            if design is not None:
                filters['product_options__options__data__design'] = design
            if productType is not None:
                filters['product_options__options__data__type'] = productType
            if dimension is not None:
                filters['product_options__options__data__dimensions__contains'] = [{'dimension': dimension}]
            if collectionName is not None:
                filters['product_options__options__data__collectionName'] = collectionName
            if material is not None:
                filters['product_options__options__data__material'] = material
            if wearLayer is not None:
                filters['product_options__options__data__wearLayer'] = wearLayer
            if installationMethod is not None:
                filters['product_options__options__data__installationMethod'] = installationMethod
            if format_type is not None:
                filters['product_options__options__data__format'] = format_type
            if areaOfUse is not None:
                filters['product_options__options__data__areaOfUse__contains'] = areaOfUse
            if levelOfUse is not None:
                filters['product_options__options__data__levelOfUse'] = levelOfUse
            if min_price and max_price:
                filters['product_options__options__data__dimensions__range__price'] = float(min_price)
                filters['product_options__options__data__dimensions__0__price__lte'] = float(max_price)
            queryset = Products.objects.filter(sub_categories_id=subCategoryId, is_active=True).select_related().filter(**filters)
            if not queryset:
                return JsonResponse({'status': False, 'msg': 'No products found', 'data': {}}, status=400)
            if page is not None:
                paginator = PageNumberPagination()
                page = paginator.paginate_queryset(queryset, request)
                if page is not None:
                    serializer = ProductSerializer(page, many=True)
                    return JsonResponse({'status': True, 'msg': 'Succesfully retrived products ', 'data': serializer.data, 'count': paginator.page.paginator.count, 'previous': paginator.get_previous_link(), 'next': paginator.get_next_link()}, status=200)
            serializer = ProductSerializer(queryset, many=True)
            return JsonResponse({'status': True, 'msg': 'Succesfully retrived products ', 'data': serializer.data}, status=200)
        except Products.DoesNotExist:
            return JsonResponse({'status': False, 'msg': 'Internal system error', 'data': {}}, status=500)
To read every repeated value of the parameter, use getlist() with the bracketed name:
areaOfUse = self.request.query_params.getlist('areaOfUse[]', None)
so the request becomes:
/products?areaOfUse%5B%5D=residential&areaOfUse%5B%5D=test
The values can then be combined into one query with Q objects:
import operator
from functools import reduce

from django.db.models import Q

queryset = Products.objects.filter(sub_categories_id=subCategoryId, is_active=True).select_related().filter(**filters)
if areaOfUse:
    queryset = queryset.filter(
        reduce(
            operator.and_,
            (Q(product_options__options__data__areaOfUse__contains=x) for x in areaOfUse)
        )
    )
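For illustration only, the reduce call simply ANDs one containment lookup per submitted value, so the two-value request above ends up equivalent to the combined Q below (values is just an example list):
import operator
from functools import reduce

from django.db.models import Q

values = ["residential", "test"]
combined = reduce(
    operator.and_,
    (Q(product_options__options__data__areaOfUse__contains=v) for v in values),
)
# combined is Q(...__contains="residential") & Q(...__contains="test")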

Parse into JSON using Spark

I have retrieved a table from SQL Server which contains over 3 million records.
Top 10 Records:
+---------+-------------+----------+
|ACCOUNTNO|VEHICLENUMBER|CUSTOMERID|
+---------+-------------+----------+
| 10003014| MH43AJ411| 20000000|
| 10003014| MH43AJ411| 20000001|
| 10003015| MH12GZ3392| 20000002|
| 10003016| GJ15Z8173| 20000003|
| 10003018| MH05AM902| 20000004|
| 10003019| GJ15CD7657| 20001866|
| 10003019| MH02BY7774| 20000005|
| 10003019| MH02DG7774| 20000933|
| 10003019| GJ15CA7387| 20001865|
| 10003019| GJ15CB9601| 20001557|
+---------+-------------+----------+
only showing top 10 rows
Here ACCOUNTNO is unique; the same ACCOUNTNO might have more than one VEHICLENUMBER, and each vehicle might have a unique CUSTOMERID with respect to that VEHICLENUMBER.
I want to export as a JSON format.
This is my code to achieve the output:
package com.issuer.pack2.spark

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql._

object sqltojson {
  def main(args: Array[String]) {
    System.setProperty("hadoop.home.dir", "C:/winutil/")
    val conf = new SparkConf().setAppName("SQLtoJSON").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    val jdbcSqlConnStr = "jdbc:sqlserver://192.168.70.88;databaseName=ISSUER;user=bhaskar;password=welcome123;"
    val jdbcDbTable = "[HISTORY].[TP_CUSTOMER_PREPAIDACCOUNTS]"
    val jdbcDF = sqlContext.read.format("jdbc").options(Map("url" -> jdbcSqlConnStr, "dbtable" -> jdbcDbTable)).load()
    // jdbcDF.show(10)

    jdbcDF.registerTempTable("tp_customer_account")
    val res01 = sqlContext.sql("SELECT ACCOUNTNO, VEHICLENUMBER, CUSTOMERID FROM tp_customer_account GROUP BY ACCOUNTNO, VEHICLENUMBER, CUSTOMERID ORDER BY ACCOUNTNO")
    // res01.show(10)
    res01.coalesce(1).write.json("D:/res01.json")
  }
}
The output I got:
{"ACCOUNTNO":10003014,"VEHICLENUMBER":"MH43AJ411","CUSTOMERID":20000001}
{"ACCOUNTNO":10003014,"VEHICLENUMBER":"MH43AJ411","CUSTOMERID":20000000}
{"ACCOUNTNO":10003015,"VEHICLENUMBER":"MH12GZ3392","CUSTOMERID":20000002}
{"ACCOUNTNO":10003016,"VEHICLENUMBER":"GJ15Z8173","CUSTOMERID":20000003}
{"ACCOUNTNO":10003018,"VEHICLENUMBER":"MH05AM902","CUSTOMERID":20000004}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"MH02BY7774","CUSTOMERID":20000005}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CA7387","CUSTOMERID":20001865}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CD7657","CUSTOMERID":20001866}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"MH02DG7774","CUSTOMERID":20000933}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CB9601","CUSTOMERID":20001557}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CD7387","CUSTOMERID":20029961}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CF7747","CUSTOMERID":20009020}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CB727","CUSTOMERID":20000008}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CA7837","CUSTOMERID":20001223}
{"ACCOUNTNO":10003019,"VEHICLENUMBER":"GJ15CD7477","CUSTOMERID":20001690}
{"ACCOUNTNO":10003020,"VEHICLENUMBER":"MH01AX5658","CUSTOMERID":20000006}
{"ACCOUNTNO":10003021,"VEHICLENUMBER":"GJ15AD727","CUSTOMERID":20000007}
{"ACCOUNTNO":10003023,"VEHICLENUMBER":"GU15PP7567","CUSTOMERID":20000009}
{"ACCOUNTNO":10003024,"VEHICLENUMBER":"GJ15CA7567","CUSTOMERID":20000010}
{"ACCOUNTNO":10003025,"VEHICLENUMBER":"GJ5JB9312","CUSTOMERID":20000011}
But I want to get the JSON format output like this:
I have written the JSON below manually for the first three records of the table above (maybe I have designed it wrongly; I want ACCOUNTNO to be unique).
{
    "ACCOUNTNO": 10003014,
    "VEHICLE": [
        { "VEHICLENUMBER": "MH43AJ411", "CUSTOMERID": 20000000 },
        { "VEHICLENUMBER": "MH43AJ411", "CUSTOMERID": 20000001 }
    ],
    "ACCOUNTNO": 10003015,
    "VEHICLE": [
        { "VEHICLENUMBER": "MH12GZ3392", "CUSTOMERID": 20000002 }
    ]
}
So, how to achieve this JSON format using Spark code?
Scala Spark SQL
You can do the following (instead of registerTempTable you can use createOrReplaceTempView, as registerTempTable is deprecated):
jdbcDF.createOrReplaceTempView("tp_customer_account")
val res01 = sqlContext.sql("SELECT ACCOUNTNO, collect_list(struct(`VEHICLENUMBER`, `CUSTOMERID`)) as VEHICLE FROM tp_customer_account GROUP BY ACCOUNTNO ORDER BY ACCOUNTNO ")
res01.coalesce(1).write.json("D:/res01.json")
You should get your desired output as
{"ACCOUNTNO":"10003014","VEHICLE":[{"VEHICLENUMBER":"MH43AJ411","CUSTOMERID":"20000000"},{"VEHICLENUMBER":"MH43AJ411","CUSTOMERID":"20000001"}]}
{"ACCOUNTNO":"10003015","VEHICLE":[{"VEHICLENUMBER":"MH12GZ3392","CUSTOMERID":"20000002"}]}
{"ACCOUNTNO":"10003016","VEHICLE":[{"VEHICLENUMBER":"GJ15Z8173","CUSTOMERID":"20000003"}]}
{"ACCOUNTNO":"10003018","VEHICLE":[{"VEHICLENUMBER":"MH05AM902","CUSTOMERID":"20000004"}]}
{"ACCOUNTNO":"10003019","VEHICLE":[{"VEHICLENUMBER":"GJ15CD7657","CUSTOMERID":"20001866"},{"VEHICLENUMBER":"MH02BY7774","CUSTOMERID":"20000005"},{"VEHICLENUMBER":"MH02DG7774","CUSTOMERID":"20000933"},{"VEHICLENUMBER":"GJ15CA7387","CUSTOMERID":"20001865"},{"VEHICLENUMBER":"GJ15CB9601","CUSTOMERID":"20001557"}]}
Scala Spark API
Using the Spark Scala API, you can do the following:
import org.apache.spark.sql.functions._
val res01 = jdbcDF.groupBy("ACCOUNTNO")
  .agg(collect_list(struct("VEHICLENUMBER", "CUSTOMERID")).as("VEHICLE"))
res01.coalesce(1).write.json("D:/res01.json")
You should get the same answer as the SQL way.
I hope the answer is helpful.
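For anyone doing the same from PySpark, an equivalent sketch (assuming a DataFrame jdbc_df loaded the same way as jdbcDF above):
from pyspark.sql import functions as F

# group one row per account and collect the vehicle/customer pairs as structs
res01 = (
    jdbc_df
    .groupBy("ACCOUNTNO")
    .agg(F.collect_list(F.struct("VEHICLENUMBER", "CUSTOMERID")).alias("VEHICLE"))
)
res01.coalesce(1).write.json("D:/res01.json")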

Python: writing set values to CSV

Python 3.6.
I am trying to write set values to a CSV file; I am getting the following output for the code below.
import csv


class test_write:
    @classmethod
    def test_write1(cls):
        fieldnames1 = ['first_name', 'last_name']
        cls.write_a_test1(fieldnames=fieldnames1)

    @classmethod
    def write_a_test1(cls, fieldnames):
        with open('/Users/Desktop/delete1.csv', 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            abc = cls.var1()
            writer.writerow(abc)
            print("Done writing")

    @staticmethod
    def var1():
        d = ('my', 'name', 'is', 'hahaha')
        c = set()
        abc = {'first_name': c, 'last_name': d}
        return abc


test_write.test_write1()
When I open the CSV file, the output is:
first_name last_name
set() ('my', 'name', 'is', 'hahaha')
I don't want it to print set() in the file if the set is empty; instead I need a blank cell. Variable c might or might not have values, it depends. How do I proceed with that?
DictWriter expects the keys and values to be strings, so str() is being called on the objects. What you should use is something like:
d = ('my', 'name', 'is', 'hahaha')
c = set()
abc = {'first_name': ' '.join(c), 'last_name': ' '.join(d)}
return abc
The result in the file will be:
first_name,last_name
,my name is hahaha
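A self-contained version of the same idea, with the join moved into a small helper (format_cell is just an illustrative name; any iterable becomes a space-separated string, and an empty set becomes an empty cell):
import csv


def format_cell(value):
    # join sets/tuples/lists into a space-separated string; empty ones become ''
    if isinstance(value, (set, tuple, list)):
        return ' '.join(value)
    return value


row = {'first_name': set(), 'last_name': ('my', 'name', 'is', 'hahaha')}
with open('delete1.csv', 'w', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=['first_name', 'last_name'])
    writer.writeheader()
    writer.writerow({key: format_cell(value) for key, value in row.items()})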

sqlalchemy -> type object 'role_user' has no attribute 'foreign_keys'

class User(Base):
    """
    users table
    """
    __tablename__ = 'users'
    __table_args__ = {
        'mysql_engine': 'InnoDB',
        'mysql_charset': 'utf8'
    }

    id = Column(INTEGER, primary_key=True)
    email = Column(String(64), nullable=False, unique=True)
    password = Column(String(64), nullable=False)
    name = Column(String(32), nullable=False)
    last_login_time = Column(DateTime, default='')
    last_login_ip = Column(String(32), default='')
    create_time = Column(DateTime, nullable=False)
    status = Column(SMALLINT, default=0)
    role = relationship("Role",
                        secondary="role_user",
                        backref=backref('user'))

    def __init__(self, email, password, name, last_login_time=now,
                 last_login_ip='0', create_time=now, status=0):
        self.email = email
        self.password = md5(password)
        self.name = name
        self.last_login_time = last_login_time
        self.last_login_ip = last_login_ip
        self.create_time = create_time
        self.status = status

    def __repr__(self):
        return "<User('%s', '%s')>" % (self.name, self.email)


class Role(Base):
    """
    roles table
    """
    __tablename__ = 'roles'
    __table_args__ = {
        'mysql_engine': 'InnoDB',
        'mysql_charset': 'utf8'
    }

    id = Column(SMALLINT(5), primary_key=True)
    name = Column(String(32), nullable=False, unique=True)

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<Role('%s', '%s')>" % (self.id, self.name)


class role_user(Base):
    """
    role_user association table
    """
    __tablename__ = 'role_user'
    __table_args__ = {
        'mysql_engine': 'InnoDB',
        'mysql_charset': 'utf8'
    }

    user_id = Column(INTEGER, ForeignKey('users.id'), primary_key=True)
    role_id = Column(SMALLINT(5), ForeignKey('roles.id'), primary_key=True)

    def __init__(self, user_id, role_id):
        self.user_id = user_id
        self.role_id = role_id
When I create a user, I get the error below:
u1 = User(email='john@example.com', password='12345', name='john')
Traceback (most recent call last):
File "<console>", line 1, in <module>
File "<string>", line 2, in __init__
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/instrumentation.py", line 310, in _new_state_if_none
state = self._state_constructor(instance, self)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/util/langhelpers.py", line 582, in __get__
obj.__dict__[self.__name__] = result = self.fget(obj)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/instrumentation.py", line 145, in _state_constructor
self.dispatch.first_init(self, self.class_)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/event.py", line 409, in __call__
fn(*args, **kw)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/mapper.py", line 2209, in _event_on_first_init
configure_mappers()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/mapper.py", line 2118, in configure_mappers
mapper._post_configure_properties()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/mapper.py", line 1242, in _post_configure_properties
prop.init()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/interfaces.py", line 231, in init
self.do_init()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/properties.py", line 1028, in do_init
self._setup_join_conditions()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/properties.py", line 1102, in _setup_join_conditions
can_be_synced_fn=self._columns_are_mapped
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/relationships.py", line 114, in __init__
self._determine_joins()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/orm/relationships.py", line 180, in _determine_joins
consider_as_foreign_keys=consider_as_foreign_keys
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.8.1dev-py2.6-linux-i686.egg/sqlalchemy/sql/util.py", line 345, in join_condition
b.foreign_keys,
AttributeError: type object 'role_user' has no attribute 'foreign_keys'
>>>
jiankong#ubuntu:~/git/admin-server$ python manage.py shell
Python 2.6.5 (r265:79063, Oct 1 2012, 22:07:21)
[GCC 4.4.3] on linux2
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)
>>> from apps.auth.models import User
>>> u1 = User(email='john@example.com', password='12345', name='john')
What can I do to solve this? Thank you!
You've got conflicting names for role_user, in that there's a class called role_user and a table with the same name, so secondary="role_user" picks the class object first. Maybe state it like this:
relationship("Role", secondary="role_user.__table__")
EDIT: ideally you'd define "role_user" as a Table only, and define it before you do "secondary". Follow the example at http://docs.sqlalchemy.org/en/latest/orm/relationships.html#many-to-many.
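A sketch of that approach with the models above (untested; it assumes the same Base and the same INTEGER/SMALLINT types already imported in the question's code):
from sqlalchemy import Table, Column, ForeignKey
from sqlalchemy.orm import relationship, backref

# plain association Table, defined before any relationship() that refers to it
role_user = Table(
    'role_user', Base.metadata,
    Column('user_id', INTEGER, ForeignKey('users.id'), primary_key=True),
    Column('role_id', SMALLINT(5), ForeignKey('roles.id'), primary_key=True),
    mysql_engine='InnoDB',
    mysql_charset='utf8',
)


class User(Base):
    # ... same columns as above ...
    role = relationship("Role", secondary=role_user, backref=backref('user'))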