Print parent column name - sqlalchemy

Given the following code:
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_ECHO"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///test.db"
db = SQLAlchemy(app)

class File(db.Model):
    __tablename__ = "file"
    _id = db.Column(db.String, primary_key=True)
    file_name = db.Column(db.String)
    category_id = db.Column(db.String, db.ForeignKey("category._id"))

    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

class Category(db.Model):
    __tablename__ = "category"
    _id = db.Column(db.String, primary_key=True)
    name = db.Column(db.String)
    files = db.relationship("File", backref="category")

    def as_dict(self):
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}

db.drop_all()
db.create_all()
categories0 = Category(_id="aca50a26-5d3f-4c4d-872b-83b663d5304f", name="Apple")
files0 = File(_id="8a95ba11-e2aa-407d-bac9-609e6c559731", file_name="8a95ba11-e2aa-407d-bac9-609e6c559731.jpg", category=categories0)
db.session.add_all([categories0, files0])
db.session.commit()
results = db.session.query(File).join(Category, File.category).filter(Category._id == "aca50a26-5d3f-4c4d-872b-83b663d5304f").all()

@app.route('/print')
def printMsg():
    return jsonify([c.as_dict() for c in results])

if __name__ == '__main__':
    app.run(debug=True)
When I call the endpoint /print, it returns
[
    {
        "_id": "8a95ba11-e2aa-407d-bac9-609e6c559731",
        "category_id": "aca50a26-5d3f-4c4d-872b-83b663d5304f",
        "file_name": "8a95ba11-e2aa-407d-bac9-609e6c559731.jpg"
    }
]
But I need the category_name in the output:
[
    {
        "_id": "8a95ba11-e2aa-407d-bac9-609e6c559731",
        "category_id": "aca50a26-5d3f-4c4d-872b-83b663d5304f",
        "category_name": "Apple",
        "file_name": "8a95ba11-e2aa-407d-bac9-609e6c559731.jpg"
    }
]
How should I achieve that?

Here's code that works as you requested, plus some other suggestions:
Your code iterates columns, which is the database's view of things. You should iterate the attributes of the model instead. For plain fields the two are often (though not always) the same, but for relationships you have to look at the model.
Instead of pulling the related attribute into the flat dictionary of fields, I offer a second solution that nests the related fields in the response. As your models grow and scale, you will appreciate this more than you may now.
I changed some formatting for PEP8. Even for SO posts, it helps readability and it's always good practice.
I would STRONGLY suggest you DO NOT use either of these solutions. Serialization is a tricky process. For simple fields/relationships like your example, it may not seem that way, but as you scale you will be handling a lot of edge cases and writing code that has been written and tested many times before. Consider using Marshmallow / Flask-Marshmallow. It's a great library that makes serialization and relationship nesting trivial (a short sketch follows the full code below).
from flask import Flask, jsonify
from flask_sqlalchemy import SQLAlchemy
import sqlalchemy as sa

app = Flask(__name__)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
db = SQLAlchemy(app)


class File(db.Model):
    __tablename__ = 'file'

    _id = db.Column(
        db.String,
        primary_key=True,
    )
    file_name = db.Column(db.String)
    category_id = db.Column(
        db.String,
        db.ForeignKey('category._id'),
    )

    def as_dict(self):
        """Return serialized attributes + related category name"""
        serialized = {
            attr.key: getattr(self, attr.key)
            for attr in sa.orm.class_mapper(self.__class__).iterate_properties
            if isinstance(attr, sa.orm.properties.ColumnProperty)
        }
        serialized['category_name'] = self.category.name
        return serialized

    def as_dict_with_relationships(self):
        """Return serialized attributes + nested relationships"""
        serialized = {}
        for attr in sa.orm.class_mapper(self.__class__).iterate_properties:
            if isinstance(attr, sa.orm.properties.ColumnProperty):
                serialized[attr.key] = getattr(self, attr.key)
            elif isinstance(attr, sa.orm.relationships.RelationshipProperty):
                serialized[attr.key] = getattr(self, attr.key).as_dict()
            else:
                print(f'not handling {attr}, {type(attr)}')
        return serialized


class Category(db.Model):
    __tablename__ = 'category'

    _id = db.Column(
        db.String,
        primary_key=True,
    )
    name = db.Column(db.String)
    files = db.relationship(
        'File',
        backref='category',
    )

    def as_dict(self):
        return {
            c.name: getattr(self, c.name)
            for c in self.__table__.columns
        }


db.drop_all()
db.create_all()

categories0 = Category(
    _id='aca50a26-5d3f-4c4d-872b-83b663d5304f',
    name='Apple',
)
files0 = File(
    _id='8a95ba11-e2aa-407d-bac9-609e6c559731',
    file_name='8a95ba11-e2aa-407d-bac9-609e6c559731.jpg',
    category=categories0,
)
db.session.add_all([categories0, files0])
db.session.commit()


@app.route('/print')
def print_msg():
    """Return serialized results - top-level attributes + explicit other data"""
    results = db.session.query(File) \
        .join(Category, File.category) \
        .filter(
            Category._id == 'aca50a26-5d3f-4c4d-872b-83b663d5304f',
        ) \
        .all()
    return jsonify([
        record.as_dict()
        for record in results
    ])


@app.route('/print2')
def print_msg2():
    """Return serialized results - top-level attributes + nested relationships"""
    results = db.session.query(File) \
        .join(Category, File.category) \
        .filter(
            Category._id == 'aca50a26-5d3f-4c4d-872b-83b663d5304f',
        ) \
        .all()
    return jsonify([
        record.as_dict_with_relationships()
        for record in results
    ])


if __name__ == '__main__':
    app.run(debug=True)
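If you do go the Flask-Marshmallow route suggested above, a minimal sketch might look like the following. This is my illustration, not part of the answer: the schema classes and the /print-marshmallow route are assumed names, and it presumes flask-marshmallow and marshmallow-sqlalchemy are installed.

from flask_marshmallow import Marshmallow

# Initialize after SQLAlchemy so the auto-schemas can see the models.
ma = Marshmallow(app)


class CategorySchema(ma.SQLAlchemyAutoSchema):
    class Meta:
        model = Category


class FileSchema(ma.SQLAlchemyAutoSchema):
    class Meta:
        model = File
        include_fk = True  # keep category_id in the output

    # Nesting the relationship yields the category's fields, name included.
    category = ma.Nested(CategorySchema)


@app.route('/print-marshmallow')
def print_msg_marshmallow():
    files = File.query.all()
    return jsonify(FileSchema(many=True).dump(files))

Dumping with a nested schema returns essentially the same data as as_dict_with_relationships, without hand-rolled serialization code.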

Related

How to use nested pydantic models for sqlalchemy in a flexible way

from fastapi import Depends, FastAPI, HTTPException, Body, Request
from sqlalchemy import create_engine, Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker, relationship
from sqlalchemy.inspection import inspect
from typing import List, Optional
from pydantic import BaseModel
import json

SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"
engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()

app = FastAPI()


# sqlalchemy models
class RootModel(Base):
    __tablename__ = "root_table"
    id = Column(Integer, primary_key=True, index=True)
    someRootText = Column(String)
    subData = relationship("SubModel", back_populates="rootData")


class SubModel(Base):
    __tablename__ = "sub_table"
    id = Column(Integer, primary_key=True, index=True)
    someSubText = Column(String)
    root_id = Column(Integer, ForeignKey("root_table.id"))
    rootData = relationship("RootModel", back_populates="subData")


# pydantic models/schemas
class SchemaSubBase(BaseModel):
    someSubText: str

    class Config:
        orm_mode = True


class SchemaSub(SchemaSubBase):
    id: int
    root_id: int

    class Config:
        orm_mode = True


class SchemaRootBase(BaseModel):
    someRootText: str
    subData: List[SchemaSubBase] = []

    class Config:
        orm_mode = True


class SchemaRoot(SchemaRootBase):
    id: int

    class Config:
        orm_mode = True


class SchemaSimpleBase(BaseModel):
    someRootText: str

    class Config:
        orm_mode = True


class SchemaSimple(SchemaSimpleBase):
    id: int

    class Config:
        orm_mode = True


Base.metadata.create_all(bind=engine)


# database functions (CRUD)
def db_add_simple_data_pydantic(db: Session, root: SchemaRootBase):
    db_root = RootModel(**root.dict())
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root


def db_add_nested_data_pydantic_generic(db: Session, root: SchemaRootBase):
    # this fails:
    db_root = RootModel(**root.dict())
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root


def db_add_nested_data_pydantic(db: Session, root: SchemaRootBase):
    # start: hack: I have to manually generate the sqlalchemy model from the pydantic model
    root_dict = root.dict()
    sub_dicts = []
    # I have to remove the list from the root dict in order to fix the error from above
    for key in list(root_dict):
        if isinstance(root_dict[key], list):
            sub_dicts = root_dict[key]
            del root_dict[key]
    # now I can do it
    db_root = RootModel(**root_dict)
    for sub_dict in sub_dicts:
        db_root.subData.append(SubModel(**sub_dict))
    # end: hack
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root


def db_add_nested_data_nopydantic(db: Session, root):
    print(root)
    sub_dicts = root.pop("subData")
    print(sub_dicts)
    db_root = RootModel(**root)
    for sub_dict in sub_dicts:
        db_root.subData.append(SubModel(**sub_dict))
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    # problem
    """
    if I would now "return db_root", the answer would be this:
    {
        "someRootText": "string",
        "id": 24
    }
    and not contain "subData",
    therefore I have to do the following.
    Why?
    """
    from sqlalchemy.orm import joinedload
    db_root = (
        db.query(RootModel)
        .options(joinedload(RootModel.subData))
        .filter(RootModel.id == db_root.id)
        .all()
    )[0]
    return db_root


# Dependency
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()


@app.post("/addNestedModel_pydantic_generic", response_model=SchemaRootBase)
def add_nested_model_pydantic_generic(root: SchemaRootBase, db: Session = Depends(get_db)):
    data = db_add_nested_data_pydantic_generic(db=db, root=root)
    return data


@app.post("/addSimpleModel_pydantic", response_model=SchemaSimpleBase)
def add_simple_data_pydantic(root: SchemaSimpleBase, db: Session = Depends(get_db)):
    data = db_add_simple_data_pydantic(db=db, root=root)
    return data


@app.post("/addNestedModel_nopydantic")
def add_nested_data_nopydantic(root=Body(...), db: Session = Depends(get_db)):
    data = db_add_nested_data_nopydantic(db=db, root=root)
    return data


@app.post("/addNestedModel_pydantic", response_model=SchemaRootBase)
def add_nested_data_pydantic(root: SchemaRootBase, db: Session = Depends(get_db)):
    data = db_add_nested_data_pydantic(db=db, root=root)
    return data
Description
My question is:
How can I make nested sqlalchemy models from nested pydantic models (or python dicts) in a generic way and write them to the database in "one shot"?
My example model is called RootModel and has a list of submodels ("sub models") in its subData key.
Please see above for the pydantic and sqlalchemy definitions.
Example:
The user provides a nested json string:
{
    "someRootText": "string",
    "subData": [
        {
            "someSubText": "string"
        }
    ]
}
Open the browser and call the endpoint /docs.
You can play around with all endpoints and POST the json string from above.
/addNestedModel_pydantic_generic
When you call the endpoint /addNestedModel_pydantic_generic it will fail, because sqlalchemy cannot create the nested model from the nested pydantic model directly:
AttributeError: 'dict' object has no attribute '_sa_instance_state'
/addSimpleModel_pydantic
With a non-nested model it works.
The remaining endpoints show "hacks" to solve the problem of nested models.
/addNestedModel_pydantic
In this endpoint I generate the root model and add the submodels with a loop, in a non-generic way, using pydantic models.
/addNestedModel_nopydantic
In this endpoint I generate the root model and add the submodels with a loop, in a non-generic way, using python dicts.
My solutions are only hacks, I want a generic way to create nested sqlalchemy models either from pydantic (preferred) or from a python dict.
Environment
OS: Windows
FastAPI version: 0.61.1
Python version: 3.8.5
sqlalchemy: 1.3.19
pydantic: 1.6.1
I haven't found a nice built-in way to do this within pydantic/SQLAlchemy. How I solved it: I gave every nested pydantic model a Meta class containing the corresponding SQLAlchemy model. Like so:
from pydantic import BaseModel
from models import ChildDBModel, ParentDBModel


class ChildModel(BaseModel):
    some_attribute: str = 'value'

    class Meta:
        orm_model = ChildDBModel


class ParentModel(BaseModel):
    child: ChildModel
That allowed me to write a generic function that loops through the pydantic object and transforms submodels into SQLAlchemy models:
def is_pydantic(obj: object):
    """Checks whether an object is pydantic."""
    return type(obj).__class__.__name__ == "ModelMetaclass"


def parse_pydantic_schema(schema):
    """
    Iterates through pydantic schema and parses nested schemas
    to a dictionary containing SQLAlchemy models.
    Only works if nested schemas have specified the Meta.orm_model.
    """
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        try:
            if isinstance(value, list) and len(value):
                if is_pydantic(value[0]):
                    parsed_schema[key] = [item.Meta.orm_model(**item.dict()) for item in value]
            else:
                if is_pydantic(value):
                    parsed_schema[key] = value.Meta.orm_model(**value.dict())
        except AttributeError:
            raise AttributeError("Found nested Pydantic model but Meta.orm_model was not specified.")
    return parsed_schema
The parse_pydantic_schema function returns a dictionary representation of the pydantic model where submodels are substituted by the corresponding SQLAlchemy model specified in Meta.orm_model. You can use this return value to create the parent SQLAlchemy model in one go:
parsed_schema = parse_pydantic_schema(parent_model)  # parent_model is an instance of pydantic ParentModel
new_db_model = ParentDBModel(**parsed_schema)
# do your db actions/commit here
If you want you can even extend this to also automatically create the parent model, but that requires you to also specify the Meta.orm_model for all pydantic models.
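Applied to the models from the question, this might look roughly like the sketch below. The Meta classes on the schemas and the db_add_nested_data_generic helper are my assumptions, not part of the answer above.

# Hypothetical: give the question's pydantic schemas a Meta.orm_model,
# then create the whole object graph in one shot.
class SchemaSubBase(BaseModel):
    someSubText: str

    class Config:
        orm_mode = True

    class Meta:
        orm_model = SubModel


class SchemaRootBase(BaseModel):
    someRootText: str
    subData: List[SchemaSubBase] = []

    class Config:
        orm_mode = True

    class Meta:
        orm_model = RootModel


def db_add_nested_data_generic(db: Session, root: SchemaRootBase):
    # parse_pydantic_schema swaps nested pydantic models for SQLAlchemy ones,
    # so RootModel receives SubModel instances in subData.
    db_root = RootModel(**parse_pydantic_schema(root))
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root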
Using validators is a lot simpler:
SQLAlchemy models.py:
from sqlalchemy.orm import relationship, validates


class ChildModel(Base):
    __tablename__ = "Child"
    name: str = Column(Unicode(255), nullable=False, primary_key=True)


class ParentModel(Base):
    __tablename__ = "Parent"
    some_attribute: str = Column(Unicode(255))
    children = relationship("ChildModel", lazy="joined", cascade="all, delete-orphan")

    @validates("children")
    def adjust_children(self, _, value) -> ChildModel:
        """Instantiate a Child object if it is only a plain string."""
        if value and isinstance(value, str):
            return ChildModel(name=value)
        return value
Pydantic schema.py:
from typing import List
from pydantic import BaseModel, Field, validator


class Parent(BaseModel):
    """Model used for parents."""
    some_attribute: str
    children: List[str] = Field(example=["foo", "bar"], default=[])

    @validator("children", pre=True)
    def adjust_children(cls, children):
        """Convert to plain strings if they are Child objects."""
        if children and not isinstance(next(iter(children), None), str):
            return [child["name"] for child in children]
        return children
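A minimal usage sketch under this scheme (my illustration, not part of the answer; it assumes suitable primary/foreign keys that the snippets above elide): the @validates hook on ParentModel converts each plain string as it is appended, so the ORM object can be built straight from the pydantic data.

def create_parent(db, data: Parent):
    # children=["foo", "bar"]: each string passes through the
    # @validates("children") hook on ParentModel and becomes a
    # ChildModel(name=...) instance as it is appended.
    db_parent = ParentModel(some_attribute=data.some_attribute,
                            children=list(data.children))
    db.add(db_parent)
    db.commit()
    return db_parent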
Nice function @dann; for more than two levels of nesting you can use this recursive function:
def pydantic_to_sqlalchemy_model(schema):
    """
    Iterates through pydantic schema and parses nested schemas
    to a dictionary containing SQLAlchemy models.
    Only works if nested schemas have specified the Meta.orm_model.
    """
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        try:
            if isinstance(value, list) and len(value) and is_pydantic(value[0]):
                parsed_schema[key] = [
                    item.Meta.orm_model(**pydantic_to_sqlalchemy_model(item))
                    for item in value
                ]
            elif is_pydantic(value):
                parsed_schema[key] = value.Meta.orm_model(
                    **pydantic_to_sqlalchemy_model(value)
                )
        except AttributeError:
            raise AttributeError(
                f"Found nested Pydantic model in {schema.__class__} but Meta.orm_model was not specified."
            )
    return parsed_schema
Use it sparingly! If you have cyclical nesting it will recurse forever.
And then call your data transformer like this:
def create_parent(db: Session, parent: Parent_pydantic_schema):
    db_parent = Parent_model(**pydantic_to_sqlalchemy_model(parent))
    db.add(db_parent)
    db.commit()
    return db_parent
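Since the recursion has no termination check, a guard is worth sketching. This variant with a _seen set is my addition, not part of the answer above:

def pydantic_to_sqlalchemy_model_safe(schema, _seen=None):
    """Like pydantic_to_sqlalchemy_model, but raises on cyclical nesting
    instead of recursing forever (hypothetical variant)."""
    _seen = _seen if _seen is not None else set()
    if id(schema) in _seen:
        raise ValueError(f"Cyclical nesting detected at {schema.__class__.__name__}")
    _seen.add(id(schema))
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        if isinstance(value, list) and len(value) and is_pydantic(value[0]):
            parsed_schema[key] = [
                item.Meta.orm_model(**pydantic_to_sqlalchemy_model_safe(item, _seen))
                for item in value
            ]
        elif is_pydantic(value):
            parsed_schema[key] = value.Meta.orm_model(
                **pydantic_to_sqlalchemy_model_safe(value, _seen)
            )
    return parsed_schema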

ID collision with Graphene-SQLAlchemy Interface class plus Node Interface

I've written Graphene models for polymorphic entities represented in my database with SQLAlchemy.
The problem is simple:
I want to create an interface that reflects my SQLAlchemy models for Graphene but also either a) implements Node or b) does not conflict with Node and allows me to retrieve the model's ID without needing to add ... on Node {id} to the query string.
I have to exclude the ID field from my ORM-based interface or the field conflicts with the Node interface; but by excluding it, getting the ID requires adding ... on Node { id } to the query, which is ugly.
I created an SQLAlchemyInterface object that extends graphene.Interface. Many (but not all) of my models used this as well as Node as interfaces. The first problem was that this contains an ID field and it conflicted with the Node interface.
I excluded the id field to not interfere with Node, but then found I could not directly query ID on my models anymore, and had to add ... on Node {id} to the query string.
I then decided to have this SQLAlchemyInterface extend Node. I don't love this approach, because I need to use another (named) Node interface for all of my models that don't necessarily need to implement SQLAlchemyInterface.
class SQLAlchemyInterface(Node):
    @classmethod
    def __init_subclass_with_meta__(
        cls,
        model=None,
        registry=None,
        only_fields=(),
        exclude_fields=(),
        connection_field_factory=default_connection_field_factory,
        _meta=None,
        **options
    ):
        _meta = SQLAlchemyInterfaceOptions(cls)
        _meta.name = f'{cls.__name__}Node'

        autoexclude_columns = exclude_autogenerated_sqla_columns(model=model)
        exclude_fields += autoexclude_columns

        assert is_mapped_class(model), (
            "You need to pass a valid SQLAlchemy Model in " '{}.Meta, received "{}".'
        ).format(cls.__name__, model)

        if not registry:
            registry = get_global_registry()
        assert isinstance(registry, Registry), (
            "The attribute registry in {} needs to be an instance of "
            'Registry, received "{}".'
        ).format(cls.__name__, registry)

        sqla_fields = yank_fields_from_attrs(
            construct_fields(
                model=model,
                registry=registry,
                only_fields=only_fields,
                exclude_fields=exclude_fields,
                connection_field_factory=connection_field_factory
            ),
            _as=Field
        )
        if not _meta:
            _meta = SQLAlchemyInterfaceOptions(cls)
        _meta.model = model
        _meta.registry = registry

        connection = Connection.create_type(
            "{}Connection".format(cls.__name__), node=cls)
        assert issubclass(connection, Connection), (
            "The connection must be a Connection. Received {}"
        ).format(connection.__name__)

        _meta.connection = connection
        if _meta.fields:
            _meta.fields.update(sqla_fields)
        else:
            _meta.fields = sqla_fields

        super(SQLAlchemyInterface, cls).__init_subclass_with_meta__(_meta=_meta, **options)

    @classmethod
    def Field(cls, *args, **kwargs):  # noqa: N802
        return NodeField(cls, *args, **kwargs)

    @classmethod
    def node_resolver(cls, only_type, root, info, id):
        return cls.get_node_from_global_id(info, id, only_type=only_type)

    @classmethod
    def get_node_from_global_id(cls, info, global_id, only_type=None):
        try:
            node: DeclarativeMeta = one_or_none(session=info.context.get('session'), model=cls._meta.model, id=global_id)
            return node
        except Exception:
            return None

    @staticmethod
    def from_global_id(global_id):
        return global_id

    @staticmethod
    def to_global_id(type, id):
        return id
Interface impls, Models + Query code examples:
class CustomNode(Node):
    class Meta:
        name = 'UuidNode'

    @staticmethod
    def to_global_id(type, id):
        return '{}:{}'.format(type, id)

    @staticmethod
    def get_node_from_global_id(info, global_id, only_type=None):
        type, id = global_id.split(':')
        if only_type:
            # We assure that the node type that we want to retrieve
            # is the same that was indicated in the field type
            assert type == only_type._meta.name, 'Received not compatible node.'
        if type == 'User':
            return one_or_none(session=info.context.get('session'), model=User, id=global_id)
        elif type == 'Well':
            return one_or_none(session=info.context.get('session'), model=Well, id=global_id)


class ControlledVocabulary(SQLAlchemyInterface):
    class Meta:
        name = 'ControlledVocabularyNode'
        model = BaseControlledVocabulary


class TrackedEntity(SQLAlchemyInterface):
    class Meta:
        name = 'TrackedEntityNode'
        model = TrackedEntityModel


class Request(SQLAlchemyObjectType):
    """Request node."""

    class Meta:
        model = RequestModel
        interfaces = (TrackedEntity,)


class User(SQLAlchemyObjectType):
    """User node."""

    class Meta:
        model = UserModel
        interfaces = (CustomNode,)


class CvFormFieldValueType(SQLAlchemyObjectType):
    class Meta:
        model = CvFormFieldValueTypeModel
        interfaces = (ControlledVocabulary,)


common_field_kwargs = {'id': graphene.UUID(required=False), 'label': graphene.String(required=False)}


class Query(graphene.ObjectType):
    """Query objects for GraphQL API."""

    node = CustomNode.Field()
    te_node = TrackedEntity.Field()
    cv_node = ControlledVocabulary.Field()

    # Non-tracked entities:
    users: List[User] = SQLAlchemyConnectionField(User)

    # Generic query for any tracked entity:
    tracked_entities: List[TrackedEntity] = FilteredConnectionField(TrackedEntity, sort=None, filter=graphene.Argument(TrackedEntityInput))

    # Generic query for any controlled vocabulary:
    cv: ControlledVocabulary = graphene.Field(ControlledVocabulary,
                                              controlled_vocabulary_type_id=graphene.UUID(required=False),
                                              base_entry_key=graphene.String(required=False),
                                              **common_field_kwargs)
    cvs: List[ControlledVocabulary] = FilteredConnectionField(ControlledVocabulary, sort=None, filter=graphene.Argument(CvInput))

    @staticmethod
    def resolve_with_filters(info: ResolveInfo, model: Type[SQLAlchemyObjectType], **kwargs):
        query = model.get_query(info)
        log.debug(kwargs)
        for filter_name, filter_value in kwargs.items():
            model_filter_column = getattr(model._meta.model, filter_name, None)
            log.debug(type(filter_value))
            if not model_filter_column:
                continue
            if isinstance(filter_value, SQLAlchemyInputObjectType):
                log.debug(True)
                filter_model = filter_value.sqla_model
                q = FilteredConnectionField.get_query(filter_model, info, sort=None, **kwargs)
                # noinspection PyArgumentList
                query = query.filter(model_filter_column == q.filter_by(**filter_value))
                log.info(query)
            else:
                query = query.filter(model_filter_column == filter_value)
        return query

    def resolve_tracked_entity(self, info: ResolveInfo, **kwargs):
        entity: TrackedEntity = Query.resolve_with_filters(info=info, model=BaseTrackedEntity, **kwargs).one()
        return entity

    def resolve_tracked_entities(self, info, **kwargs):
        query = Query.resolve_with_filters(info=info, model=BaseTrackedEntity, **kwargs)
        tes: List[BaseTrackedEntity] = query.all()
        return tes

    def resolve_cv(self, info, **kwargs):
        cv: BaseControlledVocabulary = Query.resolve_with_filters(info=info, model=BaseControlledVocabulary, **kwargs).one()
        log.info(cv)
        return cv

    def resolve_cvs(self, info, **kwargs):
        cv: List[BaseControlledVocabulary] = Query.resolve_with_filters(info=info, model=BaseControlledVocabulary, **kwargs).all()
        return cv
schema:
schema = Schema(query=Query, types=[*tracked_members, *cv_members])
I would like to be able to not extend Node with SQLAlchemyInterface and rather add Node back to the list of interfaces for TrackedEntity and ControlledVocabulary but be able to perform a query like this:
query queryTracked {
    trackedEntities {
        id
        (other fields)
        ... on Request {
            (request specific fields)
        }
    }
}

Importing data to a particular model

I'm starting with instances of the Order object and trying to transform them into a JSON format that will be used to update a table. I'm new to Django-specific code constructs.
My chosen approach is:
Load the data from database as multiple Order instances
Transform the Order instances into an intermediary Table object (the format is given)
Serialize the Table object into JSON
I have gotten quite far, but I can't make the whole application run except in the Python shell. I have the models and serializers already in place.
Can someone show how to take all Order instances at once and transform them into a Table in one API call, plus anything else that is missing from this simple example?
models.py:
# Order corresponds to Line in the Table
class Order(models.Model):
    doc = models.CharField(max_length=200, blank=True, null=True)
    order = models.CharField(max_length=200, blank=True, null=True)
    nothing = models.CharField(max_length=200, blank=True, null=True)

    def __str__(self):
        return self.order


class Table(models.Model):
    pass


class Column(models.Model):
    data = models.CharField(max_length=200, blank=True, null=True)
    table = models.ForeignKey(Table)

    def __str__(self):
        return self.data


class Line(models.Model):
    doc = models.CharField(max_length=200, blank=True, null=True)
    order = models.CharField(max_length=200, blank=True, null=True)
    nothing = models.CharField(max_length=200, blank=True, null=True)
    table = models.ForeignKey(Table)

    def __str__(self):
        return self.order
serializers.py:
class ColumnSerializer(serializers.ModelSerializer):
    class Meta:
        model = Column
        fields = [
            'data'
        ]


class LineSerializer(serializers.ModelSerializer):
    class Meta:
        model = Line
        fields = [
            'doc',
            'order',
            'nothing'
        ]


# Defined after the serializers it references, so the names resolve.
class TableSerializer(serializers.ModelSerializer):
    columns = ColumnSerializer(many=True)
    lines = LineSerializer(many=True)

    class Meta:
        model = Table
        fields = [
            'columns',
            'lines'
        ]
For this relational data:
doc        order   nothing
564251422  564210  5648
546546545  98745   4668
JSON output should be:
{
    "columns": [
        {
            "data": "doc"
        },
        {
            "data": "order"
        },
        {
            "data": "nothing"
        }
    ],
    "lines": [
        {
            "doc": "564251422",
            "nothing": 0.0,
            "order": "56421"
        },
        {
            "doc": "546546545",
            "nothing": 0.0,
            "order": "98745"
        }
    ]
}
Shell commands:
import polls.models
polls.models.Order.objects.all() # outputs all orders
table = polls.models.Table()
polls.models.Column(data="doc",table=table)
polls.models.Column(data="order",table=table)
polls.models.Column(data="nothing",table=table)
polls.models.Line(doc="564251422",order="56421",nothing="0.0",table=table)
polls.models.Line(doc="546546545",order="98745",nothing="0.0",table=table)
views.py (update):
def my(request):
    bunch = OrderSerializer(Order.objects.all(), many=True)
    headers = bunch.data[0].keys()
    headers_prepared = map(lambda x: {'data': x}, headers)
    ordered_all = (('columns', headers_prepared), ('lines', bunch.data))
    data = collections.OrderedDict(ordered_all)
    data_json = JSONRenderer().render(data)
    return JsonResponse(data_json, safe=False)
This question may relate to another one (mentioned in the comments), but for this particular case:
Since you are using Django REST Framework (the right way), to get all the data in JSON format you may do the following.
In serializers.py:
from rest_framework import serializers
from polls.models import Order


class OrderSerializer(serializers.ModelSerializer):
    class Meta:
        model = Order
        fields = ('doc', 'order', 'nothing')
        # note we do not use other serializers
Next, in the shell (views in the future):
from rest_framework.renderers import JSONRenderer
from polls.models import Order
from polls.serializers import OrderSerializer

bunch = OrderSerializer(Order.objects.all(), many=True)
# this will output the "lines" part of the desired output
JSONRenderer().render(bunch.data)

# next, to get the columns (or as I say, headers)
headers = bunch.data[0].keys()
# headers = ['doc', 'order', 'nothing']
# !note: will fail if bunch.data is empty
headers_prepared = list(map(lambda x: {'data': x}, headers))  # list() so it serializes under Python 3
# headers_prepared = [{'data': 'doc'}, {'data': 'order'}, {'data': 'nothing'}]

import collections  # need to use OrderedDict to keep our sequence
ordered_all = (('columns', headers_prepared), ('lines', bunch.data))

# finally, the desired output
JSONRenderer().render(collections.OrderedDict(ordered_all))
# all code and output tested on my dummy data
UPDATE in urls.py:
urlpatterns = [
    ...
    url(r'my/', url_patterns_view) if False else url(r'my/', my),
    ...
]
add in views.py:
# since you didn't show JSONResponse (it comes from the REST framework docs), here it is
from django.http import HttpResponse


class JSONResponse(HttpResponse):
    """
    An HttpResponse that renders its content into JSON.
    """
    def __init__(self, data, **kwargs):
        content = JSONRenderer().render(data)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)


# now our view
import collections


def my(request):
    bunch = OrderSerializer(Order.objects.all(), many=True)
    # to get headers independent of the presence of orders
    empty = OrderSerializer()
    headers = empty.data.keys()
    headers_prepared = list(map(lambda x: {'data': x}, headers))
    ordered_all = (('columns', headers_prepared), ('lines', bunch.data))
    out = collections.OrderedDict(ordered_all)
    # finally, the desired output
    return JSONResponse(out)

Tastypie API return only one object for given parameters

I am overriding the obj_get_list function. Based on some parameters and a random function, I return an object related to the actual object that matches the parameters. This works fine. How can I return only this object instead of a one-entry list? Is there another function that better fits my purpose?
class SentenceRandomResource(ModelResource):
    class Meta:
        queryset = Sentence.objects.filter()
        resource_name = 'sentence/random'
        always_return_data = True
        authorization = ReadOnlyAuthorization()
        filtering = {'internal': ALL}

    def obj_get_list(self, bundle, **kwargs):
        if 'case' in bundle.request.GET.keys() and 'lemma' in bundle.request.GET.keys():
            if 'number' in bundle.request.GET.keys():
                words = Word.objects.filter(case=bundle.request.GET['case'], number=bundle.request.GET['number'], lemma=bundle.request.GET['lemma'])
            else:
                words = Word.objects.filter(case=bundle.request.GET['case'], lemma=bundle.request.GET['lemma'])
            number_of_words = len(words)
            if number_of_words > 0:
                random_index = int(random.random() * number_of_words)
                random_word = words[random_index]
                sentence = random_word.sentence
                return [sentence]
            else: ...
        else: ...
Thanks to the prepend_urls method you may add special functionality not covered by RESTful principles.
import random

from django.conf.urls import url
from tastypie.http import HttpBadRequest
from tastypie.utils import trailing_slash


class SentenceRandomResource(ModelResource):
    class Meta:
        queryset = Sentence.objects.filter()
        resource_name = 'sentence/random'
        always_return_data = True
        authorization = ReadOnlyAuthorization()
        filtering = {'internal': ALL}

    def prepend_urls(self, *args, **kwargs):
        name = 'get_one_random'
        return [url(r"^(?P<resource_name>%s)/%s%s$" %
                    (self._meta.resource_name, name, trailing_slash()),
                    self.wrap_view(name), name="api_%s" % name)]

    def get_one_random(self, request, **kwargs):
        """
        Gets one random sentence of sentences with provided `case` and `lemma`
        params.
        """
        case = request.GET.get('case')
        lemma = request.GET.get('lemma')
        number = request.GET.get('number')
        if case and lemma:
            query_params = {'case': case, 'lemma': lemma}
            if number is not None:
                query_params['number'] = number
            words = Word.objects.filter(**query_params)
            word = random.choice(words)
            return self.create_response(request, {'sentence': word.sentence.__dict__})
        else:
            return self.error_response(request, {'error': 'lemma and case are required.'},
                                       response_class=HttpBadRequest)
Example use:
GET ..../sentence/random/get_one_random/?case=1&lemma=2
{'sentence': 'asdfasdf'}
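One caveat worth noting: random.choice raises IndexError when the filtered queryset is empty. A guarded variant of the view (my addition, not part of the answer above) might look like this:

def get_one_random(self, request, **kwargs):
    """Same as above, but guards against an empty queryset (hypothetical)."""
    case = request.GET.get('case')
    lemma = request.GET.get('lemma')
    number = request.GET.get('number')
    if not (case and lemma):
        return self.error_response(request, {'error': 'lemma and case are required.'},
                                   response_class=HttpBadRequest)
    query_params = {'case': case, 'lemma': lemma}
    if number is not None:
        query_params['number'] = number
    words = Word.objects.filter(**query_params)
    if not words.exists():
        # random.choice would raise IndexError on an empty sequence
        return self.error_response(request, {'error': 'no matching words found.'},
                                   response_class=HttpBadRequest)
    word = random.choice(words)
    return self.create_response(request, {'sentence': word.sentence.__dict__})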

SQLAlchemy session woes in unittest

I've just started using SQLAlchemy a few days ago and right now I'm stuck with a problem that I hope someone can shed some light on before I lose all my hair.
When I run a unittest (see the snippet below), only the first test in the sequence passes. The test testPrintItem works just fine, but testDigitalItem fails with a NoResultFound exception - No row was found for one(). But if I remove testPrintItem from the test class, then testDigitalItem works.
I assume that the problem has something to do with the session, but I can't really get a grip of it.
In case anyone wonders, the setup is as follows:
Python 3.1 (Ubuntu 10.04 package)
SQLAlchemy 0.7.2 (installed via easy_install)
PostgreSQL 8.4.8 (Ubuntu 10.04 package)
psycopg2 2.4.2 (installed via easy_install)
Example test:
class TestSchema(unittest.TestCase):

    test_items = [
        # Some parent class products
        PrintItem(key='p1', title='Possession', dimension='30x24'),
        PrintItem(key='p2', title='Andrzej Żuławski - a director', dimension='22x14'),
        DigitalItem(key='d1', title='Every Man His Own University', url='http://www.gutenberg.org/files/36955/36955-h/36955-h.htm'),
        DigitalItem(key='d2', title='City Ballads', url='http://www.gutenberg.org/files/36954/36954-h/36954-h.htm'),
    ]

    def testPrintItem(self):
        item = self.session.query(PrintItem).filter(PrintItem.key == 'p1').one()
        assert item.title == 'Possession', 'Title mismatch'

    def testDigitalItem(self):
        item2 = self.session.query(DigitalItem).filter(DigitalItem.key == 'd2').one()
        assert item2.title == 'City Ballads', 'Title mismatch'

    def setUp(self):
        Base.metadata.create_all()
        self.session = DBSession()
        self.session.add_all(self.test_items)
        self.session.commit()

    def tearDown(self):
        self.session.close()
        Base.metadata.drop_all()


if __name__ == '__main__':
    unittest.main()
UPDATE
Here is the working code snippet.
# -*- coding: utf-8 -*-

import time
import unittest

from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *

Base = declarative_base()
engine = create_engine('sqlite:///testdb', echo=False)
DBSession = sessionmaker(bind=engine)


class ItemMixin(object):
    """
    Common attributes for items, ie books, DVDs...
    """
    __tablename__ = 'testitems'
    __table_args__ = {'extend_existing': True}

    id = Column(Integer, autoincrement=True, primary_key=True)
    key = Column(Unicode(16), unique=True, nullable=False)
    title = Column(UnicodeText, default=None)
    item_type = Column(Unicode(20), default=None)

    __mapper_args__ = {'polymorphic_on': item_type}

    def __init__(self, key, title=None):
        self.key = key
        self.title = title


class FooItem(Base, ItemMixin):
    foo = Column(UnicodeText, default=None)
    __mapper_args__ = {'polymorphic_identity': 'foo'}

    def __init__(self, foo=None, **kwargs):
        ItemMixin.__init__(self, **kwargs)
        self.foo = foo


class BarItem(Base, ItemMixin):
    bar = Column(UnicodeText, default=None)
    __mapper_args__ = {'polymorphic_identity': 'bar'}

    def __init__(self, bar=None, **kwargs):
        ItemMixin.__init__(self, **kwargs)
        self.bar = bar


# Tests
class TestSchema(unittest.TestCase):
    # Class variables
    is_setup = False
    session = None
    metadata = None

    test_items = [
        FooItem(key='f1', title='Possession', foo='Hello'),
        FooItem(key='f2', title='Andrzej Żuławsk', foo='World'),
        BarItem(key='b1', title='Wikipedia', bar='World'),
        BarItem(key='b2', title='City Ballads', bar='Hello'),
    ]

    def testFooItem(self):
        print('Test Foo Item')
        item = self.__class__.session.query(FooItem).filter(FooItem.key == 'f1').first()
        assert item.title == 'Possession', 'Title mismatch'

    def testBarItem(self):
        print('Test Bar Item')
        item = self.__class__.session.query(BarItem).filter(BarItem.key == 'b2').first()
        assert item.title == 'City Ballads', 'Title mismatch'

    def setUp(self):
        if not self.__class__.is_setup:
            self.__class__.session = DBSession()
            self.metadata = Base.metadata
            self.metadata.bind = engine
            self.metadata.drop_all()    # Drop tables
            self.metadata.create_all()  # Create tables
            self.__class__.session.add_all(self.test_items)  # Add data
            self.__class__.session.commit()  # Commit
            self.__class__.is_setup = True

    def tearDown(self):
        if self.__class__.is_setup:
            self.__class__.session.close()

    # Just for Python >=2.7 or >=3.2
    @classmethod
    def setUpClass(cls):
        pass

    # Just for Python >=2.7 or >=3.2
    @classmethod
    def tearDownClass(cls):
        pass


if __name__ == '__main__':
    unittest.main()
The most likely reason for this behavior is that the data is not properly cleaned up between the tests. This explains why it works when you run only one test.
setUp is called before every test, and tearDown after.
Depending on what you would like to achieve, you have two options:
Create data only once for all tests.
In this case, if you had Python 2.7+ or 3.2+, you could use the setUpClass and tearDownClass methods. In your case you can handle it with a boolean class variable that prevents the code in setUp from running more than once.
Re-create data before every test.
In this case you need to make sure that tearDown deletes all the data. This is what you are not doing right now, and I suspect that when the second test runs, the call to one() fails not because it does not find an object, but because it finds more than one object matching the criteria. A sketch of this option follows.
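As a sketch of this second option (my illustration, not from the original post): one pitfall in the test class above is that test_items is built once at class level, so the same already-persisted instances get re-added by every setUp. Building fresh instances per test and dropping everything in tearDown avoids that:

class TestSchema(unittest.TestCase):

    def setUp(self):
        Base.metadata.create_all()
        self.session = DBSession()
        # Build fresh instances per test instead of reusing class-level
        # objects that were persisted by an earlier test.
        self.session.add_all([
            PrintItem(key='p1', title='Possession', dimension='30x24'),
            DigitalItem(key='d2', title='City Ballads', url='http://www.gutenberg.org/files/36954/36954-h/36954-h.htm'),
        ])
        self.session.commit()

    def tearDown(self):
        # Roll back anything pending, then drop the schema so the next
        # test starts from a clean slate.
        self.session.rollback()
        self.session.close()
        Base.metadata.drop_all()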
Check the output of this code to understand the call sequence:
import unittest


class TestSchema(unittest.TestCase):

    def testOne(self):
        print('==testOne')

    def testTwo(self):
        print('==testTwo')

    def setUp(self):
        print('>>setUp')

    def tearDown(self):
        print('<<tearDown')

    @classmethod
    def setUpClass(cls):
        print('>>setUpClass')

    @classmethod
    def tearDownClass(cls):
        print('<<tearDownClass')


if __name__ == '__main__':
    unittest.main()
Output:
>>setUpClass
>>setUp
==testOne
<<tearDown
>>setUp
==testTwo
<<tearDown
<<tearDownClass
I have this as my tearDown method and it does work fine for my tests:
def tearDown(self):
    """Cleans up after each test case."""
    sqlalchemy.orm.clear_mappers()