JSON field in django-haystack with Elasticsearch engine

How can I store JSON in Elasticsearch via django-haystack fields?
At the moment I am using the following trick:
# search_indexes.py
import json
from haystack import indexes
from . import serializers  # app's own serializers module (UserMinimalSerializer)

class BlogPostIndex(indexes.SearchIndex, indexes.Indexable):
    author = indexes.CharField()

    @staticmethod
    def prepare_author(obj):
        return json.dumps(serializers.UserMinimalSerializer(obj.author).data)

# serializers.py
import json
from drf_haystack.serializers import HaystackSerializer
from rest_framework import serializers

class BlogPostSearchSerializer(HaystackSerializer):
    author = serializers.SerializerMethodField()

    @staticmethod
    def get_author(obj):
        return json.loads(obj.author)
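For context, haystack's CharField stores a flat string, so the dumps/loads pair above is the usual workaround. A minimal sketch of the round trip, with an assumed UserMinimalSerializer payload (not from the original post):

import json

# Assumed output shape of UserMinimalSerializer:
author_payload = {"id": 1, "username": "alice"}

indexed_value = json.dumps(author_payload)   # what prepare_author writes to the index
print(indexed_value)                         # '{"id": 1, "username": "alice"}'
print(json.loads(indexed_value))             # {'id': 1, 'username': 'alice'} -- nested again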

Related

How do I make a dict subclass json serializable?

I can represent my Simple_Dict_Subclass and List_Subclass with json.dumps, but not Custom_Dict_Subclass. When json.dumps is called on List_Subclass, its __iter__ method is called, so I reasoned that json.dumps would call a dictionary's items method. And items is called in Simple_Dict_Subclass but not Custom_Dict_Subclass. How can I make my Custom_Dict_Subclass json serializable like Simple_Dict_Subclass?
import json

class Custom_Dict_Subclass(dict):
    def __init__(self):
        self.data = {}
    def __setitem__(self, key, value):
        self.data[key] = value
    def __getitem__(self, key):
        return self.data[key]
    def __str__(self):
        return str(self.data)
    def items(self):
        print("'Items' called from Custom_Dict_Subclass")
        yield from self.data.items()

class Simple_Dict_Subclass(dict):
    def __setitem__(self, key, value):
        super().__setitem__(key, value)
    def __getitem__(self, key):
        return super().__getitem__(key)
    def __str__(self):
        return super().__str__()
    def items(self):
        print("'Items' called from Simple_Dict_Subclass")
        yield from super().items()

class List_Subclass(list):
    def __init__(self):
        self.data = []
    def __setitem__(self, index, value):
        self.data[index] = value
    def __getitem__(self, index):
        return self.data[index]
    def __str__(self):
        return str(self.data)
    def __iter__(self):
        yield from self.data
    def append(self, value):
        self.data.append(value)

d = Custom_Dict_Subclass()
d[0] = None
print(d)             # Works
print(json.dumps(d)) # Doesn't work

d = Simple_Dict_Subclass()
d[0] = None
print(d)             # Works
print(json.dumps(d)) # Works

l = List_Subclass()
l.append(None)
print(l)             # Works
print(json.dumps(l)) # Works
Output:
{0: None} # Custom dict string working
{} # Custom dict json.dumps not working
{0: None} # Simple dict string working
'Items' called from Simple_Dict_Subclass
{"0": null} # Simple dict json.dumps working
[None] # List string working
[null] # List json.dumps working
Generally speaking, it is not safe to assume that json.dumps will
trigger the items method of the dictionary. This is how it is
implemented but you cannot rely on that.
In your case, the Custom_Dict_Subclass.items is never called because
(key, value) pairs are not added to the dict object but to its
data attribute.
To fix that you need to invoke the super methods in
Custom_Dict_Subclass:
class Custom_Dict_Subclass(dict):
    def __init__(self):
        dict.__init__(self)
        self.data = {}
    def __setitem__(self, key, value):
        self.data[key] = value
        super().__setitem__(key, value)
The object is dumped correctly, but of course, (key, value) will then
be stored twice: in the dict object and in its data attribute.
In that situation, it is better to define a subclass of
json.JSONEncoder to implement the translation of a
Custom_Dict_Subclass object to a json serialisable object, and to
give this class as the keyword argument cls of json.dumps:
import json

class Custom_Dict_Subclass:
    def __init__(self):
        self.data = {}
    def __setitem__(self, key, value):
        self.data[key] = value
    def __getitem__(self, key):
        return self.data[key]
    def __str__(self):
        return str(self.data)
    def items(self):
        print("'Items' called from Custom_Dict_Subclass")
        yield from self.data.items()

class CustomDictEncoder(json.JSONEncoder):
    def default(self, obj):
        """called by json.dumps to translate an object obj to
        a json serialisable data"""
        if isinstance(obj, Custom_Dict_Subclass):
            return obj.data
        return json.JSONEncoder.default(self, obj)

d = Custom_Dict_Subclass()
d[0] = None
print(json.dumps(d, cls=CustomDictEncoder))
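A lighter-weight variant (my addition, not part of the original answer) passes a default= callable straight to json.dumps, which avoids defining an encoder class:

import json

def encode_custom(obj):
    # Fallback invoked by json.dumps for objects it cannot serialise natively.
    if isinstance(obj, Custom_Dict_Subclass):
        return obj.data
    raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")

d = Custom_Dict_Subclass()
d[0] = None
print(json.dumps(d, default=encode_custom))  # {"0": null}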

How to use nested pydantic models for sqlalchemy in a flexible way

from fastapi import Depends, FastAPI, HTTPException, Body, Request
from sqlalchemy import create_engine, Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker, relationship
from sqlalchemy.inspection import inspect
from typing import List, Optional
from pydantic import BaseModel
import json

SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
app = FastAPI()

# sqlalchemy models
class RootModel(Base):
    __tablename__ = "root_table"
    id = Column(Integer, primary_key=True, index=True)
    someRootText = Column(String)
    subData = relationship("SubModel", back_populates="rootData")

class SubModel(Base):
    __tablename__ = "sub_table"
    id = Column(Integer, primary_key=True, index=True)
    someSubText = Column(String)
    root_id = Column(Integer, ForeignKey("root_table.id"))
    rootData = relationship("RootModel", back_populates="subData")

# pydantic models/schemas
class SchemaSubBase(BaseModel):
    someSubText: str

    class Config:
        orm_mode = True

class SchemaSub(SchemaSubBase):
    id: int
    root_id: int

    class Config:
        orm_mode = True

class SchemaRootBase(BaseModel):
    someRootText: str
    subData: List[SchemaSubBase] = []

    class Config:
        orm_mode = True

class SchemaRoot(SchemaRootBase):
    id: int

    class Config:
        orm_mode = True

class SchemaSimpleBase(BaseModel):
    someRootText: str

    class Config:
        orm_mode = True

class SchemaSimple(SchemaSimpleBase):
    id: int

    class Config:
        orm_mode = True

Base.metadata.create_all(bind=engine)

# database functions (CRUD)
def db_add_simple_data_pydantic(db: Session, root: SchemaRootBase):
    db_root = RootModel(**root.dict())
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root

def db_add_nested_data_pydantic_generic(db: Session, root: SchemaRootBase):
    # this fails:
    db_root = RootModel(**root.dict())
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root

def db_add_nested_data_pydantic(db: Session, root: SchemaRootBase):
    # start: hack: I have to manually generate the sqlalchemy model from the pydantic model
    root_dict = root.dict()
    sub_dicts = []
    # I have to remove the list from the root dict in order to fix the error from above
    for key in list(root_dict):
        if isinstance(root_dict[key], list):
            sub_dicts = root_dict[key]
            del root_dict[key]
    # now I can do it
    db_root = RootModel(**root_dict)
    for sub_dict in sub_dicts:
        db_root.subData.append(SubModel(**sub_dict))
    # end: hack
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    return db_root

def db_add_nested_data_nopydantic(db: Session, root):
    print(root)
    sub_dicts = root.pop("subData")
    print(sub_dicts)
    db_root = RootModel(**root)
    for sub_dict in sub_dicts:
        db_root.subData.append(SubModel(**sub_dict))
    db.add(db_root)
    db.commit()
    db.refresh(db_root)
    # problem
    """
    If I would now "return db_root", the response would be this:
    {
        "someRootText": "string",
        "id": 24
    }
    and would not contain "subData"; therefore I have to do the following.
    Why?
    """
    from sqlalchemy.orm import joinedload
    db_root = (
        db.query(RootModel)
        .options(joinedload(RootModel.subData))
        .filter(RootModel.id == db_root.id)
        .all()
    )[0]
    return db_root

# Dependency
def get_db():
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()

@app.post("/addNestedModel_pydantic_generic", response_model=SchemaRootBase)
def add_nested_data_pydantic_generic(root: SchemaRootBase, db: Session = Depends(get_db)):
    data = db_add_nested_data_pydantic_generic(db=db, root=root)
    return data

@app.post("/addSimpleModel_pydantic", response_model=SchemaSimpleBase)
def add_simple_data_pydantic(root: SchemaSimpleBase, db: Session = Depends(get_db)):
    data = db_add_simple_data_pydantic(db=db, root=root)
    return data

@app.post("/addNestedModel_nopydantic")
def add_nested_data_nopydantic(root=Body(...), db: Session = Depends(get_db)):
    data = db_add_nested_data_nopydantic(db=db, root=root)
    return data

@app.post("/addNestedModel_pydantic", response_model=SchemaRootBase)
def add_nested_data_pydantic(root: SchemaRootBase, db: Session = Depends(get_db)):
    data = db_add_nested_data_pydantic(db=db, root=root)
    return data
Description
My Question is:
How to make nested sqlalchemy models from nested pydantic models (or python dicts) in a generic way and write them to the database in "one shot".
My example model is called RootModel and has a list of submodels ("sub models") under the subData key.
Please see above for pydantic and sqlalchemy definitions.
Example:
The user provides a nested json string:
{
    "someRootText": "string",
    "subData": [
        {
            "someSubText": "string"
        }
    ]
}
Open the browser and call the endpoint /docs.
You can play around with all endpoints and POST the json string from above.
/addNestedModel_pydantic_generic
When you call the endpoint /addNestedModel_pydantic_generic, it will fail because sqlalchemy cannot create the nested model from the nested pydantic model directly:
AttributeError: 'dict' object has no attribute '_sa_instance_state'
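For illustration, a minimal reproduction with the models above: the subData relationship expects mapped SubModel instances, and passing raw dicts is exactly what raises this error:

# Fails: subData receives plain dicts, which have no _sa_instance_state.
RootModel(someRootText="string", subData=[{"someSubText": "string"}])

# Works: the dicts are turned into mapped instances first.
RootModel(someRootText="string", subData=[SubModel(someSubText="string")])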
/addSimpleModel_pydantic
With a non-nested model it works.
The remaining endpoints show "hacks" to solve the problem of nested models.
/addNestedModel_pydantic
In this endpoint I generate the root model and add the submodels with a loop, in a non-generic way, using pydantic models.
/addNestedModel_nopydantic
In this endpoint I generate the root model and add the submodels with a loop, in a non-generic way, using python dicts.
My solutions are only hacks; I want a generic way to create nested sqlalchemy models either from pydantic (preferred) or from a python dict.
Environment
OS: Windows
FastAPI version: 0.61.1
Python version: 3.8.5
sqlalchemy: 1.3.19
pydantic: 1.6.1
I haven't found a nice built-in way to do this within pydantic/SQLAlchemy. How I solved it: I gave every nested pydantic model a Meta class containing the corresponding SQLAlchemy model. Like so:
from pydantic import BaseModel
from models import ChildDBModel, ParentDBModel

class ChildModel(BaseModel):
    some_attribute: str = 'value'

    class Meta:
        orm_model = ChildDBModel

class ParentModel(BaseModel):
    child: ChildModel
That allowed me to write a generic function that loops through the pydantic object and transforms submodels into SQLAlchemy models:
def is_pydantic(obj: object):
    """Checks whether an object is pydantic."""
    return type(obj).__class__.__name__ == "ModelMetaclass"

def parse_pydantic_schema(schema):
    """
    Iterates through pydantic schema and parses nested schemas
    to a dictionary containing SQLAlchemy models.
    Only works if nested schemas have specified the Meta.orm_model.
    """
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        try:
            if isinstance(value, list) and len(value):
                if is_pydantic(value[0]):
                    parsed_schema[key] = [
                        item.Meta.orm_model(**item.dict()) for item in value
                    ]
            else:
                if is_pydantic(value):
                    parsed_schema[key] = value.Meta.orm_model(**value.dict())
        except AttributeError:
            raise AttributeError("Found nested Pydantic model but Meta.orm_model was not specified.")
    return parsed_schema
The parse_pydantic_schema function returns a dictionary representation of the pydantic model where submodels are substituted by the corresponding SQLAlchemy model specified in Meta.orm_model. You can use this return value to create the parent SQLAlchemy model in one go:
parsed_schema = parse_pydantic_schema(parent_model) # parent_model is an instance of pydantic ParentModel
new_db_model = ParentDBModel(**parsed_schema)
# do your db actions/commit here
If you want you can even extend this to also automatically create the parent model, but that requires you to also specify the Meta.orm_model for all pydantic models.
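For instance, a minimal sketch of that extension (assuming, beyond the answer above, that the top-level pydantic model also declares Meta.orm_model):

def pydantic_to_orm(schema):
    # Builds the SQLAlchemy instance for `schema` itself, reusing
    # parse_pydantic_schema for its nested fields.
    return schema.Meta.orm_model(**parse_pydantic_schema(schema))

# Hypothetical usage, given ParentModel declares Meta.orm_model = ParentDBModel:
# new_db_model = pydantic_to_orm(parent_model)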
Using validators is a lot simpler:
SQLAlchemy models.py:
from sqlalchemy import Column, Unicode
from sqlalchemy.orm import relationship, validates

class ChildModel(Base):
    __tablename__ = "Child"

    name: str = Column(Unicode(255), nullable=False, primary_key=True)

class ParentModel(Base):
    __tablename__ = "Parent"

    some_attribute: str = Column(Unicode(255))
    children = relationship("Child", lazy="joined", cascade="all, delete-orphan")

    @validates("children")
    def adjust_children(self, _, value) -> ChildModel:
        """Instantiate a Child object if it is only a plain string."""
        if value and isinstance(value, str):
            return ChildModel(name=value)
        return value
Pydantic schema.py:
from typing import List

from pydantic import BaseModel, Field, validator

class Parent(BaseModel):
    """Model used for parents."""

    some_attribute: str
    children: List[str] = Field(example=["foo", "bar"], default=[])

    @validator("children", pre=True)
    def adjust_children(cls, children):
        """Convert to plain strings if they are Child objects."""
        if children and not isinstance(next(iter(children), None), str):
            return [child["name"] for child in children]
        return children
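With both validators in place, the generic one-shot pattern from the question should work again; a sketch using the models above:

# Pydantic -> ORM in one shot: the @validates("children") hook converts each
# plain string into a ChildModel as the collection is assigned.
parent_schema = Parent(some_attribute="hello", children=["foo", "bar"])
db_parent = ParentModel(**parent_schema.dict())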
Nice function @dann; for more than two levels of nesting you can use this recursive function:
def pydantic_to_sqlalchemy_model(schema):
    """
    Iterates through pydantic schema and parses nested schemas
    to a dictionary containing SQLAlchemy models.
    Only works if nested schemas have specified the Meta.orm_model.
    """
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        try:
            if isinstance(value, list) and len(value) and is_pydantic(value[0]):
                parsed_schema[key] = [
                    item.Meta.orm_model(**pydantic_to_sqlalchemy_model(item))
                    for item in value
                ]
            elif is_pydantic(value):
                parsed_schema[key] = value.Meta.orm_model(
                    **pydantic_to_sqlalchemy_model(value)
                )
        except AttributeError:
            raise AttributeError(
                f"Found nested Pydantic model in {schema.__class__} but Meta.orm_model was not specified."
            )
    return parsed_schema
Use it sparingly! If you have cyclic nesting it will loop forever.
And then call your data transformer like this:
def create_parent(db: Session, parent: Parent_pydantic_schema):
    db_parent = Parent_model(**pydantic_to_sqlalchemy_model(parent))
    db.add(db_parent)
    db.commit()
    return db_parent
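If cyclic nesting is a possibility, a guarded variant (my addition) can track visited objects by id and fail fast instead of recursing forever:

def pydantic_to_sqlalchemy_model_safe(schema, _seen=None):
    """Cycle-guarded variant: refuses to revisit the same pydantic object."""
    _seen = set() if _seen is None else _seen
    if id(schema) in _seen:
        raise ValueError(f"Cyclic nesting detected in {schema.__class__}")
    _seen.add(id(schema))
    parsed_schema = dict(schema)
    for key, value in parsed_schema.items():
        if isinstance(value, list) and value and is_pydantic(value[0]):
            parsed_schema[key] = [
                item.Meta.orm_model(**pydantic_to_sqlalchemy_model_safe(item, _seen))
                for item in value
            ]
        elif is_pydantic(value):
            parsed_schema[key] = value.Meta.orm_model(
                **pydantic_to_sqlalchemy_model_safe(value, _seen)
            )
    return parsed_schema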

ID collision with Graphene-SQLAlchemy Interface class plus Node Interface

I've written Graphene models for polymorphic entities represented in my database with SQLAlchemy.
The problem is simple:
I want to create an interface that reflects my SQLAlchemy models for Graphene but also either a) implements Node or b) does not conflict with Node and allows me to retrieve the model's ID without needing to add ... on Node {id} to the query string.
I have to exclude the ID field from my ORM-based interface or the field conflicts with the Node interface; but by excluding it, in order to get the ID you then need to add ... on Node { id }, which is ugly.
I created an SQLAlchemyInterface object that extends graphene.Interface. Many (but not all) of my models used this as well as Node as interfaces. The first problem was that this contains an ID field and it conflicted with the Node interface.
I excluded the id field to not interfere with Node, but then found I could not directly query ID on my models anymore, and had to add ... on Node {id} to the query string.
I then decided to have this SQLAlchemyInterface extend Node. I don't love this approach because I need to use another (named) Node interface for all of my models that don't necessarily need to implement SQLAlchemyInterface:
class SQLAlchemyInterface(Node):
    @classmethod
    def __init_subclass_with_meta__(
        cls,
        model=None,
        registry=None,
        only_fields=(),
        exclude_fields=(),
        connection_field_factory=default_connection_field_factory,
        _meta=None,
        **options
    ):
        _meta = SQLAlchemyInterfaceOptions(cls)
        _meta.name = f'{cls.__name__}Node'
        autoexclude_columns = exclude_autogenerated_sqla_columns(model=model)
        exclude_fields += autoexclude_columns
        assert is_mapped_class(model), (
            "You need to pass a valid SQLAlchemy Model in " '{}.Meta, received "{}".'
        ).format(cls.__name__, model)
        if not registry:
            registry = get_global_registry()
        assert isinstance(registry, Registry), (
            "The attribute registry in {} needs to be an instance of "
            'Registry, received "{}".'
        ).format(cls.__name__, registry)
        sqla_fields = yank_fields_from_attrs(
            construct_fields(
                model=model,
                registry=registry,
                only_fields=only_fields,
                exclude_fields=exclude_fields,
                connection_field_factory=connection_field_factory
            ),
            _as=Field
        )
        if not _meta:
            _meta = SQLAlchemyInterfaceOptions(cls)
        _meta.model = model
        _meta.registry = registry
        connection = Connection.create_type(
            "{}Connection".format(cls.__name__), node=cls)
        assert issubclass(connection, Connection), (
            "The connection must be a Connection. Received {}"
        ).format(connection.__name__)
        _meta.connection = connection
        if _meta.fields:
            _meta.fields.update(sqla_fields)
        else:
            _meta.fields = sqla_fields
        super(SQLAlchemyInterface, cls).__init_subclass_with_meta__(_meta=_meta, **options)

    @classmethod
    def Field(cls, *args, **kwargs):  # noqa: N802
        return NodeField(cls, *args, **kwargs)

    @classmethod
    def node_resolver(cls, only_type, root, info, id):
        return cls.get_node_from_global_id(info, id, only_type=only_type)

    @classmethod
    def get_node_from_global_id(cls, info, global_id, only_type=None):
        try:
            node: DeclarativeMeta = one_or_none(session=info.context.get('session'), model=cls._meta.model, id=global_id)
            return node
        except Exception:
            return None

    @staticmethod
    def from_global_id(global_id):
        return global_id

    @staticmethod
    def to_global_id(type, id):
        return id
Interface impls, Models + Query code examples:
class CustomNode(Node):
    class Meta:
        name = 'UuidNode'

    @staticmethod
    def to_global_id(type, id):
        return '{}:{}'.format(type, id)

    @staticmethod
    def get_node_from_global_id(info, global_id, only_type=None):
        type, id = global_id.split(':')
        if only_type:
            # We assure that the node type that we want to retrieve
            # is the same that was indicated in the field type
            assert type == only_type._meta.name, 'Received not compatible node.'
        if type == 'User':
            return one_or_none(session=info.context.get('session'), model=User, id=global_id)
        elif type == 'Well':
            return one_or_none(session=info.context.get('session'), model=Well, id=global_id)

class ControlledVocabulary(SQLAlchemyInterface):
    class Meta:
        name = 'ControlledVocabularyNode'
        model = BaseControlledVocabulary

class TrackedEntity(SQLAlchemyInterface):
    class Meta:
        name = 'TrackedEntityNode'
        model = TrackedEntityModel

class Request(SQLAlchemyObjectType):
    """Request node."""

    class Meta:
        model = RequestModel
        interfaces = (TrackedEntity,)

class User(SQLAlchemyObjectType):
    """User node."""

    class Meta:
        model = UserModel
        interfaces = (CustomNode,)

class CvFormFieldValueType(SQLAlchemyObjectType):
    class Meta:
        model = CvFormFieldValueTypeModel
        interfaces = (ControlledVocabulary,)

common_field_kwargs = {'id': graphene.UUID(required=False), 'label': graphene.String(required=False)}

class Query(graphene.ObjectType):
    """Query objects for GraphQL API."""

    node = CustomNode.Field()
    te_node = TrackedEntity.Field()
    cv_node = ControlledVocabulary.Field()
    # Non-Tracked Entities:
    users: List[User] = SQLAlchemyConnectionField(User)
    # Generic Query for any Tracked Entity:
    tracked_entities: List[TrackedEntity] = FilteredConnectionField(TrackedEntity, sort=None, filter=graphene.Argument(TrackedEntityInput))
    # Generic Query for any Controlled Vocabulary:
    cv: ControlledVocabulary = graphene.Field(ControlledVocabulary,
                                              controlled_vocabulary_type_id=graphene.UUID(required=False),
                                              base_entry_key=graphene.String(required=False),
                                              **common_field_kwargs)
    cvs: List[ControlledVocabulary] = FilteredConnectionField(ControlledVocabulary, sort=None, filter=graphene.Argument(CvInput))

    @staticmethod
    def resolve_with_filters(info: ResolveInfo, model: Type[SQLAlchemyObjectType], **kwargs):
        query = model.get_query(info)
        log.debug(kwargs)
        for filter_name, filter_value in kwargs.items():
            model_filter_column = getattr(model._meta.model, filter_name, None)
            log.debug(type(filter_value))
            if not model_filter_column:
                continue
            if isinstance(filter_value, SQLAlchemyInputObjectType):
                log.debug(True)
                filter_model = filter_value.sqla_model
                q = FilteredConnectionField.get_query(filter_model, info, sort=None, **kwargs)
                # noinspection PyArgumentList
                query = query.filter(model_filter_column == q.filter_by(**filter_value))
                log.info(query)
            else:
                query = query.filter(model_filter_column == filter_value)
        return query

    def resolve_tracked_entity(self, info: ResolveInfo, **kwargs):
        entity: TrackedEntity = Query.resolve_with_filters(info=info, model=BaseTrackedEntity, **kwargs).one()
        return entity

    def resolve_tracked_entities(self, info, **kwargs):
        query = Query.resolve_with_filters(info=info, model=BaseTrackedEntity, **kwargs)
        tes: List[BaseTrackedEntity] = query.all()
        return tes

    def resolve_cv(self, info, **kwargs):
        cv: List[BaseControlledVocabulary] = Query.resolve_with_filters(info=info, model=BaseControlledVocabulary, **kwargs).one()
        log.info(cv)
        return cv

    def resolve_cvs(self, info, **kwargs):
        cv: List[BaseControlledVocabulary] = Query.resolve_with_filters(info=info, model=BaseControlledVocabulary, **kwargs).all()
        return cv
schema:
schema = Schema(query=Query, types=[*tracked_members, *cv_members])
I would like to be able to not extend Node with SQLAlchemyInterface and rather add Node back to the list of interfaces for TrackedEntity and ControlledVocabulary but be able to perform a query like this:
query queryTracked {
    trackedEntities {
        id
        (other fields)
        ... on Request {
            (request specific fields)
        }
    }
}

Serialization through the Django Rest Framework - how to deserialize?

I have an array of objects and try to serialize it using the following statement:
serializer = MovieWithDescriptionSerializer(movies, many=True)
data = serializer.data
The class and the serializer are as below:
class MovieWithDescription(object):
    id = 0
    name = ''
    description = ''
    rating = ''
    year = 0

    def __init__(self, uid, name, description):
        self.id = uid
        self.name = name
        self.description = description

class MovieWithDescriptionSerializer(serializers.Serializer):
    class Meta:
        model = MovieWithDescription
        fields = ('id', 'name', 'description')

    id = serializers.IntegerField()
    name = serializers.StringRelatedField()
    description = serializers.StringRelatedField()
The data is saved to session:
request.session['movies'] = data
And read on the other page:
movies = request.session['movies']
However, when I tried to deserialize it, I learned that the movies variable already contains a list. So it looks like I don't need to deserialize and just need to iterate through the list to process the data. What am I doing wrong with this serialization? Is there a simpler way to serialize data than using Django Rest Framework?
Answering your question from the comments section, and also clarifying for whoever is migrating from a Java environment:
In Django Rest Framework, serialization happens as follows:
Python Object --> serializer.data ==> Python Native Types --> Renderer Class ==> JSON
serializer.data is constructed by calling to_representation on each of the fields declared on the Serializer.
@property
def data(self):
    ...
    if not hasattr(self, '_data'):
        if self.instance is not None and not getattr(self, '_errors', None):
            self._data = self.to_representation(self.instance)
        elif hasattr(self, '_validated_data') and not getattr(self, '_errors', None):
            self._data = self.to_representation(self.validated_data)
        else:
            self._data = self.get_initial()
    return self._data
to_representation is supposed to take an object instance and return a dict of primitive types:
def to_representation(self, instance):
    """
    Object instance -> Dict of primitive datatypes.
    """
    ...
So if serializer.data does not render to JSON, where does this magic happen?
It happens inside the Response object. When you construct a Response with the data attribute (data here is a dict of primitives, or a list of dicts of primitives), the rendered_content method then defines how the data is rendered, sets the appropriate Content-Type headers, and so on.
@property
def rendered_content(self):
    renderer = getattr(self, 'accepted_renderer', None)
    accepted_media_type = getattr(self, 'accepted_media_type', None)
    context = getattr(self, 'renderer_context', None)

    assert renderer, ".accepted_renderer not set on Response"
    assert accepted_media_type, ".accepted_media_type not set on Response"
    assert context is not None, ".renderer_context not set on Response"
    context['response'] = self

    media_type = renderer.media_type
    charset = renderer.charset
    content_type = self.content_type

    if content_type is None and charset is not None:
        content_type = "{0}; charset={1}".format(media_type, charset)
    elif content_type is None:
        content_type = media_type
    self['Content-Type'] = content_type

    ret = renderer.render(self.data, accepted_media_type, context)
    if isinstance(ret, six.text_type):
        assert charset, (
            'renderer returned unicode, and did not specify '
            'a charset value.'
        )
        return bytes(ret.encode(charset))

    if not ret:
        del self['Content-Type']
    return ret
This allows you to do something neat: you can define a single Django Rest Framework view that can render XML, JSON, and many more; you can find them under rest_framework.renderers.
For example, you can define an APIView that supports multiple render formats based on the query parameter (?format=json, ?format=xml, ?format=csv) as follows:
class ExampleAPIView(APIView):
    renderer_classes = (JSONRenderer, XMLRenderer, CSVRenderer)
    ...
XML and CSV require additional package installations. Read more here: http://www.django-rest-framework.org/api-guide/renderers/#xml
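To address the deserialization half of the question: the session already holds a list of primitive dicts, so the DRF way back is to feed it through a serializer with data= and validate. Note that StringRelatedField is read-only, so a writable serializer (a hypothetical MovieSessionSerializer sketched here) is needed for the round trip:

from rest_framework import serializers

class MovieSessionSerializer(serializers.Serializer):
    # Writable fields, unlike the read-only StringRelatedField in the question.
    id = serializers.IntegerField()
    name = serializers.CharField()
    description = serializers.CharField()

# inside a view:
movies_data = request.session['movies']              # list of plain dicts
serializer = MovieSessionSerializer(data=movies_data, many=True)
serializer.is_valid(raise_exception=True)
movies = [MovieWithDescription(d['id'], d['name'], d['description'])
          for d in serializer.validated_data]        # back to plain objects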

Django - receiving multiple varieties of json objects in the same post method

from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import QandA
from .serializers import QandASerializer
import json
import random
from itertools import count

class QandAlist(APIView):
    _ids = count(0)

    def __init__(self):
        self.id = next(self._ids)

    def get(self, request):
        questions = QandA.objects.all()
        serializer = QandASerializer(questions, many=True)
        return Response(serializer.data)

    def post(self, request):
        serializer = QandASerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            try:
                n = json.loads(request.body)
                return Response(n)
            except:
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            else:
                questions = QandA.objects.order_by('?')[:n]
                serializer = QandASerializer(questions, many=True)
                return Response(serializer.data)
Here I am trying to accept two kinds of JSON, one at a time:
one which updates the database with a QandA object,
and another which looks like
{ "number": 3 }
This number must be extracted, and 3 random QandA objects must be returned.
Everything except the "number" case works: the try block always fails, and I get an exception saying I have missed all the fields of the QandA object.
My serializers.py file is:
from rest_framework import serializers
from .models import QandA

class QandASerializer(serializers.ModelSerializer):
    class Meta:
        model = QandA
        fields = ('question', 'answer', 'option_a', 'option_b', 'option_c')
So, a couple of things.
First, your QandASerializer does not have a number field, which is the one you are trying to POST. It also has a bunch of fields required for the serializer to be valid.
You could just not use a serializer if you receive POST data with a 'number' in it.
def post(self, request):
    number = request.data.get('number')
    if number:
        # grab those random QandA objects and return them
        questions = QandA.objects.order_by('?')[:int(number)]
        serializer = QandASerializer(questions, many=True)
        return Response(serializer.data)
    else:
        serializer = QandASerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
You may want to add additional checks to ensure that number is actually a number.
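For example, a minimal guard at the top of post (a sketch; the error payload and status code are my choice):

def post(self, request):
    number = request.data.get('number')
    if number is not None:
        try:
            number = int(number)
        except (TypeError, ValueError):
            return Response({'number': 'must be an integer'},
                            status=status.HTTP_400_BAD_REQUEST)
        questions = QandA.objects.order_by('?')[:number]
        serializer = QandASerializer(questions, many=True)
        return Response(serializer.data)
    ...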