model.py:
from django.db import models
from datetime import datetime
from django.db.models import TextField, JSONField, Model

# Create your models here.
class reservation(models.Model):
    res = models.JSONField()
    da = models.DateTimeField(default=datetime.now, blank=True)
tasks.py:
from celery import shared_task

from .models import reservation

@shared_task
def ress():
    content = {
        "customer": 48,
        "reservation_id_pms": str(id),
        "reservation_channel_number": None,
        "reservation_group_id_pms": "ed2b9d55-46d9-4471-a1e9-ad6c00e30661",
        "extra_reservation_code": "550ca1c1",
    }
    reservations = reservation.objects.create(res=content)
    reservations.save()
    res.append(content)
    return None
error:
    from django.db.models import TextField, JSONField, Model
ImportError: cannot import name 'JSONField' from 'django.db.models'
(/usr/lib/python3/dist-packages/django/db/models/__init__.py)
You are most likely running an older version of Django: django.db.models.JSONField was only introduced in Django 3.1, so this import fails on anything earlier (you can check with python -m django --version). Try upgrading Django.
If upgrading is not an option and you are using PostgreSQL, Django 1.9-3.0 provides an equivalent JSONField in django.contrib.postgres.fields instead.
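In code, a version-tolerant import looks like this (a minimal sketch, assuming a PostgreSQL database for the pre-3.1 fallback):

# models.JSONField exists only on Django >= 3.1; on Django 1.9-3.0 the
# equivalent field lives in the PostgreSQL-specific module.
try:
    from django.db.models import JSONField  # Django >= 3.1
except ImportError:
    from django.contrib.postgres.fields import JSONField  # Django 1.9-3.0, PostgreSQL only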
I'm currently trying to run a deep learning tool that was created by someone else a few years ago. While trying to load a class called Evaluator, which wraps all of the important mmdetection functions, I keep getting the following error:
[error screenshot: warnings that the model and the loaded state dict do not match exactly]
The model was downloaded automatically while running the code due to the following part of the config file:
model = dict(
    type='FCOS',
    pretrained='open-mmlab://detectron/resnet101_caffe',
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        extra_convs_on_inputs=False,
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='FCOSHead',
        num_classes=15,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
I'm not sure how to determine whether the model I'm trying to load and the state dictionary are compatible, or how to fix this problem. I'm new to deep learning and to MMDetection.
Here is part of the code from the utils.py file that contains the Evaluator class:
from skimage.draw import rectangle_perimeter
import skimage.io as io
from skimage.transform import resize
import numpy as np
import skimage
import pickle
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
from mmcv.runner import load_checkpoint
import mmcv
from mmdet.datasets.pipelines import Compose  # TO LOOK AT
from mmcv.parallel import collate, scatter
from mmdet.core import bbox2result
from skimage import data, io, filters
from matplotlib.pyplot import figure
import os

class_to_number = {"Yeast White": 0, "Budding White": 1, "Yeast Opaque": 2,
                   "Budding Opaque": 3, "Yeast Gray": 4, "Budding Gray": 5,
                   "Shmoo": 6, "Artifact": 7, "Unknown ": 8,
                   "Pseudohyphae": 9, "Hyphae": 10, "H-junction": 11,
                   "P-junction": 12, "P-Start": 13, "H-Start": 14}
number_to_class = {y: x for x, y in class_to_number.items()}

class Evaluator():
    def __init__(self, config, checkpoint_file):
        self.cfg = Config.fromfile(config)
        self.cfg["gpu-ids"] = 6
        self.model = build_detector(
            self.cfg.model, train_cfg=self.cfg.train_cfg, test_cfg=self.cfg.test_cfg)
        checkpoint_dict = load_checkpoint(self.model, checkpoint_file)
        state_dict = checkpoint_dict["state_dict"]
        self.model.CLASSES = checkpoint_dict['meta']['CLASSES']
        self.model.load_state_dict(state_dict)
        self.model.eval()
I checked the versions of mmdet, mmcv, and PyTorch to make sure they were the same versions used by the original creator of the software. I also redownloaded the model file to make sure it wasn't corrupted.
It is normal that the model and the loaded state dict do not match exactly, because the fully connected layers in the pretrained model are unused, so their weights are never loaded. It will not affect training. Only if it causes further issues while testing is it a real problem; otherwise you should be good.
Refer to the issue here.
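If you want to check compatibility yourself, here is a minimal sketch (assuming the checkpoint is an ordinary PyTorch checkpoint with a "state_dict" entry, and that model is the detector returned by build_detector, as in the Evaluator above):

import torch

# Diff the parameter names in the checkpoint against the freshly built model.
# For a mismatch like the one above, only unused pretrained keys (e.g. the
# backbone's fc.*) should appear in the differences.
checkpoint = torch.load(checkpoint_file, map_location="cpu")
ckpt_keys = set(checkpoint["state_dict"])
model_keys = set(model.state_dict())
print("only in checkpoint:", sorted(ckpt_keys - model_keys))
print("only in model:", sorted(model_keys - ckpt_keys))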
This is a continuation of this question.
As my Flask app should not write anything to my database, I set up Flask-SQLAlchemy to reflect my database. This way I do not have to change my models when I change my schema:
# app/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

def create_app():
    app = Flask(__name__)
    db.init_app(app)
    with app.app_context():
        db.Model.metadata.reflect(db.engine)
    return app
# app/models.py
from app import db

class Data(db.Model):
    __table__ = db.Model.metadata.tables['data']
But now, if I import the model before the app has been created, I run into errors because the metadata is not set yet. This is a problem when it comes to testing, for example:
# test.py
import unittest
from app import create_app, db
from app.models import Data

class TestGUI(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.app = create_app()
    # etc ...
This throws KeyError: 'data' in __table__ = db.Model.metadata.tables['data'] when importing from app.models, because the metadata is not populated until create_app() has run.
I did find a solution (thanks to @snakecharmerb). The solution is simply to avoid the problem by not importing app.models before running create_app(). A bit hacky, so feel free to post an answer as well if you have a better solution.
My test file now looks like this:
# test.py
import unittest
from app import create_app, db

app = create_app()
from app.models import Data

class TestGUI(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.app = app
    # etc ...
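For the record, another pattern that might avoid the import-order problem is SQLAlchemy's DeferredReflection mixin, which defers mapper configuration until prepare() is called inside create_app(). This is an untested sketch, not something the setup above confirms:

# app/models.py (sketch)
from sqlalchemy.ext.declarative import DeferredReflection
from app import db

class Data(DeferredReflection, db.Model):
    __tablename__ = 'data'  # columns are filled in by prepare() below

# app/__init__.py, inside create_app(), after db.init_app(app):
#     with app.app_context():
#         DeferredReflection.prepare(db.engine)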
I am trying to create an API for customer churn at a bank. I have completed the model and now want to create the API using FastAPI. My problem is converting the JSON data passed in into a dataframe that I can run through the model. Here is the code.
from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware
from pycaret.classification import *
import pandas as pd
import uvicorn  # ASGI
import pickle
import pydantic
from pydantic import BaseModel

class customer_input(BaseModel):
    CLIENTNUM: int
    Customer_Age: int
    Gender: str
    Dependent_count: int
    Education_Level: str
    Marital_Status: str
    Income_Category: str
    Card_Category: str
    Months_on_book: int
    Total_Relationship_Count: int
    Months_Inactive_12_mon: int
    Contacts_Count_12_mon: int
    Credit_Limit: float
    Total_Revolving_Bal: int
    Avg_Open_To_Buy: float
    Total_Amt_Chng_Q4_Q1: float
    Total_Trans_Amt: int
    Total_Trans_Ct: int
    Total_Ct_Chng_Q4_Q1: float
    Avg_Utilization_Ratio: float
app = FastAPI()

# Loading the saved model from pycaret
model = load_model('BankChurnersCatboostModel25thDec2020')

origins = [
    '*'
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=['GET', 'POST'],
    allow_headers=['Content-Type', 'application/xml', 'application/json'],
)

@app.get("/")
def index():
    return {"Nothing to see here"}

@app.post("/predict")
def predict(data: customer_input):
    # Convert input data into a dictionary
    data = data.dict()
    # Convert the dictionary into a dataframe
    my_data = pd.DataFrame([data])
    # Predicting using pycaret
    prediction = predict_model(model, my_data)
    return prediction

# Only use below 2 lines when testing on localhost -- remove when deploying
if __name__ == '__main__':
    uvicorn.run(app, host='127.0.0.1', port=8000)
When I test this out I get an Internal Server Error from the OpenAPI interface, so I check my cmd and the error says:
ValueError: [TypeError("'numpy.int64' object is not iterable"), TypeError('vars() argument must have __dict__ attribute')]
How can I have the data that is passed into the predict function successfully converted into a dataframe? Thank you.
OK, so I fixed this by changing the customer_input class: any int types I changed to float, and that fixed it. I don't understand why, though. Can anyone explain?
Fundamentally those values are only meant to be integers because they are all discrete quantities (e.g. the number of dependents a bank customer has), but I guess I could put a constraint on the front-end.
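A likely explanation is that the problem is on the response side rather than the input side: predict_model returns a pandas DataFrame whose cells are numpy scalars (numpy.int64 and friends), which FastAPI's JSON encoder cannot serialize. A hedged sketch of an endpoint that casts the output to built-in Python types before returning it ('Label' and 'Score' are assumed column names based on pycaret's usual output):

@app.post("/predict")
def predict(data: customer_input):
    my_data = pd.DataFrame([data.dict()])
    prediction = predict_model(model, data=my_data)
    # Cast numpy scalars to built-in types so the response is JSON-serializable.
    return {
        "label": str(prediction["Label"].iloc[0]),
        "score": float(prediction["Score"].iloc[0]),
    }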
I want to create an LDA topic model and am using spaCy to do so, following a tutorial. The error I receive when I try to use spaCy is one I cannot find on Google, so I'm hoping someone here knows what it's about.
I'm running this code on Anaconda:
import numpy as np
import pandas as pd
import re, nltk, spacy, gensim

# Sklearn
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from pprint import pprint

# Plotting tools
import pyLDAvis
import pyLDAvis.sklearn
import matplotlib.pyplot as plt

df = pd.DataFrame(data)

def sent_to_words(sentences):
    for sentence in sentences:
        yield gensim.utils.simple_preprocess(str(sentence), deacc=True)  # deacc=True removes punctuation

data_words = list(sent_to_words(data))
print(data_words[:1])

def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """https://spacy.io/api/annotation"""
    texts_out = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
    return texts_out

nlp = spacy.load('en', disable=['parser', 'ner'])

# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
And I receive the following error:
File "C:\Users\maart\AppData\Local\Continuum\anaconda3\lib\site-packages\_regex_core.py", line 1880, in get_firstset
raise _FirstSetError()
_FirstSetError
The error must occur somewhere after the lemmatization function, because the other parts run fine.
Thanks a bunch!
I had this same issue and was able to resolve it by uninstalling regex (I had the wrong version installed) and then running python -m spacy download en again. This reinstalls the correct version of regex.
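Concretely, that amounts to the following two commands (assuming pip manages both packages in the active environment):

pip uninstall regex
python -m spacy download en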
I get an error I don't understand when I do a session commit after a deletion like this (in a shell with the Flask app context, or anywhere while running the app):
>>> from app.extensions import db
>>> from app.models.user import User
>>> user = User.query.all()[0]
>>> db.session.delete(user)
>>> db.session.commit()
File "/Users/hugo/Dropbox/lahey/api/.venv/lib/python3.6/site-packages/sqlalchemy/util/langhelpers.py", line 962, in module
    % (self._il_path, self._il_addtl))
ImportError: importlater.resolve_all() hasn't been called (this is sqlalchemy.orm.strategy_options)
My model for the object I try to delete looks like this:
import datetime

from sqlalchemy_utils.types.password import PasswordType
from sqlalchemy_utils import force_auto_coercion

from app.extensions import db

# Setup coercion of passwords
force_auto_coercion()

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(PasswordType(schemes=['pbkdf2_sha512']), nullable=False)
    name = db.Column(db.String(256))
    created_at = db.Column(db.DateTime, default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.now)
Deleting objects of other models works fine. Could this somehow be because I'm using the PasswordType column from sqlalchemy_utils?
If you are using Flask, the docstring suggests you are configuring the Column incorrectly. Lazy configuration of the type with the Flask config looks like this:
import flask
from sqlalchemy_utils import PasswordType, force_auto_coercion

force_auto_coercion()

class User(db.Model):
    __tablename__ = 'user'
    password = db.Column(
        PasswordType(
            # The returned dictionary is forwarded to the CryptContext
            onload=lambda **kwargs: dict(
                schemes=flask.current_app.config['PASSWORD_SCHEMES'],
                **kwargs
            ),
        ),
        unique=False,
        nullable=False,
    )
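For this lazy configuration to work, the Flask config has to supply the key the onload callback reads, e.g. (a minimal sketch; the scheme list is just an example value):

# read by the onload lambda above, e.g. in the app factory
app.config['PASSWORD_SCHEMES'] = ['pbkdf2_sha512']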
I've worked out what caused this: I've been using the package sqlalchemy_bulk_lazy_loader, which had a bug (strategy_options was not imported in the correct way). The issue is now fixed in the package.
See the full sqlalchemy mailing list thread for details.