company = self.env['res.company'].search([('id', '=', 1)])
print company.name # prints 'my company' which is correct
print json.dumps(company) # error TypeError: res.company(1,) is not JSON serializable
The question is: how do I simply export the company object as JSON?
I am looking for a generic way that would work for any model.
Use Model.read(). You can also specify the fields to be read in the read() method (see the documentation). Also, datetime objects are not JSON serializable; fortunately, Odoo already provides a utility method for that.
import json
from odoo.tools import date_utils
raw_data = company.read()
json_data = json.dumps(raw_data, default=date_utils.json_default)
print(json_data)
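If you only need a subset of fields, a minimal sketch of the same approach; the field names here are just examples:
raw_data = company.read(['name', 'email', 'phone'])  # read() accepts a list of field names
json_data = json.dumps(raw_data, default=date_utils.json_default)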
Please try this code:
company = self.env['res.company'].search([('id', '=', 1)])
print company.name
print json.dumps(company.name)
search() returns a recordset, so we have to manually add fields to a dictionary to build the JSON. I have added a few fields; you can add more.
company = self.env['res.company'].search([('id', '=', 1)])
params = {}
data = dict()
# relational fields hold recordsets, so serialize their ids instead
data['partner_id'] = company.partner_id.id
data['name'] = company.name
data['email'] = company.email
data['phone'] = company.phone
data['company_registry'] = company.company_registry
params['params'] = data
print json.dumps(params)
The last answer (Jerther's) is the right one.
In Odoo 16 you can also use
from odoo.tools import json_default
instead of
from odoo.tools import date_utils
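A minimal sketch of the same approach on Odoo 16, assuming the company recordset from above:
import json
from odoo.tools import json_default  # replaces date_utils.json_default in Odoo 16

json_data = json.dumps(company.read(), default=json_default)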
I am new around here and I need some help.
I am trying to make a report in Odoo with Base report CSV. The table has two relational fields, and I don't know how to combine those tables. I tried adapting the function from the Base report CSV module as shown below, but it gives a blank error that only leaves me confused. Does anyone have any idea how I could do this?
from odoo import models
import csv


class csvreport(models.AbstractModel):
    _name = 'report.hr_timesheet.report'
    _inherit = 'report.report_csv.abstract'

    def generate_csv_report(self, writer, data, partners):
        writer.writeheader()
        for obj in partners:
            employee = self.env.cr.execute("""select hr_employee.name where hr_employee.id = %s;""", (obj.employee_id))
            task = self.env.cr.execute("""select project_task.name where project_task.id = %s;""", (obj.project_id))
            writer.writerow({
                'name': obj.name,
                'date': obj.date,
                'unit_amount': obj.unit_amount,
                'responsible': employee.fetchall(),
                'task': task.fetchall(),
            })

    def csv_report_options(self):
        res = super().csv_report_options()
        res['fieldnames'].append('name')
        res['fieldnames'].append('date')
        res['fieldnames'].append('unit_amount')
        res['fieldnames'].append('responsible')
        res['fieldnames'].append('task')
        res['delimiter'] = ';'
        res['quoting'] = csv.QUOTE_ALL
        return res
The error:
Since I can't post a picture, I'll just post a gdrive link.
You should see the following error message in the error log:
ValueError: SQL query parameters should be a tuple, list or dict; ...
To fix that error, pass the query arguments as a tuple:
employee = self.env.cr.execute(query_str, (obj.employee_id.id, ))
You can't pass obj.employee_id (a record), because psycopg2 can't adapt the type hr.employee.
To get the employee name, just use dot notation:
employee_name = obj.employee_id.name
The FROM clause is also missing from both queries, and you can't call fetchall() on employee or task, because self.env.cr.execute() returns None. To fetch the result, use the cursor's fetch*() methods:
self.env.cr.fetchall()
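Putting those fixes together, a minimal sketch of the report method that skips the raw SQL entirely and relies on dot notation (assuming the records in partners expose employee_id and project_id as in the original code):
def generate_csv_report(self, writer, data, partners):
    writer.writeheader()
    for obj in partners:
        writer.writerow({
            'name': obj.name,
            'date': obj.date,
            'unit_amount': obj.unit_amount,
            # many2one fields give direct access to the related record's fields
            'responsible': obj.employee_id.name,
            'task': obj.project_id.name,
        })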
Just like the title says, here is my code:
require 'json'
require 'csv'

def import_csv
  path = Rails.root.join('folder1', 'folder2', 'file.csv')
  counter = 0
  puts "inserts on table started..."
  CSV.foreach(path, headers: true) do |row|
    next if row.to_hash['deleted_at'] != nil
    counter += 1
    puts row.to_json # shows correct format
    some_model = SomeModel.new(row.to_hash) # imports incorrect format of json with backslash in db
    # some_model = SomeModel.new(row.to_json) # ArgumentError: When assigning attributes, you must pass a hash as an argument.
    some_model.skip_callbacks = true
    some_model.save!
  end
  puts "#{counter} inserts on table apps complete"
end

import_csv
I cannot import the CSV file in the correct format. The import works, but the structure is wrong.
EXPECTED
{"data":{"someData":72}}
GETTING
"{\"data\":{\"someData\":72}}"
How can I import it with the correct JSON format?
If all the headers match the column names of the model, maybe you can try:
JSON.parse(row.to_json)
I have a model like this:
class MyModel(models.Model):
    details = models.JSONField()
    # other fields
I want to annotate some fields from this model like this:
qs = MyModel.objects.filter(id__in=given_list).annotate(
    first_name=F('details__first_name'),
    last_name=F('details__last_name')
)
However, the F() expression is not picking up the JSON keys; it just returns the whole details field.
I am using MySQL, so cannot use KeyTextTransform.
I tried using RawSQL like this:
qs = MyModel.objects.filter(id__in=given_list).annotate(
    first_name=RawSQL("(details->%s)", ('first_name',)),
    last_name=RawSQL("(details->%s)", ('last_name',))
)
But it was giving this error:
MySQLdb._exceptions.OperationalError: (3143, 'Invalid JSON path expression. The error is around character position 1.')
So what can I do to make everything work as expected?
You can use JSONExtract; it is easier to write and understand:
from django_mysql.models.functions import JSONExtract

qs = MyModel.objects.filter(id__in=given_list).annotate(
    first_name=JSONExtract('details', '$.first_name'),
    last_name=JSONExtract('details', '$.last_name')
)
MySQL JSON extraction has a special path syntax, jsonfield->"$.key". Try this:
qs = MyModel.objects.filter(id__in=given_list).annotate(
    first_name=RawSQL("(details->%s)", ('$.first_name',)),
    last_name=RawSQL("(details->%s)", ('$.last_name',))
)
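If you need the plain string rather than a JSON-quoted value, MySQL also has the ->> operator (shorthand for JSON_UNQUOTE(JSON_EXTRACT(...))); a minimal sketch of the same annotation:
qs = MyModel.objects.filter(id__in=given_list).annotate(
    first_name=RawSQL("(details->>%s)", ('$.first_name',)),  # ->> strips the JSON quotes
    last_name=RawSQL("(details->>%s)", ('$.last_name',))
)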
You can also just add properties to MyModel and have them return the corresponding information:
class MyModel(models.Model):
    details = models.JSONField()

    @property
    def first_name(self):
        return self.details['first_name']

    @property
    def last_name(self):
        return self.details['last_name']
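A short usage sketch; note that these properties are computed in Python, so unlike the annotations above they cannot be used inside filter() or order_by():
obj = MyModel.objects.filter(id__in=given_list).first()
if obj:
    print(obj.first_name, obj.last_name)  # read straight from the JSON field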
I am getting an error when I try to "flatten" JSON into a DataFrame; I believe it is because some of the cells contain NaN. What is the best way to handle this?
The error I get is "AttributeError: 'float' object has no attribute 'keys'".
import pandas as pd
from pymongo import MongoClient
client = MongoClient()
client = MongoClient('mongodb://localhost:27017/')
#Import Counterparties
counterpartydb = client.counterparties
cptylist = counterpartydb.counterparties
cptylists = pd.DataFrame(list(cptylist.find()))
details = pd.DataFrame(list(cptylists['details']))
CurRating = pd.DataFrame(list(cptylists['currentRating']))
Since MongoDB is schemaless, sometimes there will be Null values in a response. You can iterate over these and check to see if the value is None.
cptylists = pd.DataFrame(list(cptylist.find()))

creditRating = []
for rating in cptylists['creditRating']:
    if rating['creditRating'] is not None:
        creditRating.append(rating['creditRating'])
    else:
        creditRating.append('No value in database')

creditRating = pd.DataFrame(creditRating)
The list comprehension version of this would be something like:
if 'creditRating' in cptylists:
    creditRating = pd.DataFrame(
        [r['creditRating'] if r['creditRating'] is not None else 'No value in database'
         for r in cptylists['creditRating']]
    )
This code seems to work fine when I use the Django console and just print the result.
reference = FloodHazard.objects.filter(hazard='High')
ids = reference.values_list('id', flat=True)

for myid in ids:
    getgeom = FloodHazard.objects.get(id=myid).geom
    response = BuildingStructure.objects.filter(geom__intersects=getgeom).values(
        'brgy_locat').annotate(counthigh=Count('brgy_locat'))
    print response
I was able to see all the values, but when using HttpResponse it returns an empty set. What is the proper way of returning JSON data from a queryset? So far I have tried this:
reference = FloodHazard.objects.filter(hazard='High')
ids = reference.values_list('id', flat=True)

response = {}
for myid in ids:
    getgeom = FloodHazard.objects.get(id=myid).geom
    response['high'] = BuildingStructure.objects.filter(geom__intersects=getgeom).values(
        'brgy_locat').annotate(counthigh=Count('brgy_locat'))

json_post = ujson.dumps(list(response))
return HttpResponse(json_post, content_type='application/json')
Your code does not make much sense as written: you assign every queryset to the same single key in the response dict. You should use a list for this purpose.
As far as I understand, the code should be something like this:
response = []
for myid in ids:
    getgeom = FloodHazard.objects.get(id=myid).geom
    response.extend(BuildingStructure.objects.filter(geom__intersects=getgeom)
                    .values('brgy_locat')
                    .annotate(counthigh=Count('brgy_locat')))
json_post = ujson.dumps(response)
If you want to return a hazard level as well as the list of buildings then you can return a dict:
json_post = ujson.dumps({'hazard': 'high', 'buildings': response})
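For completeness, a minimal sketch of returning that payload from the view; Django's built-in JsonResponse does the serialization and sets the content type for you:
from django.http import JsonResponse

# inside the view, after building `response` as above
return JsonResponse({'hazard': 'high', 'buildings': response})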