loading a json file using python [duplicate] - json

I am getting some data from a JSON file "new.json", and I want to filter some data and store it into a new JSON file. Here is my code:
import json
with open('new.json') as infile:
    data = json.load(infile)

for item in data:
    iden = item.get("id")
    a = item.get("a")
    b = item.get("b")
    c = item.get("c")
    if c == 'XYZ' or "XYZ" in item["text"]:
        filename = 'abc.json'
        try:
            outfile = open(filename, 'ab')
        except:
            outfile = open(filename, 'wb')
        obj_json = {}
        obj_json["ID"] = iden
        obj_json["VAL_A"] = a
        obj_json["VAL_B"] = b
And I am getting an error, the traceback is:
File "rtfav.py", line 3, in <module>
data = json.load(infile)
File "/usr/lib64/python2.7/json/__init__.py", line 278, in load
**kw)
File "/usr/lib64/python2.7/json/__init__.py", line 326, in loads
return _default_decoder.decode(s)
File "/usr/lib64/python2.7/json/decoder.py", line 369, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 88 column 2 - line 50607 column 2 (char 3077 - 1868399)
Here is a sample of the data in new.json; there are about 1500 more such dictionaries in the file.
{
    "contributors": null,
    "truncated": false,
    "text": "#HomeShop18 #DreamJob to professional rafter",
    "in_reply_to_status_id": null,
    "id": 421584490452893696,
    "favorite_count": 0,
    "source": "Mobile Web (M2)",
    "retweeted": false,
    "coordinates": null,
    "entities": {
        "symbols": [],
        "user_mentions": [
            {
                "id": 183093247,
                "indices": [
                    0,
                    11
                ],
                "id_str": "183093247",
                "screen_name": "HomeShop18",
                "name": "HomeShop18"
            }
        ],
        "hashtags": [
            {
                "indices": [
                    12,
                    21
                ],
                "text": "DreamJob"
            }
        ],
        "urls": []
    },
    "in_reply_to_screen_name": "HomeShop18",
    "id_str": "421584490452893696",
    "retweet_count": 0,
    "in_reply_to_user_id": 183093247,
    "favorited": false,
    "user": {
        "follow_request_sent": null,
        "profile_use_background_image": true,
        "default_profile_image": false,
        "id": 2254546045,
        "verified": false,
        "profile_image_url_https": "https://pbs.twimg.com/profile_images/413952088880594944/rcdr59OY_normal.jpeg",
        "profile_sidebar_fill_color": "171106",
        "profile_text_color": "8A7302",
        "followers_count": 87,
        "profile_sidebar_border_color": "BCB302",
        "id_str": "2254546045",
        "profile_background_color": "0F0A02",
        "listed_count": 1,
        "profile_background_image_url_https": "https://abs.twimg.com/images/themes/theme1/bg.png",
        "utc_offset": null,
        "statuses_count": 9793,
        "description": "Rafter. Rafting is what I do. Me aur mera Tablet. Technocrat of Future",
        "friends_count": 231,
        "location": "",
        "profile_link_color": "473623",
        "profile_image_url": "http://pbs.twimg.com/profile_images/413952088880594944/rcdr59OY_normal.jpeg",
        "following": null,
        "geo_enabled": false,
        "profile_banner_url": "https://pbs.twimg.com/profile_banners/2254546045/1388065343",
        "profile_background_image_url": "http://abs.twimg.com/images/themes/theme1/bg.png",
        "name": "Jayy",
        "lang": "en",
        "profile_background_tile": false,
        "favourites_count": 41,
        "screen_name": "JzayyPsingh",
        "notifications": null,
        "url": null,
        "created_at": "Fri Dec 20 05:46:00 +0000 2013",
        "contributors_enabled": false,
        "time_zone": null,
        "protected": false,
        "default_profile": false,
        "is_translator": false
    },
    "geo": null,
    "in_reply_to_user_id_str": "183093247",
    "lang": "en",
    "created_at": "Fri Jan 10 10:09:09 +0000 2014",
    "filter_level": "medium",
    "in_reply_to_status_id_str": null,
    "place": null
}

As you can see in the following example, json.loads (and json.load) does not decode multiple JSON objects:
>>> json.loads('{}')
{}
>>> json.loads('{}{}') # == json.loads(json.dumps({}) + json.dumps({}))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\json\__init__.py", line 338, in loads
return _default_decoder.decode(s)
File "C:\Python27\lib\json\decoder.py", line 368, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 1 column 3 - line 1 column 5 (char 2 - 4)
If you want to dump multiple dictionaries, wrap them in a list and dump the list (instead of dumping each dictionary separately):
>>> dict1 = {}
>>> dict2 = {}
>>> json.dumps([dict1, dict2])
'[{}, {}]'
>>> json.loads(json.dumps([dict1, dict2]))
[{}, {}]
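Applied to the question, a minimal sketch of that approach (assuming data already holds the parsed tweet dictionaries, for example from one of the line-by-line approaches below) would collect the filtered items and dump them once as a single list:
import json

filtered = []
for item in data:  # data: the parsed list of tweet dictionaries (assumed)
    if item.get("c") == "XYZ" or "XYZ" in item.get("text", ""):
        filtered.append({"ID": item.get("id"),
                         "VAL_A": item.get("a"),
                         "VAL_B": item.get("b")})

with open("abc.json", "w") as outfile:
    json.dump(filtered, outfile)  # one valid JSON list, readable later with json.load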

Iterate over the file, loading each line as JSON in the loop:
tweets = []
for line in open('tweets.json', 'r'):
    tweets.append(json.loads(line))
This avoids storing intermediate Python objects. As long as each line of the file holds one complete tweet, this should work.
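A hedged variant of the same loop that also closes the file and tolerates blank lines:
import json

tweets = []
with open('tweets.json', 'r') as f:  # assumes one complete JSON object per line
    for line in f:
        line = line.strip()
        if line:  # skip blank lines
            tweets.append(json.loads(line))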

I came across this because I was trying to load a JSON file dumped from MongoDB. It was giving me an error
JSONDecodeError: Extra data: line 2 column 1
The MongoDB JSON dump has one object per line, so what worked for me is:
import json
data = [json.loads(line) for line in open('data.json', 'r')]

This may also happen if your JSON file is not just 1 JSON record.
A JSON record looks like this:
[{"some data": value, "next key": "another value"}]
It opens and closes with a bracket [ ]; within the brackets are the braces { }. There can be many pairs of braces, but it all ends with a closing bracket ].
If your json file contains more than one of those:
[{"some data": value, "next key": "another value"}]
[{"2nd record data": value, "2nd record key": "another value"}]
then loads() will fail.
I verified this with my own file that was failing.
import json
guestFile = open("1_guests.json",'r')
guestData = guestFile.read()
guestFile.close()
gdfJson = json.loads(guestData)
This works because 1_guests.json has one record []. The original file I was using, all_guests.json, had 6 records separated by newlines. I deleted 5 records (which I had already checked were bookended by brackets) and saved the file under a new name. Then the loads statement worked.
Error was
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 2 column 1 - line 10 column 1 (char 261900 - 6964758)
PS: I use the word record, but that's not the official name. Also, if your file has newline characters like mine, you can loop through it and loads() one record at a time into a json variable.
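For example, if each line of the original file is itself a bracketed record like the ones above, a hedged sketch (the filename all_guests.json is taken from the description above) could merge all the records into one list:
import json

all_records = []
with open("all_guests.json", "r") as f:
    for line in f:
        line = line.strip()
        if line:
            all_records.extend(json.loads(line))  # each line is a JSON list, so extend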

I just got the same error when my json file was like this:
{"id":"1101010","city_id":"1101","name":"TEUPAH SELATAN"}
{"id":"1101020","city_id":"1101","name":"SIMEULUE TIMUR"}
I found that it was malformed, so I changed it to:
{
    "datas": [
        {"id":"1101010","city_id":"1101","name":"TEUPAH SELATAN"},
        {"id":"1101020","city_id":"1101","name":"SIMEULUE TIMUR"}
    ]
}
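Once the file is rewrapped like that, it loads in one call; a minimal sketch (the filename regions.json is just a placeholder):
import json

with open("regions.json") as f:  # placeholder name for the rewrapped file
    data = json.load(f)

for row in data["datas"]:
    print(row["id"], row["name"])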

One-liner for your problem:
data = [json.loads(line) for line in open('tweets.json', 'r')]

If you want to solve it in a two-liner you can do it like this:
with open('data.json') as f:
    data = [json.loads(line) for line in f]

The error is due to the \n symbol if you use the read() method of the file descriptor... so don't bypass the problem by using readlines() & co, just remove that character!
import json

path = ...  # path to a file that contains for example {"c": 4}, possibly spread over multiple lines
new_d = {'new': 5}
with open(path, 'r') as fd:
    d_old_str = fd.read().replace('\n', '')  # remove all \n
    old_d = json.loads(d_old_str)
# update new_d (Python 3.9+; otherwise use new_d.update(old_d))
new_d |= old_d
with open(path2, 'w') as fd:
    fd.write(json.dumps(new_d))  # save the dictionary to file (in case needed)
...and if you really want to use readlines(), here is an alternative solution:
new_d = {'new': 5}
with open('some_path', 'r') as fd:
    d_old_str = ''.join(fd.readlines())  # concatenate the lines
d_old = json.loads(d_old_str)
# then as above

I don't think saving the dicts in a list, as proposed by @falsetru, is an ideal solution here.
A better way is to iterate through the dicts and save them to .json by adding a new line after each one.
Our 2 dictionaries are
d1 = {'a':1}
d2 = {'b':2}
You can write them to a .json file:
import json
with open('sample.json','a') as sample:
    for d in [d1, d2]:
        sample.write('{}\n'.format(json.dumps(d)))
And you can read the json file back without any issues:
with open('sample.json','r') as sample:
    for line in sample:
        line = json.loads(line.strip())
Simple and efficient.
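If you need the parsed objects after the read loop, a small sketch that collects them as it goes:
import json

records = []
with open('sample.json', 'r') as sample:
    for line in sample:
        records.append(json.loads(line.strip()))

print(records)  # [{'a': 1}, {'b': 2}]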

My json file was formatted exactly like the one in the question, but none of the solutions here worked out. Finally I found a workaround on another Stack Overflow thread. Since this post is the first link in a Google search, I put that answer here so that other people who come to this post in the future will find it more easily.
As it's been said there, a valid json file needs "[" at the beginning and "]" at the end of the file. Moreover, after each json item, instead of "}" there must be a "},". All brackets without quotation marks! That piece of code just modifies the malformed json file into its correct format.
https://stackoverflow.com/a/51919788/2772087

If your data comes from a source outside your control, you can use something like this:
import json
from json import JSONDecodeError


def load_multi_json(line: str) -> list:
    """
    Fix some files with multiple JSON objects on one line.
    """
    try:
        return [json.loads(line)]
    except JSONDecodeError as err:
        if err.msg == 'Extra data':
            head = [json.loads(line[0:err.pos])]
            tail = load_multi_json(line[err.pos:])  # recurse on the remainder
            return head + tail
        else:
            raise
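A quick usage example with a hypothetical line that holds two concatenated objects:
print(load_multi_json('{"a": 1}{"b": 2}'))  # [{'a': 1}, {'b': 2}]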

Related

"trailing data" error when reading json to Pandas dataframe

I have a Python 3.8.5 script that gets JSON from an API, saves it to disk, and reads the JSON into a DataFrame. It works.
df = pd.io.json.read_json('json_file', orient='records')
I want to try an IO buffer instead so I don't have to read/write to disk, but I am getting an error. The code is like this:
from io import StringIO
io = StringIO()
json_out = []
# some code to append API results to json_out
json.dump(json_out, io)
df = pd.io.json.read_json(io.getvalue())
On that last line I get the error
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\util\_decorators.py", line 199, in wrapper
return func(*args, **kwargs)
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\util\_decorators.py", line 296, in wrapper
return func(*args, **kwargs)
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 618, in read_json
result = json_reader.read()
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 755, in read
obj = self._get_object_parser(self.data)
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 777, in _get_object_parser
obj = FrameParser(json, **kwargs).parse()
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 886, in parse
self._parse_no_numpy()
File "C:\Users\chap\Anaconda3\lib\site-packages\pandas\io\json\_json.py", line 1119, in _parse_no_numpy
loads(json, precise_float=self.precise_float), dtype=None
ValueError: Trailing data
The JSON is in a list format. So this is not the actual json but it looks like this when I write to disk:
json = [
    {"state": "North Dakota",
     "address": "123 30th st E #206",
     "account": "123"
    },
    {"state": "North Dakota",
     "address": "456 30th st E #206",
     "account": "456"
    }
]
Given that it worked in the first case (write/read from disk), I don't know how to troubleshoot. How do I troubleshoot something in the buffer? The actual data is mostly text but has some number fields.
I don't know what's going wrong for you; this works for me:
import json
import pandas as pd
from io import StringIO
json_out = [
    {"state": "North Dakota",
     "address": "123 30th st E #206",
     "account": "123"
    },
    {"state": "North Dakota",
     "address": "456 30th st E #206",
     "account": "456"
    }
]
io = StringIO()
json.dump(json_out, io)
df = pd.io.json.read_json(io.getvalue())
print(df)
This leads me to believe there's something wrong with the code that appends the API data...
However, if you have a list of dictionaries, you don't need the IO step. You can just do:
pd.DataFrame(json_out)
EDIT: I think I remember this error when there was a comma at the end of my json like so:
[
    {
        "hello": "world",
    },
]
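If you do want to keep the in-memory buffer, a hedged sketch that hands read_json the buffer itself (pandas accepts file-like objects) rather than getvalue():
import json
import pandas as pd
from io import StringIO

buf = StringIO()
json.dump(json_out, buf)  # json_out: the list of dicts shown above
buf.seek(0)               # rewind so read_json starts at the beginning
df = pd.read_json(buf, orient='records')
print(df)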

Python error: Extra data: line 1 in loading a big Json file

I am trying to read a JSON file which is 370 MB
import json
data = open("data.json", "r")
json.loads(data.read())
and I cannot easily find the root cause of the following error:
json.decoder.JSONDecodeError: Extra data: line 1 column 1024109 (char 1024108)
I looked at similar questions and tried the following StackOverflow answer
import json
data = [json.loads(line) for line in open('data.json', 'r')]
But it didn't resolve the issue. I am wondering if there is any solution to find where the error happens in the file. I am getting some other files from the same source and they run without any problem.
A small piece of the JSON file, which is a list of dicts, looks like this:
{
    "uri": "p",
    "source": {
        "uri": "dail",
        "dataType": "pr",
        "title": "Daily"
    },
    "authors": [
        {
            "type": "author",
            "isAgency": false
        }
    ],
    "concepts": [
        {
            "amb": false,
            "imp": true,
            "date": "2019-05-23",
            "textStart": 2459,
            "textEnd": 2467
        },
        {
            "amb": false,
            "imp": true,
            "date": "2019-05-09",
            "textStart": 2684,
            "textEnd": 2691
        }
    ],
    "shares": {},
    "wgt": 100,
    "relevance": 100
}
The problem with the json library is that it loads everything into memory and parses it in full before anything is handled, which for such a large amount of data is clearly problematic.
Instead, I would suggest taking a look at https://github.com/henu/bigjson:
import bigjson
with open('data.json', 'rb') as f:
    json_data = bigjson.load(f)
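Separately, to answer the "where does the error happen" part: if the file might contain several JSON values concatenated together, a hedged sketch using the standard library's JSONDecoder.raw_decode can split them apart and, for a genuine syntax error, will raise at the exact character offset. Note that this still reads the whole file into memory; it only helps pinpoint the failure:
import json

with open('data.json', 'r') as f:
    text = f.read()

decoder = json.JSONDecoder()
objects = []
pos = 0
while pos < len(text):
    if text[pos].isspace():  # skip whitespace between concatenated values
        pos += 1
        continue
    obj, pos = decoder.raw_decode(text, pos)  # a JSONDecodeError here reports the offset
    objects.append(obj)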

I'm getting an error processing the following JSON file

I'm getting a feed from the web and I need to process it; however, I'm getting an error and I'm not sure how to process a list inside a list. I'm sure it's something simple I'm overlooking.
The JSON feed looks like this (truncated):
{"alerts":[{"country":"AS","nThumbsUp":0,"city":"Albion, Vic","reportRating":3,"confidence":0,"reliability":5,"type":"JAM","uuid":"19c56810-3b8b-31a1-a658-c779f99b9388","magvar":279,"subtype":"JAM_STAND_STILL_TRAFFIC","street":"Polish Club Driveway","location":{"x":144.807815,"y":-37.771797},"pubMillis":1559688120073},{"country":"AS","nThumbsUp":0,"city":"Calder Park","reportRating":2,"confidence":0,"reliability":5,"type":"WEATHERHAZARD","uuid":"283a1bb4-6c0e-3f84-a4ff-cf187aa97dbd","roadType":2,"magvar":221,"subtype":"HAZARD_ON_SHOULDER_CAR_STOPPED","street":"Calder Park Dr","location":{"x":144.761619,"y":-37.679113},"pubMillis":1559689265092},
url = urllib.request.urlopen(turl)
output = url.read().decode('utf-8')
raw_api_dict = json.loads(output)
for x in json.loads(output)['alerts']:
    print(x['country'])
    print(x['nThumbsUp'])
    print(x['reportRating'])
    print(x['confidence'])
    print(x['reliability'])
    print(x['type'])
    print(x['uuid'])
    print(x['roadType'])
    print(x['magvar'])
    print(x['subtype'])
    print(x['street'])
    print(x['location_x'])
    print(x['location_y'])
    print(x['pubMillis'])
This is the error:
Traceback (most recent call last):
  File "waze.py", line 58, in <module>
    print(x['location_x'][0])
KeyError: 'location_x'
It's mainly because the JSON which you are using is invalid. Use the JSON below:
{
    "country": "AS",
    "nThumbsUp": 0,
    "city": "Taylors Hill",
    "reportRating": 1,
    "confidence": 0,
    "reliability": 5,
    "type": "JAM",
    "uuid": "a0241505-b0f8-3e83-a9c9-678f3c9039c5",
    "roadType": 2,
    "magvar": 103,
    "subtype": "JAM_STAND_STILL_TRAFFIC",
    "street": "Taylors Rd",
    "location_x": 144.764866,
    "location_y": -37.725576,
    "pubMillis": 1559626611999
}
The Python compiler considers ' a special character.
Always use a JSON validator to validate your JSON before running it in code. I hope my answer helps. See the above comment; I guess json.dumps() could help you in this case.
import json

person_dict = {'name': 'Bob',
               'age': 12,
               'children': None
               }
person_json = json.dumps(person_dict)
# Output: {"name": "Bob", "age": 12, "children": null}
print(person_json)

person_dict = json.loads(person_json)
print(person_dict)
print(person_dict['age'])
Use json.dumps to solve the problem if it works.
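Alternatively, keeping the feed exactly as it arrives, the coordinates live in a nested "location" object rather than in location_x/location_y keys, so a hedged sketch would read them like this:
# read the nested "location" object from each alert in the original feed
for x in raw_api_dict['alerts']:
    print(x['country'])
    print(x['location']['x'])  # longitude
    print(x['location']['y'])  # latitude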

assign values of nested dict to list in python

I have file that is list of JSON objects. It looks like this :
[
    {
        "id": 748,
        "location": {
            "slug": "istanbul",
            "parent": {
                "id": 442,
                "slug": "turkey"
            }
        },
        "rank": 110
    },
    {
        "id": 769,
        "location": {
            "slug": "dubai",
            "parent": {
                "id": 473,
                "slug": "uae"
            }
        },
        "rank": 24
    }
]
I want to create a list of hotel parent names, so I wrote this code to do it. I read the JSON file and assigned it to a variable; that part is correct. But look at this code:
with open('hotels.json', 'r', encoding="utf8") as hotels_data:
    hotels = json.load(hotels_data)

parents_list = []
for item in hotels:
    if item["location"]["parent"]["slug"] not in parents_list:
        parents_list.append(item["location"]["parent"])
When I run this code, I get this error:
if item["location"]["parent"]["slug"] not in parents_list:
TypeError: 'NoneType' object is not subscriptable
This code does not work, so I tried to print the JSON objects by adding this inside the loop:
print(item["location"]["parent"]["slug"])
This prints the values I want, but it also gives me the exact same error.
Thank you for any help.
I tried running the code and it seems to be working fine with your dataset.
However, instead of opening the file to read the data, I just assigned your dataset to hotels directly: hotels = [...].
The result I got was this:
[{'id': 442, 'slug': 'turkey'}, {'id': 473, 'slug': 'uae'}]
What is your result if you print hotels? Is it the same as what you've shown here?
If you actually have a lot more data in your dataset, then I presume that some of the dictionaries don't contain item["location"]["parent"]["slug"]. If that is the case, you should skip those by first checking whether that element exists in each item before appending to parents_list.
For example:
try:
    item["location"]["parent"]["slug"]
except (KeyError, TypeError) as e:
    pass
else:
    if item["location"]["parent"]["slug"] not in parents_list:
        parents_list.append(item["location"]["parent"])
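A more compact sketch of the same idea with dict.get, assuming a missing or null "parent" is what triggers the error:
parents_list = []
for item in hotels:
    parent = (item.get("location") or {}).get("parent")  # None-safe lookup
    if parent is not None and parent not in parents_list:
        parents_list.append(parent)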
I cannot replicate the same error as you. The only thing that I can think of is that the last item in each object in the JSON shouldn't have a comma after it. See if that fixes your error.

Unable to loop through JSON output from webservice Python

My Python script makes a web-service call (HTTP GET) which returns a JSON response. The response looks to be a list of dictionaries. The script's purpose is to iterate through each dictionary, extract each piece of metadata (e.g. "ClosePrice": "57.74") and write each dictionary to its own row in MSSQL.
The issue is, I don't think Python is recognizing the JSON output from the API call as a list of dictionaries, and when I try a for loop, I'm getting the error "must be int not str". I have tried converting the output to a list, a dictionary, and a tuple. I've also tried to make it work with a list comprehension, with no luck. Further, if I copy/paste the data from the API call and assign it to a variable, it is recognized as a list of dictionaries without issue. Any help would be appreciated. I'm using Python 2.7.
Here is the actual http call being made: http://test.kingegi.com/Api/QuerySystem/GetvalidatedForecasts?user=kingegi&market=us&startdate=08/19/13&enddate=09/12/13
Here is an abbreviated JSON output from the API call:
[
    {
        "Id": "521d992cb031e30afcb45c6c",
        "User": "kingegi",
        "Symbol": "psx",
        "Company": "phillips 66",
        "MarketCap": "34.89B",
        "MCapCategory": "large",
        "Sector": "basic materials",
        "Movement": "up",
        "TimeOfDay": "close",
        "PredictionDate": "2013-08-29T00:00:00Z",
        "Percentage": ".2-.9%",
        "Latency": 37.48089483333333,
        "PickPosition": 2,
        "CurrentPrice": "57.10",
        "ClosePrice": "57.74",
        "HighPrice": null,
        "LowPrice": null,
        "Correct": "FALSE",
        "GainedPercentage": 0,
        "TimeStamp": "2013-08-28T02:31:08 778",
        "ResponseMsg": "",
        "Exchange": "NYSE "
    },
    {
        "Id": "521d992db031e30afcb45c71",
        "User": "kingegi",
        "Symbol": "psx",
        "Company": "phillips 66",
        "MarketCap": "34.89B",
        "MCapCategory": "large",
        "Sector": "basic materials",
        "Movement": "down",
        "TimeOfDay": "close",
        "PredictionDate": "2013-08-29T00:00:00Z",
        "Percentage": "16-30%",
        "Latency": 37.4807215,
        "PickPosition": 1,
        "CurrentPrice": "57.10",
        "ClosePrice": "57.74",
        "HighPrice": null,
        "LowPrice": null,
        "Correct": "FALSE",
        "GainedPercentage": 0,
        "TimeStamp": "2013-08-28T02:31:09 402",
        "ResponseMsg": "",
        "Exchange": "NYSE "
    }
]
Small part of the code being used:
import os, sys
import subprocess
import glob
from os import path
import urllib2
import json
import time

try:
    data = urllib2.urlopen('http://api.kingegi.com/Api/QuerySystem/GetvalidatedForecasts?user=kingegi&market=us&startdate=08/10/13&enddate=09/12/13').read()
except urllib2.HTTPError, e:
    print "HTTP error: %d" % e.code
except urllib2.URLError, e:
    print "Network error: %s" % e.reason.args[1]

list_id = [x['Id'] for x in data]  # test to see if it extracts the ID from each dict
print(data)       # JSON output
print(len(data))  # should retrieve the number of dicts in the list
UPDATE
Answered my own question; here is the method below:
url = 'some url that is a list of dictionaries'  # GET call
u = urllib.urlopen(url)  # u is a file-like object
data = u.read()
newdata = json.loads(data)
print(type(newdata))  # printed data type will show as a list
print(len(newdata))   # the length of the list
newdict = newdata[1]  # each element in the list is a dict
print(type(newdict))  # this element is a dict
length = len(newdata)  # how many elements in the list
for a in range(length):  # iterate over every element in the list
    var = newdata[a]
    print(var['Correct'], var['User'])
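A slightly more idiomatic sketch of the same idea, once the response body has been parsed (data comes from the urllib read above):
newdata = json.loads(data)  # parse the response body once
for forecast in newdata:    # iterate over the dicts directly
    print(forecast['Correct'], forecast['User'])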