Python: read JSON from a site and print an object - json

I get this JSON string from a page:
{"lprice":"8330.1","curr1":"BTC","curr2":"EUR"}
I tried to access the lprice with this code:
import requests

def get_latest_price(api, currencie, real):
    CEXIO_API_URL = "https://cex.io/api/last_price/%s/%s" % (currencie.upper(), real.upper())
    response = requests.get(CEXIO_API_URL)
    response_json = response.json()
    return float(response_json['lprice'])
But if I do it like this, I get this error:
File "/home/malte/Entwicklung/cryptoprice/build/all/app/install/qml/cryptoprice.py", line 16, in get_latest_price
    return float(response_json['lprice'])
KeyError: 'lprice'

I assume that your response_json is your JSON string {"lprice":"8330.1","curr1":"BTC","curr2":"EUR"}.
Then it should work if you convert the JSON string into a dictionary with the loads function:
import requests
import json

def get_latest_price(api, currencie, real):
    CEXIO_API_URL = "https://cex.io/api/last_price/%s/%s" % (currencie.upper(), real.upper())
    response = requests.get(CEXIO_API_URL)
    response_json = response.json()
    response_json = json.loads(response_json)
    return float(response_json['lprice'])
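Note that response.json() normally already returns a parsed dict, in which case the extra json.loads() call would itself fail with a TypeError. A defensive sketch that handles both cases (the isinstance check is my addition, not part of the original answer):

import json
import requests

def get_latest_price(currencie, real):
    url = "https://cex.io/api/last_price/%s/%s" % (currencie.upper(), real.upper())
    data = requests.get(url).json()
    # If the endpoint double-encodes its payload, .json() yields a str;
    # decode it a second time before indexing.
    if isinstance(data, str):
        data = json.loads(data)
    return float(data['lprice'])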

Related

BlockingIOError: [Errno 11] write could not complete without blocking

Please help. I have developed this scraper API and it works with internal commands in PythonAnywhere, but when I try to access it using my user account URL, it gives the error [Errno 11]. I have searched for solutions but couldn't find any.
#from requests_html import HTMLSession
import json
import requests

class Scraper():
    def scrapedata(self, tag):
        url = "https://www.etenders.gov.za/Home/TenderOpportunities/?status=1"
        headers = {'user-agent': 'Mozilla/5.0'}
        response = requests.get(url, headers=headers)
        data = response.json()
        file_urs = []
        for e in data:
            item = {
                'province': (f"{e['province']}"),
                'id': (f"{e['tender_No']}"),
            }
            print(file_urs)
            file_urs.append(item)
        return file_urs

quotes = Scraper()
quotes.scrapedata('cat')
from flask import Flask
import json
from scrape import Scraper

app = Flask(__name__)
quotes = Scraper()

@app.route('/', methods=['GET', 'POST'])
async def read_item(cat):
    json_dumps = json.dumps.scrapedata(cat)
    return json_dumps
    #return quotes.scrapedata(cat).jon()
I was expecting a JSON output as it did on the internal server.
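For reference, a minimal sketch of how the Flask side might look once the obvious slips are fixed (assuming the route is meant to take the category and return the scraper's output as JSON; the /<cat> route parameter is my assumption):

import json
from flask import Flask
from scrape import Scraper

app = Flask(__name__)
quotes = Scraper()

@app.route('/<cat>', methods=['GET', 'POST'])
def read_item(cat):
    # Serialize the scraper's result; json.dumps.scrapedata(...) in the
    # question is an AttributeError, since dumps is a function, not an object.
    return json.dumps(quotes.scrapedata(cat))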

Using the results of multiple for loops to post a single JSON response

Okay, so this is a loaded question and I'm sure there's an easy method to use here, but I'm stuck.
Long story short, I am tasked with creating a function in Python (to be run as an AWS Lambda) which can perform acceptance tests on a series of URLs using python-requests. These requests will be used to assert the HTTP response codes and a custom HTTP header identifying whether an HAProxy backend is correct.
The URLs themselves will be maintained in a YAML document, which will be converted to a dict in Python and passed to a for loop that uses python-requests to HTTP GET the response code and header of each URL.
The issue I am having is getting a single body object to return the results of multiple for loops.
I have tried to find similar use cases but cannot find any.
import requests
import json
import yaml

def acc_tests():
    with open("test.yaml", 'r') as stream:
        testurls = yaml.safe_load(stream)
    results = {}
    # endpoint/path 1
    for url in testurls["health endpoints"]:
        r = requests.get(url, params="none")
        stat = r.status_code
        result = json.dumps(print(url, stat))
        results = json.dumps(result)
    # endpoint path with headers
    for url in testurls["xtvapi"]:
        headers = {'H': 'xtvapi.cloudtv.comcast.net'}
        r = requests.get(url, headers=headers, params="none")
        stat = r.status_code
        head = r.headers["X-FINITY-TANGO-BACKEND"]
        result = json.dumps((url, stat, head))
        results = json.dumps(result)
    return {
        'statusCode': 200,
        'body': json.dumps(results)
    }

acc_tests()
YAML file:
health endpoints:
  - https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/tango-health/
  - https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/
  - https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/tango-health/
  - https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/
  - https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/tango-health/
  - https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/
xtvapi:
  - https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/
  - https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/
  - https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/
What I think is happening is that both for loops run one after the other, but the value of results ends up empty because it is overwritten on each iteration, and I'm not sure what to do in order to update/append the results dict with the output of each loop.
Thanks folks. I ended up solving this by creating a dict with immutable keys for each test type and then using append to add the results to a nested list within the dict.
Here is the "working" code as it is in the AWS Lambda function:
from botocore.vendored import requests
import json
import yaml

def acc_tests(event, context):
    with open("test.yaml", 'r') as stream:
        testurls = yaml.safe_load(stream)
    results = {'tango-health': [], 'xtvapi': []}
    # Tango Health
    for url in testurls["health endpoints"]:
        r = requests.get(url, params="none")
        result = url, r.status_code
        assert r.status_code == 200
        results["tango-health"].append(result)
    # xtvapi default/cloudtv
    for url in testurls["xtvapi"]:
        headers = {'H': 'xtvapi.cloudtv.comcast.net'}
        r = requests.get(url, headers=headers, params="none")
        result = url, r.status_code, r.headers["X-FINITY-TANGO-BACKEND"]
        assert r.status_code == 200
        assert r.headers["X-FINITY-TANGO-BACKEND"] == "tango-big"
        results["xtvapi"].append(result)
    resbody = json.dumps(results)
    return {
        'statusCode': 200,
        'body': resbody
    }

Working with JSON and Django

I am new to Python and Django. I am an IT professional who deploys software that monitors computers. The API outputs JSON. I want to create a Django app that reads the API and outputs the data to an HTML page. Where do I get started? I think the idea is to write the JSON feed to a Django model. Any help/advice is greatly appreciated.
Here's a simple single file to extract the JSON data:
import urllib2
import json

def printResults(data):
    theJSON = json.loads(data)
    for i in theJSON[""]:
        print i  # (loop body left incomplete in the original snippet)

def main():
    urlData = ""
    webUrl = urllib2.urlopen(urlData)
    if webUrl.getcode() == 200:
        data = webUrl.read()
        printResults(data)
    else:
        print "Received error"

if __name__ == '__main__':
    main()
If you have a URL returning JSON as the response, you could try this:
import requests

url = 'http://....'  # Your api url
response = requests.get(url)
json_response = response.json()
Now json_response is a list containing dicts. Let's suppose you have this structure:
[
    {
        'code': 'ABC',
        'avg': 14.5,
        'max': 30
    },
    {
        'code': 'XYZ',
        'avg': 11.6,
        'max': 21
    },
    ...
]
You can iterate over the list and load every dict into a model.
from yourmodels import CurrentModel
...
for obj in json_response:
    cm = CurrentModel()
    cm.avg = obj['avg']
    cm.max = obj['max']
    cm.code = obj['code']
    cm.save()
Or you could use a bulk method, but keep in mind that bulk_create() does not trigger the model's save() method.
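A minimal sketch of the bulk variant, reusing the hypothetical CurrentModel from above (remember that bulk_create() bypasses save() and the pre_save/post_save signals):

from yourmodels import CurrentModel

objects = [
    CurrentModel(code=obj['code'], avg=obj['avg'], max=obj['max'])
    for obj in json_response
]
# One INSERT (or a few batches) instead of one query per row.
CurrentModel.objects.bulk_create(objects)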

Processing JSON Response using scrapy

I have the following code in my scrapy spider:
def parse(self, response):
    jsonresponse = json.loads(response.body_as_unicode())
    htmldata = jsonresponse["html"]
    for sel in htmldata.xpath('//li/li'):
        # -- more xpath code --
        yield item
But I am having this error:
raise ValueError("No JSON object could be decoded")
exceptions.ValueError: No JSON object could be decoded
After checking the JSON reply, I found out about <!--WPJM--> and <!--WPJM_END-->, which are causing this error:
<!--WPJM-->{"found_jobs":true,"html":"<html code>","max_num_pages":3}<!--WPJM_END-->
How do I parse this in Scrapy without tripping over the <!--WPJM--> and <!--WPJM_END--> comments?
EDIT: This is the error that I have:
File "/home/muhammad/Projects/project/project/spiders/crawler.py", line 150, in parse
    for sel in htmldata.xpath('//li'):
exceptions.AttributeError: 'unicode' object has no attribute 'xpath'
def parse(self, response):
    rawdata = response.body_as_unicode()
    jsondata = rawdata.replace('<!--WPJM-->', '').replace('<!--WPJM_END-->', '')
    # print jsondata  # For debugging
    data = json.loads(jsondata)
    htmldata = data["html"]
    # print htmldata  # For debugging
    for sel in htmldata.xpath('//li'):
        item = ProjectjomkerjaItem()
        item['title'] = sel.xpath('a/div[@class="position"]/div[@id="job-title-job-listing"]/strong/text()').extract()
        item['company'] = sel.xpath('a/div[@class="position"]/div[@class="company"]/strong/text()').extract()
        item['link'] = sel.xpath('a/@href').extract()
The easiest approach would be to get rid of the comment tags manually using replace():
data = response.body_as_unicode()
data = data.replace('<!--WPJM-->', '').replace('<!--WPJM_END-->', '')
jsonresponse = json.loads(data)
Though it is not quite pythonic or reliable.
Or, a better option would be to get the text() via XPath:
$ scrapy shell index.html
>>> response.xpath('//text()').extract()[0]
u'{"found_jobs":true,"html":"<html code"}'

No JSON object could be decoded

While running this script I am getting the following error:
ERROR: getTest failed. No JSON object could be decoded
My code is:
import json
import urllib

class test:
    def getTest(self):
        try:
            url = 'http://www.exapmle.com/id/123456/'
            json_ = urllib.urlopen(url).read()
            self.id = json.loads(json_)
            print self.id
        except Exception, e:
            print "ERROR: getTest failed. %s" % e

if __name__ == "__main__":
    ti = test()
    ti.getTest()
While opening this URL "http://www.exapmle.com/id/123456/" in a browser I get JSON-format data, so why is it throwing an error?
Print json_ before calling json.loads(). If there is an error in your URL, urllib does not necessarily throw an exception; it might just retrieve some error page (as with the provided URL) that is not valid JSON.
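A minimal debugging sketch along those lines (the status-code check is an addition, and the URL is the questioner's placeholder):

import json
import urllib

url = 'http://www.exapmle.com/id/123456/'
resp = urllib.urlopen(url)
raw = resp.read()
print resp.getcode()    # anything other than 200 usually means an error page
print raw[:200]         # inspect what actually came back before parsing
data = json.loads(raw)  # raises ValueError if raw is not valid JSON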