How do I properly request information from this API?

Like the title says, I'm having problems accessing information from an API. This is the message I receive when running the function:
{'message': 'no Route matched with those values'}
This is the full code (API key redacted):
import tkinter as tk
import requests

# API Key : xxxxxx-xxxx-xxxxx-xxxx-xxxxxxx
# Player Stats Request: https://api.fortnitetracker.com/v1/profile/{platform}/{epic-nickname}

squad = {}
platforms = ['xb1', 'psn', 'pc']

def get_player(player):
    headers = {'TRN-Api-Key': 'xxxxxx-xxxx-xxxxx-xxxx-xxxxxxx'}
    url = 'https://api.fortnitetracker.com/v1/profile'
    params = {'platform': 'pc', 'epic-nickname': player}
    response = requests.get(url, params=params, headers=headers)
    print(response.json())

# setting up the screen
root = tk.Tk()
root.geometry("800x600")
root.maxsize(800, 600)
root.title("Fortnite Squad Leaderboard")
root.iconphoto(True, tk.PhotoImage(file="bigpot.png"))

# background image
background_img = tk.PhotoImage(file='fortnitebg.png')
background_label = tk.Label(root, image=background_img)
background_label.place(relwidth=1, relheight=1)

top_frame = tk.Frame(root, bg="lightblue")
top_frame.place(relx=0, rely=0, relwidth=0.4, relheight=0.06)

player_search_button = tk.Button(top_frame, text="Player Search",
                                 command=lambda: get_player(player_search.get()))
player_search_button.place(relx=0.7, rely=0.2, relwidth=0.28, relheight=0.6)

player_search = tk.Entry(top_frame)
player_search.place(relx=0.05, rely=0.2, relwidth=0.63, relheight=0.6)

root.mainloop()

The endpoint https://api.fortnitetracker.com/v1/profile/{platform}/{epic-nickname} takes platform and epic-nickname as URL (path) parameters, not query parameters.
Query parameters are the ones appended to the end of the URL, like ?k=v&k1=v1, whereas URL parameters are part of the URL path itself.
So the URL should look like
https://api.fortnitetracker.com/v1/profile/pc/player
So instead of doing
url = 'https://api.fortnitetracker.com/v1/profile'
params = {'platform': 'pc', 'epic-nickname': player}
response = requests.get(url, params=params, headers=headers)
you should do this:
url = f'https://api.fortnitetracker.com/v1/profile/pc/{player}'
response = requests.get(url, headers=headers)
This should work for you.
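Putting it together, here is a minimal sketch of the corrected get_player (the platform argument and the status check are additions for illustration, not part of the original answer):

import requests

API_KEY = 'xxxxxx-xxxx-xxxxx-xxxx-xxxxxxx'  # redacted, as in the question

def get_player(player, platform='pc'):
    # platform and epic nickname are path segments, not query parameters
    url = f'https://api.fortnitetracker.com/v1/profile/{platform}/{player}'
    headers = {'TRN-Api-Key': API_KEY}
    response = requests.get(url, headers=headers)
    if response.ok:
        return response.json()
    print(f'Request failed with status {response.status_code}')
    return None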


How to add user input to a URL with Tkinter?

I'm trying to create a program in which currency conversion is crucial. I have an API URL to which I want to add user input (the amount to be converted), not to display it in a label but to work with it later on.
input = Entry(root)
URL = "https://www.myawesomeurl.com/exchangerate/amout="
payload = {}
headers = {"apikey": "awjdwahduwahdwauduw"}
response = requests.request("GET", URL, headers=headers, data = payload)
status_code = response.status_code
result = response.text
I'm new to programming so any help and tips would be much appreciated.
You can use a submit function to process your data. Ex.
import tkinter
import requests

root = tkinter.Tk()

def submitamount():
    amounttext = amount.get()
    URL = f"https://www.myawesomeurl.com/exchangerate/amout={amounttext}"
    payload = {}
    headers = {"apikey": "awjdwahduwahdwauduw"}
    response = requests.request("GET", URL, headers=headers, data=payload)
    status_code = response.status_code
    result = response.text
    # Do stuff here / call another function to do stuff
    #
    # If you want to make some variables accessible to other parts of
    # the code, simply define them before this function is declared;
    # Ex.
    #
    # Instead of:
    #     root = tkinter.Tk()
    #     ...
    #     def submitamount():
    #         ...
    #
    # Do:
    #     root = tkinter.Tk()
    #     ...
    #     response = None
    #     status_code = None
    #     result = None
    #
    #     def submitamount():
    #         ...

amount = tkinter.Entry(root)
amount.pack()
submit = tkinter.Button(text="Submit", command=submitamount)
submit.pack()
root.mainloop()
If you need a more thorough explanation, please let me know!
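For illustration, a minimal sketch (an addition, reusing the placeholder URL and key from the question) that keeps the latest result in a tkinter StringVar so the rest of the UI can use it later:

import tkinter
import requests

root = tkinter.Tk()
result_var = tkinter.StringVar(master=root)  # holds the latest API result

def submitamount():
    amounttext = amount.get()
    # placeholder URL and key, copied from the question
    url = f"https://www.myawesomeurl.com/exchangerate/amout={amounttext}"
    headers = {"apikey": "awjdwahduwahdwauduw"}
    response = requests.get(url, headers=headers)
    result_var.set(response.text)  # readable anywhere via result_var.get()

amount = tkinter.Entry(root)
amount.pack()
submit = tkinter.Button(text="Submit", command=submitamount)
submit.pack()
result_label = tkinter.Label(root, textvariable=result_var)  # updates automatically
result_label.pack()
root.mainloop()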

Using the results of multiple for loops to post a single JSON response

Okay, so this is a loaded question, and I'm sure there's an easy method to use here, but I'm stuck.
Long story short, I am tasked with creating a function in Python (to be run as an AWS Lambda) which can perform acceptance tests on a series of URLs using python-requests. These requests will be used to assert the HTTP response codes and a custom HTTP header identifying whether an HAProxy backend is correct.
The URLs themselves will be maintained in a YAML document, which will be converted to a dict in Python and passed to a for loop that uses requests to GET the response code and headers of each URL.
The issue I am having is getting a single body object to return the results of both for loops.
I have tried to find similar use cases but cannot find any.
import requests
import json
import yaml

def acc_tests():
    with open("test.yaml", 'r') as stream:
        testurls = yaml.safe_load(stream)
    results = {}
    # endpoint/path 1
    for url in testurls["health endpoints"]:
        r = requests.get(url, params="none")
        stat = r.status_code
        result = json.dumps(print(url, stat))
        results = json.dumps(result)
    # endpoint path with headers
    for url in testurls["xtvapi"]:
        headers = {'H': 'xtvapi.cloudtv.comcast.net'}
        r = requests.get(url, headers=headers, params="none")
        stat = r.status_code
        head = r.headers["X-FINITY-TANGO-BACKEND"]
        result = json.dumps((url, stat, head))
        results = json.dumps(result)
    return {
        'statusCode': 200,
        'body': json.dumps(results)
    }

acc_tests()
YAML file:
health endpoints:
- https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/tango-health/
- https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/
- https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/tango-health/
- https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/
- https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/tango-health/
- https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/
xtvapi:
- https://xfinityapi-tango-production-aws-us-east-1-active.r53.aae.comcast.net/
- https://xfinityapi-tango-production-aws-us-east-2-active.r53.aae.comcast.net/
- https://xfinityapi-tango-production-aws-us-west-2-active.r53.aae.comcast.net/
What I think is happening is that both for loops are running one after the other, but the value of results ends up empty because each iteration overwrites it, and I'm not sure what to do in order to update/append the results dict with the results of each loop.
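(The snag is that results = json.dumps(result) rebinds results to a fresh string on every iteration instead of accumulating into the dict. A minimal sketch of the accumulate-then-serialize pattern, with illustrative data:)

import json

results = {'health': [], 'xtvapi': []}  # one list per test type

for url, stat in [('https://example.com/a', 200), ('https://example.com/b', 503)]:
    results['health'].append((url, stat))  # accumulate; never reassign results

body = json.dumps(results)  # serialize once, after all loops have finished
print(body)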
Thanks folks. I ended up solving this by creating a dict with a fixed key for each test type, and then using append to add each result to the nested list under its key.
Here is the "working" code as it is in the AWS Lambda function:
from botocore.vendored import requests
import json
import yaml

def acc_tests(event, context):
    with open("test.yaml", 'r') as stream:
        testurls = yaml.safe_load(stream)
    results = {'tango-health': [], 'xtvapi': []}
    # Tango Health
    for url in testurls["health endpoints"]:
        r = requests.get(url, params="none")
        result = url, r.status_code
        assert r.status_code == 200
        results["tango-health"].append(result)
    # xtvapi default/cloudtv
    for url in testurls["xtvapi"]:
        headers = {'H': 'xtvapi.cloudtv.comcast.net'}
        r = requests.get(url, headers=headers, params="none")
        result = url, r.status_code, r.headers["X-FINITY-TANGO-BACKEND"]
        assert r.status_code == 200
        assert r.headers["X-FINITY-TANGO-BACKEND"] == "tango-big"
        results["xtvapi"].append(result)
    resbody = json.dumps(results)
    return {
        'statusCode': 200,
        'body': resbody
    }

JWT is being converted from str to bytes on requests.post()

I am writing a Flask application in which I have a service that generates a JWT and passes it on to another service using requests.post(), after decoding it to 'UTF-8'.
While sending the JWT, I can see that its type is str. However, on performing json.loads() on the other service, I get an error that says
TypeError: the JSON object must be str, not 'bytes'
Here is my code:
Service 1:
@app.route('/')
def index():
    token = jwt.encode({'message': 'Hello'}, app.config['SECRET_KEY'])
    # After this statement I am able to verify the type is str and not bytes
    token = token.decode('UTF-8')
    headers = {'content-type': 'application/json'}
    url = 'someUrl'
    data = {"token": token}
    data = json.dumps(data)
    requests.post(url, data=data, headers=headers)
    return 'Success'
Service 2:
@app.route('/', methods=['POST'])
def index():
    data = json.loads(request.data)
    return 'Success'
Why do I get this error even though the type was converted to string?
EDIT: I was able to successfully retrieve the token by passing it through a header. But I would still like to know what caused this error.
You could post it as JSON instead of data, and let the underlying library take care of it for you.
Service 1
@app.route('/')
def index():
    token = jwt.encode({'message': 'Hello'}, app.config['SECRET_KEY']).decode('UTF-8')
    url = 'someUrl'
    data = {"token": token}
    requests.post(url, json=data)
    return 'Success'
Service 2
data = request.get_json()
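As for the underlying cause: the string gets encoded back to bytes on the wire, and Flask's request.data is always bytes; on Python versions before 3.6, json.loads() only accepts str, which produces exactly this TypeError. If you want to keep posting via data=, a minimal sketch of the receiving side that decodes explicitly (assuming the sender used UTF-8):

from flask import Flask, request
import json

app = Flask(__name__)

@app.route('/', methods=['POST'])
def index():
    # request.data is bytes regardless of what the sender passed to requests.post;
    # decoding restores the str that json.loads requires on Python < 3.6
    data = json.loads(request.data.decode('utf-8'))
    return 'Success'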

Failing to store data in a CSV file through scraping

I'm trying to scrape a webpage, extract the data, and store it all in a CSV file. Before adding the ScrapeCallback class and calling it, everything worked fine. However, after adding the new class, nothing except the headers gets stored in the CSV file. Can anyone help me figure out the problem?
import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue
import csv
import lxml.html
class ScrapeCallback:
    # extract and store all data in a csv file
    def __init__(self):
        self.writer = csv.writer(open('countries.csv', 'w'))
        self.fields = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        if re.search('/view/', url):
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                row.append(tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content())
            print row
            self.writer.writerow(row)
def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, headers=None, user_agent='wswp', proxy=None, num_retries=1, scrape_callback=None):
    """Crawl from the given seed URL following links matched by link_regex
    """
    # the queue of URLs that still need to be crawled
    crawl_queue = [seed_url]
    # the URLs that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URLs have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)
    throttle = Throttle(delay)
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent

    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        # check url passes robots.txt restrictions
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])
            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))
                for link in links:
                    link = normalize(seed_url, link)
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success! add this new link to queue
                            crawl_queue.append(link)
            # check whether have reached downloaded maximum
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print 'Blocked by robots.txt:', url
class Throttle:
    """Throttle downloading by sleeping between requests to same domain
    """
    def __init__(self, delay):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        """Delay if have accessed this domain recently
        """
        domain = urlparse.urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                html = download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html
def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain
    """
    link, _ = urlparse.urldefrag(link)  # remove hash to avoid duplicates
    return urlparse.urljoin(seed_url, link)

def same_domain(url1, url2):
    """Return True if both URLs belong to same domain
    """
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc

def get_robots(url):
    """Initialize robots parser for this domain
    """
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp

def get_links(html):
    """Return a list of links from html
    """
    # a regular expression to extract all links from the webpage
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)
if __name__ == '__main__':
    # link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, user_agent='BadCrawler')
    # link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1, user_agent='GoodCrawler')
    link_crawler('http://example.webscraping.com', '/(index|view)', max_depth=2, scrape_callback=ScrapeCallback())
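No answer is recorded here, but one plausible cause (an assumption, not a confirmed diagnosis) is that the countries.csv handle opened in __init__ is never closed, so buffered rows can be lost before they reach the disk. A minimal sketch that keeps the file handle and flushes after each row:

import csv

class ScrapeCallback:
    def __init__(self):
        # keep a handle so rows can be flushed to disk as they are written
        self.file = open('countries.csv', 'w')
        self.writer = csv.writer(self.file)
        self.fields = ('area', 'population')  # shortened here; use the full tuple
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        row = [url, len(html)]  # placeholder values; keep the real cssselect logic
        self.writer.writerow(row)
        self.file.flush()  # without this, buffered rows may never hit the disk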

I'm using the TMDB API to show movie posters but I'm getting some errors

Here is my code:
import os
import requests

CONFIG_PATTERN = 'http://api.themoviedb.org/3/search/movie?query=Monsters+University&api_key=xxx'
IMG_PATTERN = 'http://api.themoviedb.org/3/movie?query=Monsters+University&api_key=xxx'
KEY = 'xxx'

def _get_json(url):
    r = requests.get(url)
    return r.json()

def _download_images(urls, path='.'):
    """download all images in list 'urls' to 'path' """
    for nr, url in enumerate(urls):
        r = requests.get(url)
        filetype = r.headers['content-type'].split('/')[-1]
        filename = 'poster_{0}.{1}'.format(nr+1, filetype)
        filepath = os.path.join(path, filename)
        with open(filepath, 'wb') as w:
            w.write(r.content)

def get_poster_urls(imdbid):
    """ return image urls of posters for IMDB id

    returns all poster images from 'themoviedb.org'. Uses the
    maximum available size.

    Args:
        imdbid (str): IMDB id of the movie

    Returns:
        list: list of urls to the images
    """
    config = _get_json(CONFIG_PATTERN.format(key=KEY))
    base_url = config['images']['base_url']
    sizes = config['images']['poster_sizes']

    def size_str_to_int(x):
        return float("inf") if x == 'original' else int(x[1:])

    max_size = max(sizes, key=size_str_to_int)
    posters = _get_json(IMG_PATTERN.format(key=KEY, imdbid=imdbid))['posters']
    poster_urls = []
    for poster in posters:
        rel_path = poster['file_path']
        url = "{0}{1}{2}".format(base_url, max_size, rel_path)
        poster_urls.append(url)
    return poster_urls

def tmdb_posters(imdbid, count=None, outpath='.'):
    urls = get_poster_urls(imdbid)
    if count is not None:
        urls = urls[:count]
    _download_images(urls, outpath)

if __name__ == "__main__":
    tmdb_posters('tt0095016')
I'm fetching the JSON data using the TMDB API, but I'm getting errors. Here they are:
Traceback (most recent call last):
File "C:/Users/ayushblueluck/PycharmProjects/MovieDatabase/test.py", line 57, in <module>
tmdb_posters('tt0095016')
File "C:/Users/ayushblueluck/PycharmProjects/MovieDatabase/test.py", line 51, in tmdb_posters
urls = get_poster_urls(imdbid)
File "C:/Users/ayushblueluck/PycharmProjects/MovieDatabase/test.py", line 33, in get_poster_urls
base_url = config['images']['base_url']
KeyError: 'images'
Process finished with exit code 1
But I am unable to figure out the errors. It seems like everything is right, yet these errors won't go away; I have tried everything.
I guess it should work if you change CONFIG_PATTERN to http://api.themoviedb.org/3/configuration?api_key=<your_key>
BTW, edited your question since you posted your API key in it.
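For reference, a sketch of what the corrected constants might look like, so that the .format(key=KEY, imdbid=imdbid) calls actually have placeholders to fill; /movie/{id}/images is the usual TMDB images route, but treat the exact patterns as assumptions:

# /configuration is the endpoint that returns the 'images' key
# (base_url, poster_sizes) that get_poster_urls reads
CONFIG_PATTERN = 'http://api.themoviedb.org/3/configuration?api_key={key}'

# images route for a specific movie; the {imdbid} and {key} placeholders
# match the existing .format() calls in get_poster_urls
IMG_PATTERN = 'http://api.themoviedb.org/3/movie/{imdbid}/images?api_key={key}'

KEY = 'xxx'  # redacted, as in the question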