TypeError: string indices must be integers - json

Hi, I have a problem with my code: a loop works a few times but then throws a TypeError: string indices must be integers.
I want to call an API, get JSON back, and extract some parts of the JSON response. Here's the code:
class API(object):

    def __init__(self, api_key):
        self.api_key = api_key

    def _request(self, api_url, params={}):
        args = {'api_key': self.api_key}
        for key, value in params.items():
            if key not in args:
                args[key] = value
        response = requests.get(
            Consts.URL['base'].format(
                url=api_url
            ),
            params=args
        )
        if response.status_code == requests.codes.ok:
            return response.json()
        else:
            return "not possible"
        print(response.url)

    def get_list(self):
        excel = EXCEL('s6.xlsx')
        api_url = Consts.URL['list'].format(
            version = Consts.API_VERSIONS['matchversion'],
            start = excel.get_gamenr()
        )
        return self._request(api_url)

    def get_match(self, matchid):
        idlist = matchid
        api_url = Consts.URL['match'].format(
            version = Consts.API_VERSIONS['matchversion'],
            matchId = idlist
        )
        return self._request(api_url)

    def match_ids(self):
        api = API('c6ea2f68-7ed6-40fa-9b99-fd591c55c05f')
        x = api.get_list()
        y = x['matches']
        count = len(y)
        ids = []
        while count > 0:
            count = count - 1
            temp = y[0]
            ids.append(temp['matchId'])
            del y[0]
        return ids

    def match_info(self):
        matchids = self.match_ids()
        print(matchids)
        matchinfolist = {}
        counter = 1
        for gameids in matchids:
            info = self.get_match(gameids)
            myid = self.find_partid(info['participantIdentities'])
            prepdstats = info['participants'][myid-1]
            print(prepdstats)
            matchinfolist['stats' + str(counter)] = prepdstats
        return matchinfolist

    def find_partid(self, partlist):
        partid = 0
        idlist = partlist
        while partid < 10:
            partid = partid + 1
            tempplayer = idlist[0]['player']
            if tempplayer['summonerId'] == 19204660:
                playernr = partid
                partid = 500
            del idlist[0]
        return playernr
When I run the match_info() function I get this error:
Traceback (most recent call last):
  File "C:\Users\Niklas\Desktop\python riot\main.py", line 17, in <module>
    main()
  File "C:\Users\Niklas\Desktop\python riot\main.py", line 10, in main
    print(api.match_info())
  File "C:\Users\Niklas\Desktop\python riot\api.py", line 78, in match_info
    myid = self.find_partid(info['participantIdentities'])
TypeError: string indices must be integers
but only after the loop in the function has run a few times. I have no idea what I'm doing wrong. Any help would be appreciated.
Here is a link to the json: https://euw.api.pvp.net/api/lol/euw/v2.2/match/2492271473?api_key=c6ea2f68-7ed6-40fa-9b99-fd591c55c05f

The error shows up on
myid = self.find_partid(info['participantIdentities'])
For this line to work, info must be a dict with string keys, not a string itself. info is
info = self.get_match(gameids)
get_match ends with
return self._request(api_url)
_request ends with
if response.status_code == requests.codes.ok:
    return response.json()
else:
    return "not possible"
For that line to work on every pass of the loop, response.json() must be a dict with the key 'participantIdentities'. Your bug is expecting that to always be true: whenever a request fails, _request returns the string "not possible" instead.
One fix might be to make the expectation always true. If there is a satisfactory default value, return {'participantIdentities': <default value>} instead of the error string. Otherwise, return None and change the loop to
info = self.get_match(gameids)
if info is not None:
    # as before
else:
    # whatever default action you want
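Put together, a minimal sketch of that second option, using only the names from the question's code (the counter increment is added here so each match gets its own key, which the original loop misses):

def _request(self, api_url, params={}):
    args = {'api_key': self.api_key}
    for key, value in params.items():
        if key not in args:
            args[key] = value
    response = requests.get(
        Consts.URL['base'].format(url=api_url),
        params=args
    )
    if response.status_code == requests.codes.ok:
        return response.json()
    # Return None on failure instead of the string "not possible",
    # so callers cannot accidentally index into an error message.
    return None

def match_info(self):
    matchids = self.match_ids()
    matchinfolist = {}
    counter = 1
    for gameids in matchids:
        info = self.get_match(gameids)
        if info is None:
            # The request failed; skip this match (or retry/log, as you prefer).
            continue
        myid = self.find_partid(info['participantIdentities'])
        matchinfolist['stats' + str(counter)] = info['participants'][myid - 1]
        counter = counter + 1
    return matchinfolist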

Related

DISCORD.PY Creating an end Giveaway Command

I am working on a giveaway bot. After writing the start and reroll commands I have run into the end command, which I cannot fully grasp how to do. My idea is to register the giveaway message's ID in my aiosqlite database when the giveaway is created, and then have the end command fetch it and stop the giveaway. The part I can't figure out is a function or task that actually ends the giveaway, or somehow cuts the duration short.
For reference, here is my start command:
class Start(commands.Cog):
    def __init__(self, client):
        self.client = client

    def convert(self, timer):
        pos = ["s", "m", "h", "d"]
        time_dict = {"s": 1, "m": 60, "h": 3600, "d": 3600*24}
        unit = timer[-1]
        if unit not in pos:
            return -1
        try:
            val = int(timer[:-1])
        except:
            return -2
        return val * time_dict[unit]

    @commands.command()
    async def start(self, ctx, duration, winners: str, *, prize):
        timer = (self.convert(duration))
        winners = int(winners.replace("w", ""))
        await ctx.message.delete()
        timestamp = time.time() + timer
        epoch_time = int((time.time() + timer))
        embed = discord.Embed(title=f"{prize}", description=f'React with 🎉 to enter\nEnds: <t:{epoch_time}:R> (<t:{epoch_time}>)\nHosted by {ctx.author.mention}\n', color=ctx.author.color, timestamp=(datetime.datetime.utcfromtimestamp(timestamp)))
        embed.set_footer(text=f'Winners : {winners} | Ends at \u200b')
        gaw_msg = await ctx.send(content="<:spadess:939938117736091678> **GIVEAWAY** <:spadess:939938117736091678>", embed=embed)
        await gaw_msg.add_reaction("🎉")
        db = await aiosqlite.connect("main.db")
        cursor = await db.cursor()
        await cursor.execute(f'SELECT * FROM storingStuff WHERE msgID = {gaw_msg.id}')
        data = await cursor.fetchone()
        if data is None:
            await cursor.execute(f'INSERT INTO storingStuff (msgID, guildID) VALUES({gaw_msg.guild.id} , {gaw_msg.id})')
        await db.commit()
        await cursor.close()
        await db.close()
        await asyncio.sleep(timer)
        new_msg = await ctx.channel.fetch_message(gaw_msg.id)
        users_mention = []
        for i in range(winners):
            users = await new_msg.reactions[0].users().flatten()
            users.pop(users.index(self.client.user))
            winner = random.choice(users)
            users_mention.append(winner.mention)
            users.remove(winner)
        displayed_winners = ",".join(users_mention)
        endembed = discord.Embed(title=f"{prize}", description=f"Winner: {displayed_winners}\nHosted by: {ctx.author.mention}", color=ctx.author.color, timestamp=(datetime.datetime.utcfromtimestamp(timestamp)))
        endembed.set_footer(text='Ended at \u200b')
        await gaw_msg.edit(content="<:done:939940228746072096> **GIVEAWAY ENDED** <:done:939940228746072096>", embed=endembed)
        await ctx.send(f"Congratulations {displayed_winners}! You won the **{prize}**.\n{gaw_msg.jump_url}")


def setup(client):
    client.add_cog(Start(client))
Any help with this case would be appreciated, or any code reference, as I'm pretty new. Thank you for spending your time reading this.
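A minimal sketch of the approach described above, under a few assumptions: the command receives the giveaway message's ID, the giveaway lives in the channel the command is used in, a single winner is drawn, and the same storingStuff table and discord.py 1.x reaction API (.flatten()) from the start command are used. It is a starting point, not a drop-in solution:

    @commands.command()
    async def end(self, ctx, message_id: int):
        # Look up the giveaway in the same aiosqlite database used by the start command.
        db = await aiosqlite.connect("main.db")
        cursor = await db.cursor()
        await cursor.execute('SELECT * FROM storingStuff WHERE msgID = ?', (message_id,))
        data = await cursor.fetchone()
        await cursor.close()
        await db.close()
        if data is None:
            return await ctx.send("No giveaway is registered with that message ID.")
        # Fetch the giveaway message and draw a winner from the first reaction (🎉).
        gaw_msg = await ctx.channel.fetch_message(message_id)
        users = await gaw_msg.reactions[0].users().flatten()
        users = [user for user in users if user != self.client.user]
        if not users:
            return await ctx.send("Nobody entered the giveaway.")
        winner = random.choice(users)
        await gaw_msg.edit(content="**GIVEAWAY ENDED**", embed=gaw_msg.embeds[0])
        await ctx.send(f"Congratulations {winner.mention}! The giveaway has ended.\n{gaw_msg.jump_url}")

The remaining piece is cutting the asyncio.sleep(timer) in the start command short; that is usually done by keeping a handle to the sleeping task and cancelling it, or by checking a flag/DB row after the sleep, depending on how you want the two commands to interact.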

Return function value as a value in a dictionary

dic1 = {}

class Piglet:
    pass
    def __init__(self, name, age):
        self.name = name
        self.age = age
    def speak_or_not(self):
        if self.age > 2:
            return True
        else:
            return False
    def speak_dic(self):
        global dic1
        dic1[self.name] = speak_or_not()

pig1 = Piglet("Shaun", 3)
pig1.speak_dic()
print(dic1)
I want to add the return value of speak_or_not as a value in the dictionary dic1, which should give output like {"Shaun": True} since age > 2. But it prints an empty dictionary.
How do I call a function and set its return value as a value in the dictionary?
The code is not correct. In the speak_dic method you are calling speak_or_not, which does not exist in that scope. You need to call self.speak_or_not instead.
The corrected code is below.
dic1 = {}

class Piglet:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def speak_or_not(self):
        if self.age > 2:
            return True
        else:
            return False

    def speak_dic(self):
        global dic1
        dic1[self.name] = self.speak_or_not()

pig1 = Piglet("Shaun", 3)
pig1.speak_dic()
print(dic1)
Printing dic1 then shows {'Shaun': True}, as you expected.
Hope this helps.
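As a side note (not part of the original answer), here is a small sketch that avoids the global statement by passing the dictionary into speak_dic; the behaviour is the same:

class Piglet:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def speak_or_not(self):
        # A comparison already evaluates to True or False.
        return self.age > 2

    def speak_dic(self, dic):
        dic[self.name] = self.speak_or_not()

dic1 = {}
Piglet("Shaun", 3).speak_dic(dic1)
print(dic1)  # {'Shaun': True}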

Open Binary File in a URL

In the Document module, when I click on a link to a binary file, it is downloaded as a JSON object.
How can I open it directly in the web browser instead?
I found the code that implements downloading the attachment file for the Document module:
@http.route('/web/binary/saveas_ajax', type='http', auth="user")
@serialize_exception
def saveas_ajax(self, data, token):
    jdata = simplejson.loads(data)
    model = jdata['model']
    field = jdata['field']
    data = jdata['data']
    id = jdata.get('id', None)
    filename_field = jdata.get('filename_field', None)
    context = jdata.get('context', {})
    Model = request.session.model(model)
    fields = [field]
    if filename_field:
        fields.append(filename_field)
    if data:
        res = {field: data}
    elif id:
        res = Model.read([int(id)], fields, context)[0]
    else:
        res = Model.default_get(fields, context)
    filecontent = base64.b64decode(res.get(field, ''))
    if not filecontent:
        raise ValueError(_("No content found for field '%s' on '%s:%s'") %
                         (field, model, id))
    else:
        filename = '%s_%s' % (model.replace('.', '_'), id)
        if filename_field:
            filename = res.get(filename_field, '') or filename
        return request.make_response(filecontent,
            headers=[('Content-Type', 'application/octet-stream'),
                     ('Content-Disposition', content_disposition(filename))],
            cookies={'fileToken': token})
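The handler above forces a download because it always answers with Content-Type: application/octet-stream and an attachment-style Content-Disposition. A hedged sketch of the usual change (not an official Odoo snippet): in an adapted copy of the handler, guess the real MIME type from the filename and serve the response inline, keeping filecontent, filename, request and token from the code above:

import mimetypes

mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
return request.make_response(filecontent,
    headers=[('Content-Type', mimetype),
             ('Content-Disposition', 'inline; filename="%s"' % filename)],
    cookies={'fileToken': token})

Browsers will only render types they know how to display (PDF, images, plain text, and so on); anything else still falls back to a download regardless of the headers.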

Django error: AttributeError at - 'Manager' object has no attribute 'META'

I'm using Django 1.4 and Python 2.7.
I'm trying to get data from the MySQL database...
views.py:
def myRequests(request):
    #request = ProjectRequest.objects
    response = render_to_response('myRequests.html', {'ProjectRequest': request}, context_instance = RequestContext(request))
    return response
As soon as I uncomment 'request = ProjectRequest.objects' I get the error:
AttributeError at /myRequests/
'Manager' object has no attribute 'META'
I'm not defining any new user models so this error makes no sense to me.
Exception location:
/{path}/lib/python2.7/site-packages/django/core/context_processors.py in debug, line 35
modelsRequest.py:
class ProjectRequest(Model):
    reqType = CharField("Request Type", max_length = MAX_CHAR_LENGTH)
    reqID = IntegerField("Request ID", primary_key = True)
    ownerID = IntegerField("Owner ID")
    projCodeName = CharField("Project Code Name", max_length = MAX_CHAR_LENGTH)
    projPhase = CharField("Project Phase", max_length = MAX_CHAR_LENGTH)
    projType = CharField("Project Phase", max_length = MAX_CHAR_LENGTH)
    projNotes = CharField("Project Notes", max_length = MAX_CHAR_LENGTH)
    contacts = CharField("Project Notes", max_length = MAX_CHAR_LENGTH)
    reqStatus = CharField("Status", max_length = MAX_CHAR_LENGTH)
    reqPriority = CharField("Request Priority", max_length = MAX_CHAR_LENGTH)
    reqEstDate = DateTimeField("Request Estimated Complete Date", auto_now_add = True)
    lastSaved = DateTimeField("Last saved", auto_now = True)
    created = DateTimeField("Created", auto_now_add = True)

    def __unicode__(self):
        return str(self.reqID) + ": " + str(self.reqType)

    class Meta:
        app_label = 'djangoTestApp'
        db_table = 'requests'
Any idea what's going on?!
Use a different variable name
project_request = ProjectRequest.objects
The issue comes from context_instance = RequestContext(request): by the time it runs, the request variable has been overwritten, so Django no longer receives the real request object. Hence the error.
def myRequests(request):
    project_request = ProjectRequest.objects
    # Now you still have access to the request object;
    # do whatever you want with project_request.
    response = render_to_response('myRequests.html', {'ProjectRequest': project_request}, context_instance = RequestContext(request))
    return response
The reason you are getting the error 'Manager' object has no attribute 'META' is that when you do
ProjectRequest.objects
the default manager (objects) of the ProjectRequest model is assigned to the local variable request, overwriting the real request. RequestContext(request) then receives a Manager, and Django's debug context processor (the exception location in your traceback) tries to read request.META from it.
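To make that concrete, this illustrative fragment (hypothetical, just to show the failing lookup) reproduces what the context processor ends up doing with the overwritten variable:

request = ProjectRequest.objects   # "request" now refers to a Manager, not an HttpRequest
request.META                       # AttributeError: 'Manager' object has no attribute 'META'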

scrapy unhandled exception

I am using scrapy 0.16.2 on Linux. I'm running:
scrapy crawl mycrawlspider -s JOBDIR=/mnt/mycrawlspider
I'm getting this error, which blocks scrapy (it hangs and doesn't finish on its own; only ^C stops it):
2012-11-20 15:04:51+0000 [-] Unhandled Error
Traceback (most recent call last):
  File "/usr/lib/python2.7/site-packages/scrapy/commands/crawl.py", line 45, in run
    self.crawler.start()
  File "/usr/lib/python2.7/site-packages/scrapy/crawler.py", line 80, in start
    reactor.run(installSignalHandlers=False) # blocking call
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 1169, in run
    self.mainLoop()
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 1178, in mainLoop
    self.runUntilCurrent()
--- <exception caught here> ---
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 800, in runUntilCurrent
    call.func(*call.args, **call.kw)
  File "/usr/lib/python2.7/site-packages/scrapy/utils/reactor.py", line 41, in __call__
    return self._func(*self._a, **self._kw)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 116, in _next_request
    self.crawl(request, spider)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 172, in crawl
    self.schedule(request, spider)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 176, in schedule
    return self.slots[spider].scheduler.enqueue_request(request)
  File "/usr/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 48, in enqueue_request
    if not request.dont_filter and self.df.request_seen(request):
exceptions.AttributeError: 'NoneType' object has no attribute 'dont_filter'
BTW this worked in version 0.14
Here is the code:
class MySpider(CrawlSpider):
    name = 'alrroya'
    NEW_IGNORED_EXTENSIONS = list(IGNORED_EXTENSIONS)
    NEW_IGNORED_EXTENSIONS.remove('pdf')
    download_delay = 0.05
    # Stay within these domains when crawling
    allowed_domains = []
    all_domains = {}
    start_urls = []
    # Add our callback which will be called for every found link
    rules = [
        Rule(SgmlLinkExtractor(deny_extensions=NEW_IGNORED_EXTENSIONS, tags=('a', 'area', 'frame', 'iframe'), attrs=('href', 'src')), follow=True, callback='parse_crawled_page')
    ]
    # How many pages crawled
    crawl_count = 0
    # How many PDFs we have found
    pdf_count = 0

    def __init__(self, *args, **kwargs):
        CrawlSpider.__init__(self, *args, **kwargs)
        dispatcher.connect(self._spider_closed, signals.spider_closed)
        dispatcher.connect(self._spider_opened, signals.spider_opened)
        self.load_allowed_domains_and_start_urls()

    def allowed_to_start(self):
        curr_date = datetime.today()
        curr_date = datetime(curr_date.year, curr_date.month, curr_date.day)
        jobdir = self.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''
        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        day = timedelta(days=1)
        if os.path.exists(checkfile):
            f = open(checkfile, 'r')
            data = f.read()
            f.close()
            data = data.split('\n')
            reason = data[0]
            try:
                reason_date = datetime.strptime(data[1], '%Y-%m-%d')
            except Exception as ex:
                reason_date = None
            if reason_date and 'shutdown' in reason:
                reason = True
            else:
                if reason_date and reason_date + day <= curr_date and 'finished' in reason:
                    reason = True
                else:
                    reason = False
        else:
            reason = True
        return reason

    def _spider_opened(self, spider):
        if spider is not self:
            return
        curr_date = datetime.today()
        curr_date = datetime(curr_date.year, curr_date.month, curr_date.day)
        jobdir = spider.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''
        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        day = timedelta(days=1)
        if os.path.exists(checkfile):
            f = open(checkfile, 'r')
            data = f.read()
            f.close()
            data = data.split('\n')
            reason = data[0]
            try:
                reason_date = datetime.strptime(data[1], '%Y-%m-%d')
            except Exception as ex:
                reason_date = None
            if reason_date and 'shutdown' in reason:
                f = open(checkfile, 'w')
                f.write('started\n')
                f.write(str(date.today()))
                f.close()
            else:
                if reason_date and reason_date + day <= curr_date and 'finished' in reason:
                    f = open(checkfile, 'w')
                    f.write('started\n')
                    f.write(str(date.today()))
                    f.close()
                else:
                    crawler.engine.close_spider(self, 'finished')
                    if jobdir and os.path.exists(jobdir):
                        shutil.rmtree(jobdir)
                    f = open(checkfile, 'w')
                    f.write('finished\n')
                    f.write(str(date.today()))
                    f.close()
                    os._exit(1)
        else:
            f = open(checkfile, 'w')
            f.write('started\n')
            f.write(str(date.today()))
            f.close()

    def _spider_closed(self, spider, reason):
        if spider is not self:
            return
        jobdir = spider.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''
        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        if 'shutdown' in reason:
            f = open(checkfile, 'w')
            f.write('shutdown\n')
            f.write(str(date.today()))
            f.close()
        else:
            if jobdir and os.path.exists(jobdir):
                shutil.rmtree(jobdir)
            f = open(checkfile, 'w')
            f.write('finished\n')
            f.write(str(date.today()))
            f.close()

    def _requests_to_follow(self, response):
        if getattr(response, 'encoding', None) != None:
            return CrawlSpider._requests_to_follow(self, response)
        else:
            return []

    def make_requests_from_url(self, url):
        http_client = httplib2.Http()
        try:
            headers = {
                'content-type': 'text/html',
                'user-agent': random.choice(USER_AGENT_LIST)
            }
            response, content = http_client.request(url, method='HEAD', headers=headers)
            #~ if 'pdf' in response['content-type'].lower() or (url.endswith('.pdf') and 'octet-stream' in response['content-type'].lower()):
            if 'pdf' in response['content-type'].lower() or 'octet-stream' in response['content-type'].lower():
                if self.allowed_to_start():
                    self.get_pdf_link(url)
            else:
                return CrawlSpider.make_requests_from_url(self, url)
        except Exception as ex:
            return CrawlSpider.make_requests_from_url(self, url)

    def get_pdf_link(self, url):
        source = self.__class__.name
        parsed_url = urlparse(url)
        url_domain = parsed_url.netloc
        url_path = parsed_url.path
        if url_domain:
            for domain, paths in self.__class__.all_domains[source]['allow_domains'].iteritems():
                if url_domain.endswith(domain):
                    pre_and = False
                    pre_or = False
                    and_cond = True
                    or_cond = False
                    for path in paths:
                        if path[0:1] == '!':
                            pre_and = True
                            if path[1:] not in url_path:
                                and_cond = and_cond and True
                            else:
                                and_cond = and_cond and False
                        else:
                            pre_or = True
                            if path in url_path:
                                or_cond = or_cond or True
                            else:
                                or_cond = or_cond or False
                    if pre_and and pre_or:
                        if and_cond and or_cond:
                            self.pdf_process(source, url)
                            return
                    elif pre_and:
                        if and_cond:
                            self.pdf_process(source, url)
                            return
                    elif pre_or:
                        if or_cond:
                            self.pdf_process(source, url)
                            return
                    else:
                        self.pdf_process(source, url)
                        return

    def parse_crawled_page(self, response):
        self.__class__.crawl_count += 1
        crawl_count = self.__class__.crawl_count
        if crawl_count % 100 == 0:
            print 'Crawled %d pages' % crawl_count
        if 'pdf' in response.headers.get('content-type', '').lower():
            self.get_pdf_link(response.url)
        return Item()

    def load_allowed_domains_and_start_urls(self):
        day = timedelta(days=1)
        currdate = date.today()
        alrroya = ('http://epaper.alrroya.com/currentissues.php?editiondt=' + currdate.strftime('%Y/%m/%d'),)
        self.__class__.all_domains = {
            'alrroya': {
                'start_urls': alrroya,
                'allow_domains': {
                    'epaper.alrroya.com': frozenset(()),
                }
            }
        }
        for domain in self.__class__.all_domains[self.__class__.name]['allow_domains']:
            self.__class__.allowed_domains.append(domain)
        self.__class__.start_urls.extend(self.__class__.all_domains[self.__class__.name]['start_urls'])

    def pdf_process(self, source, url):
        print '!!! ' + source + ' ' + url
This appears to be a bug in Scrapy. The current version doesn't seem to accept lists returned from make_requests_from_url(). I was able to modify the Scrapy code in the following way to work around the issue.
In the file Scrapy-0.16.5-py2.7.egg/scrapy/spider.py
Change:
def start_requests(self):
    for url in self.start_urls:
        yield self.make_requests_from_url(url)
To:
def start_requests(self):
    for url in self.start_urls:
        requests = self.make_requests_from_url(url)
        if type(requests) is list:
            for request in requests:
                yield request
        else:
            yield requests
I expect that the official Scrapy people will fix this eventually.
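An alternative sketch that avoids editing the installed egg is to put the same guard in the spider itself by overriding start_requests(). Adding a None check also covers the code path in the spider's make_requests_from_url() above that returns nothing when it finds a PDF, which is likely what produces the "'NoneType' object has no attribute 'dont_filter'" error:

    def start_requests(self):
        for url in self.start_urls:
            requests = self.make_requests_from_url(url)
            if requests is None:
                # Nothing to schedule for this URL (e.g. it was handled as a PDF).
                continue
            if isinstance(requests, list):
                for request in requests:
                    yield request
            else:
                yield requests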