Open Binary File in a URL - openerp-8

In the Document module, when I click on the link of a binary file it is downloaded as a JSON object. How can I open it directly in the web browser?
I found the code that implements downloading an attachment for the Document module:
@http.route('/web/binary/saveas_ajax', type='http', auth="user")
@serialize_exception
def saveas_ajax(self, data, token):
    jdata = simplejson.loads(data)
    model = jdata['model']
    field = jdata['field']
    data = jdata['data']
    id = jdata.get('id', None)
    filename_field = jdata.get('filename_field', None)
    context = jdata.get('context', {})

    Model = request.session.model(model)
    fields = [field]
    if filename_field:
        fields.append(filename_field)
    if data:
        res = {field: data}
    elif id:
        res = Model.read([int(id)], fields, context)[0]
    else:
        res = Model.default_get(fields, context)
    filecontent = base64.b64decode(res.get(field, ''))
    if not filecontent:
        raise ValueError(_("No content found for field '%s' on '%s:%s'") %
                         (field, model, id))
    else:
        filename = '%s_%s' % (model.replace('.', '_'), id)
        if filename_field:
            filename = res.get(filename_field, '') or filename
        return request.make_response(filecontent,
                                     headers=[('Content-Type', 'application/octet-stream'),
                                              ('Content-Disposition', content_disposition(filename))],
                                     cookies={'fileToken': token})
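
One possibility (a hedged sketch, not part of the original post) is to serve the attachment with its real mimetype and an inline Content-Disposition, so the browser renders the file instead of saving it. The route name and the field names below are illustrative; only the request.make_response() pattern mirrors the handler above:

import base64
import mimetypes

from openerp import http
from openerp.http import request


class BinaryPreview(http.Controller):

    # Hypothetical route; adjust model/field names to your setup.
    @http.route('/web/binary/view', type='http', auth='user')
    def view_attachment(self, model, id, field='datas', filename_field='datas_fname', **kw):
        record = request.session.model(model).read(
            [int(id)], [field, filename_field], request.context)[0]
        filecontent = base64.b64decode(record.get(field) or '')
        filename = record.get(filename_field) or 'file'

        # Guess the real mimetype so the browser knows it can render the file,
        # and use "inline" instead of "attachment" so it is opened, not saved.
        mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        return request.make_response(
            filecontent,
            headers=[('Content-Type', mimetype),
                     ('Content-Disposition', 'inline; filename="%s"' % filename)])

A link pointing at such a route (for example /web/binary/view?model=ir.attachment&id=42) should then open PDFs, images and other browser-renderable types directly; anything else still falls back to application/octet-stream and downloads.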

Related

DISCORD.PY Creating an end Giveaway Command

I am working on a giveaway bot and, after writing the start and reroll commands, I have run into the end command, which I cannot fully grasp how to implement. My idea was to register something when the giveaway is created (the msgID of the giveaway message) in my aiosqlite database, so the end function could fetch it and stop the giveaway. The thing is, I can't think of a function or a task that will end the giveaway or somehow cut the duration short.
For reference here is my start command :
class Start(commands.Cog):
    def __init__(self, client):
        self.client = client

    def convert(self, timer):
        pos = ["s", "m", "h", "d"]
        time_dict = {"s": 1, "m": 60, "h": 3600, "d": 3600 * 24}
        unit = timer[-1]
        if unit not in pos:
            return -1
        try:
            val = int(timer[:-1])
        except:
            return -2
        return val * time_dict[unit]

    @commands.command()
    async def start(self, ctx, duration, winners: str, *, prize):
        timer = self.convert(duration)
        winners = int(winners.replace("w", ""))
        await ctx.message.delete()
        timestamp = time.time() + timer
        epoch_time = int(time.time() + timer)
        embed = discord.Embed(title=f"{prize}",
                              description=f'React with 🎉 to enter\nEnds: <t:{epoch_time}:R> (<t:{epoch_time}>)\nHosted by {ctx.author.mention}\n',
                              color=ctx.author.color,
                              timestamp=datetime.datetime.utcfromtimestamp(timestamp))
        embed.set_footer(text=f'Winners : {winners} | Ends at \u200b')
        gaw_msg = await ctx.send(content="<:spadess:939938117736091678> **GIVEAWAY** <:spadess:939938117736091678>", embed=embed)
        await gaw_msg.add_reaction("🎉")

        db = await aiosqlite.connect("main.db")
        cursor = await db.cursor()
        await cursor.execute(f'SELECT * FROM storingStuff WHERE msgID = {gaw_msg.id}')
        data = await cursor.fetchone()
        if data is None:
            await cursor.execute(f'INSERT INTO storingStuff (msgID, guildID) VALUES({gaw_msg.id}, {gaw_msg.guild.id})')
            await db.commit()
        await cursor.close()
        await db.close()

        await asyncio.sleep(timer)
        new_msg = await ctx.channel.fetch_message(gaw_msg.id)
        users_mention = []
        for i in range(winners):
            users = await new_msg.reactions[0].users().flatten()
            users.pop(users.index(self.client.user))
            winner = random.choice(users)
            users_mention.append(winner.mention)
            users.remove(winner)
        displayed_winners = ",".join(users_mention)
        endembed = discord.Embed(title=f"{prize}",
                                 description=f"Winner: {displayed_winners}\nHosted by: {ctx.author.mention}",
                                 color=ctx.author.color,
                                 timestamp=datetime.datetime.utcfromtimestamp(timestamp))
        endembed.set_footer(text='Ended at \u200b')
        await gaw_msg.edit(content="<:done:939940228746072096> **GIVEAWAY ENDED** <:done:939940228746072096>", embed=endembed)
        await ctx.send(f"Congratulations {displayed_winners}! You won the **{prize}**.\n{gaw_msg.jump_url}")


def setup(client):
    client.add_cog(Start(client))
Any help with this would be appreciated, or any code reference, as I'm pretty new. Thank you for spending your time reading this.
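
A rough, untested sketch of one way to let an end command cut the giveaway short: instead of a bare asyncio.sleep(), the start command waits on an asyncio.Event with a timeout, and end simply sets that event. The cog name, the wait_for_giveaway helper and the self.giveaways dict are illustrative, not from the original code; the discord.py 1.7-style API matches the question:

import asyncio

from discord.ext import commands


class Giveaways(commands.Cog):
    def __init__(self, client):
        self.client = client
        self.giveaways = {}  # message ID -> asyncio.Event used to end early

    async def wait_for_giveaway(self, msg_id, duration):
        """Wait until the duration runs out OR someone ends the giveaway early."""
        event = asyncio.Event()
        self.giveaways[msg_id] = event
        try:
            # Returns as soon as the event is set; otherwise times out normally.
            await asyncio.wait_for(event.wait(), timeout=duration)
        except asyncio.TimeoutError:
            pass
        finally:
            self.giveaways.pop(msg_id, None)

    @commands.command()
    async def end(self, ctx, message_id: int):
        event = self.giveaways.get(message_id)
        if event is None:
            await ctx.send("No running giveaway found for that message ID.")
            return
        event.set()  # wakes up the waiting start command, which then picks winners
        await ctx.send("Giveaway ended early.")

In start, the line await asyncio.sleep(timer) would then become await self.wait_for_giveaway(gaw_msg.id, timer), and the winner-picking code that already follows the sleep runs the same way whether the giveaway timed out or was ended by the command. The msgID stored in the aiosqlite table can still be used to validate that the ID passed to end really belongs to a giveaway.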

Why doesn't json.dump work in my code?

I'm trying to put Python objects into a JSON file using the API from one of the news sites, but when I run the code nothing is written to the JSON file. The API works fine, and when I print the data read back with json.load I do get output, but I have no idea why json.dump doesn't seem to write anything.
Here is my code:
from django.shortcuts import render
import requests
import json
import datetime
import re


def index(request):
    now = datetime.datetime.now()
    format = "{}-{}-{}".format(now.year, now.month, now.day)
    source = []
    author = []
    title = []
    date = []
    url = "http://newsapi.org/v2/everything"
    params = {
        'q': 'bitcoin',
        'from': format,
        'sortBy': 'publishedAt',
        'apiKey': '1186d3b0ccf24e6a91ab9816de603b90'
    }
    response = requests.request("GET", url, params=params)
    for news in response.json()['articles']:
        matching = re.match("\d+-\d+-\d+", news['publishedAt'])
        if format == matching.group():
            source.append(news['source'])
            author.append(news['author'])
            title.append(news['title'])
            date.append(news['publishedAt'])
    data = {
        'source': source,
        'author': author,
        'title': title,
        'date': date
    }
    with open('data.json', "a+") as fp:
        x = json.dump(data, fp, indent=4)
    return render(request, 'news/news.html', {'response': response})
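
Two things worth checking (assumptions on my part, since the question doesn't show how the server is launched): a relative path like 'data.json' is created in whatever working directory the Django process was started from, so the file may simply be somewhere unexpected; and the hand-built date string is not zero-padded, while the API's publishedAt values are ISO 8601 dates like 2021-07-05, so the comparison can silently filter everything out. A quick check:

import datetime
import os

now = datetime.datetime.now()

# Where would 'data.json' actually be written? Relative paths resolve against
# the process's current working directory, not the directory of views.py.
print(os.path.abspath('data.json'))

# Unpadded vs. zero-padded: "2021-7-5" never equals "2021-07-05".
print("{}-{}-{}".format(now.year, now.month, now.day))
print(now.strftime("%Y-%m-%d"))

Using now.strftime("%Y-%m-%d") for both the from parameter and the comparison, and printing (or hard-coding) the output path, would rule both issues out.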

How to change a user's role based on a value stored in a JSON file with a Discord bot

I have created a Discord bot with a level system: users gain EXP from each message they send, which is stored in a JSON file, and they can check their level. I would like to add a function that changes a user's role at a particular level. I understand I need an if statement involving the lvl_end variable, but I'm not quite sure what this code should look like. Here's my code; I'd really appreciate some help with this.
import discord
import json
import asyncio
import os
import time
import random
from discord.ext import commands

TOKEN =

client = commands.Bot(command_prefix = '!')

@client.event
async def on_ready():
    print("Bot is online and ready to connect to server")
    await client.change_presence(game=discord.Game(name='SHADOWSMAR is cool!'))

@client.command(pass_context=True)
async def clear(ctx, amount=100):
    channel = ctx.message.channel
    messages = []
    async for message in client.logs_from(channel, limit=int(amount)):
        messages.append(message)
    await client.delete_messages(messages)
    await client.say('Messages deleted')

@client.event
async def on_member_join(member):
    role = discord.utils.get(member.server.roles, name='Tourist')
    await client.add_roles(member, role)
    with open('users.json', 'r') as f:
        users = json.load(f)
    await update_data(users, member)
    with open('users.json', 'w') as f:
        json.dump(users, f)

@client.event
async def on_message(message):
    await client.process_commands(message)
    with open('users.json', 'r') as f:
        users = json.load(f)
    upgrade = random.randint(5, 10)
    await update_data(users, message.author)
    await add_experience(users, message.author, upgrade)
    await level_up(users, message.author, message.channel)
    with open('users.json', 'w') as f:
        json.dump(users, f)

# Update data
async def update_data(users, user):
    if not user.id in users:
        users[user.id] = {}
        users[user.id]['experience'] = 0
        users[user.id]['level'] = 1

# Get xp per message
async def add_experience(users, user, exp):
    users[user.id]['experience'] += exp

# Level up
async def level_up(users, user, channel):
    experience = users[user.id]['experience']
    lvl_start = users[user.id]['level']
    lvl_end = int(experience ** 1.75)
    if lvl_start < lvl_end and lvl_end == 5 or lvl_end == 10 or lvl_end == 15 or lvl_end == 20:
        await client.send_message(channel, '{} has leveld up to level {}'.format(user.mention, lvl_end))
        users[user.id]['level'] = lvl_end

# Rank check
@client.command(pass_context=True)
async def rank(ctx, user : discord.Member=None):
    if user is None:
        user = ctx.message.author
    with open('users.json', 'r') as f:
        users = json.load(f)
    lvl_end = users[user.id]['level']
    exp = users[user.id]['experience']
    rank = discord.Embed(name="{}'s rank is".format(user.name), colour = 0xec134b)
    rank.add_field(name="{}'s rank:".format(user.name), value="{}".format(lvl_end))
    rank.add_field(name="total experience points:", value="{}".format(exp), inline=True)
    rank.add_field(name="Highest role", value=user.top_role)
    rank.set_footer(text="Thanks for being part of the community :D")
    rank.set_thumbnail(url=user.avatar_url)
    await client.say(embed=rank)

client.run(TOKEN)
You could set up a dictionary where the user's level corresponds to a role, and check for that role every time they write a message.
For example, something like this:
from discord.utils import get

@client.event
async def on_message(message):
    user = message.author
    role_dict = {1: "role#1_name", 2: "role#2_name", 3: "role#3_name"}
    roles = message.server.roles
    try:
        for key, value in role_dict.items():
            if value in [rl.name for rl in user.roles]:
                await client.remove_roles(user, get(roles, name=value))
    except discord.Forbidden:
        print("Bot permissions aren't high enough to remove roles from this user")
    with open('users.json', 'r') as f:
        users = json.load(f)
    await client.add_roles(user, get(roles, name=role_dict[users[user.id]['level']]))
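
One refinement worth considering (my addition, not part of the original answer): role_dict[users[user.id]['level']] raises a KeyError for any level that has no role mapped, so guarding the lookup keeps ordinary messages from crashing the handler. A small sketch using the same names as the snippet above; only the .get() guard is new:

# Slot this in place of the final add_roles line above.
level = users[user.id]['level']
role_name = role_dict.get(level)          # None if this level has no mapped role
if role_name is not None:
    role = get(roles, name=role_name)
    if role is not None and role not in user.roles:
        await client.add_roles(user, role)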

TypeError: string indices must be integers

Hi, I have a problem with my code: I get an error in a loop that works a few times but then throws TypeError: string indices must be integers.
I want to call an API to get JSON back and extract some parts of the response. Here's the code:
class API(object):
    def __init__(self, api_key):
        self.api_key = api_key

    def _request(self, api_url, params={}):
        args = {'api_key': self.api_key}
        for key, value in params.items():
            if key not in args:
                args[key] = value
        response = requests.get(
            Consts.URL['base'].format(
                url=api_url
            ),
            params=args
        )
        if response.status_code == requests.codes.ok:
            return response.json()
        else:
            return "not possible"
        print(response.url)

    def get_list(self):
        excel = EXCEL('s6.xlsx')
        api_url = Consts.URL['list'].format(
            version = Consts.API_VERSIONS['matchversion'],
            start = excel.get_gamenr()
        )
        return self._request(api_url)

    def get_match(self, matchid):
        idlist = matchid
        api_url = Consts.URL['match'].format(
            version = Consts.API_VERSIONS['matchversion'],
            matchId = idlist
        )
        return self._request(api_url)

    def match_ids(self):
        api = API('c6ea2f68-7ed6-40fa-9b99-fd591c55c05f')
        x = api.get_list()
        y = x['matches']
        count = len(y)
        ids = []
        while count > 0:
            count = count - 1
            temp = y[0]
            ids.append(temp['matchId'])
            del y[0]
        return ids

    def match_info(self):
        matchids = self.match_ids()
        print(matchids)
        matchinfolist = {}
        counter = 1
        for gameids in matchids:
            info = self.get_match(gameids)
            myid = self.find_partid(info['participantIdentities'])
            prepdstats = info['participants'][myid-1]
            print(prepdstats)
            matchinfolist['stats' + str(counter)] = prepdstats
        return matchinfolist

    def find_partid(self, partlist):
        partid = 0
        idlist = partlist
        while partid < 10:
            partid = partid + 1
            tempplayer = idlist[0]['player']
            if tempplayer['summonerId'] == 19204660:
                playernr = partid
                partid = 500
            del idlist[0]
        return playernr
When I run the match_info() function I get this error:
Traceback (most recent call last):
  File "C:\Users\Niklas\Desktop\python riot\main.py", line 17, in <module>
    main()
  File "C:\Users\Niklas\Desktop\python riot\main.py", line 10, in main
    print(api.match_info())
  File "C:\Users\Niklas\Desktop\python riot\api.py", line 78, in match_info
    myid = self.find_partid(info['participantIdentities'])
TypeError: string indices must be integers
but only after the loop in the function has run a few times. I have no idea what I'm doing wrong. Any help would be nice.
Here is a link to the json: https://euw.api.pvp.net/api/lol/euw/v2.2/match/2492271473?api_key=c6ea2f68-7ed6-40fa-9b99-fd591c55c05f
The error shows up on
myid = self.find_partid(info['participantIdentities'])
For this line to execute, info must be a mapping with string keys, not a string itself. info is
info = self.get_match(gameids)
get_match ends with
return self._request(api_url)
_request ends with
if response.status_code == requests.codes.ok:
    return response.json()
else:
    return "not possible"
For the loop to ever run, response.json() must be a dict with key 'participantIdentities'. Your bug is expecting that to always be true.
One fix might be to make the expectation always true. If there is a satisfactory default value, return {'participantIdentities': <default value>}. Otherwise, return None and change the loop to
info = self.get_match(gameids)
if info is not None:
    ...  # as before
else:
    ...  # whatever default action you want
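
A minimal sketch of that second option, assuming the rest of the class from the question stays unchanged (the printed error message and the decision to simply skip failed matches are mine):

def _request(self, api_url, params={}):
    args = {'api_key': self.api_key}
    for key, value in params.items():
        if key not in args:
            args[key] = value
    response = requests.get(
        Consts.URL['base'].format(url=api_url),
        params=args
    )
    if response.status_code == requests.codes.ok:
        return response.json()
    # Returning None instead of the string "not possible" lets callers tell
    # "no data" apart from a real JSON payload with a simple is-None check.
    print("request failed: %s (status %s)" % (response.url, response.status_code))
    return None

def match_info(self):
    matchids = self.match_ids()
    matchinfolist = {}
    counter = 1
    for gameids in matchids:
        info = self.get_match(gameids)
        if info is None:
            continue  # skip matches the API refused to return (e.g. rate limiting)
        myid = self.find_partid(info['participantIdentities'])
        matchinfolist['stats' + str(counter)] = info['participants'][myid - 1]
        counter += 1
    return matchinfolist

Incrementing counter inside the loop also keeps successive matches from overwriting 'stats1', which the original code would otherwise do.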

scrapy unhandled exception

I am using Scrapy 0.16.2 on Linux. I'm running:
scrapy crawl mycrawlspider -s JOBDIR=/mnt/mycrawlspider
I'm getting this error, which blocks Scrapy (it hangs and doesn't finish on its own; only ^C stops it):
2012-11-20 15:04:51+0000 [-] Unhandled Error
Traceback (most recent call last):
  File "/usr/lib/python2.7/site-packages/scrapy/commands/crawl.py", line 45, in run
    self.crawler.start()
  File "/usr/lib/python2.7/site-packages/scrapy/crawler.py", line 80, in start
    reactor.run(installSignalHandlers=False)  # blocking call
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 1169, in run
    self.mainLoop()
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 1178, in mainLoop
    self.runUntilCurrent()
--- <exception caught here> ---
  File "/usr/lib/python2.7/site-packages/twisted/internet/base.py", line 800, in runUntilCurrent
    call.func(*call.args, **call.kw)
  File "/usr/lib/python2.7/site-packages/scrapy/utils/reactor.py", line 41, in __call__
    return self._func(*self._a, **self._kw)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 116, in _next_request
    self.crawl(request, spider)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 172, in crawl
    self.schedule(request, spider)
  File "/usr/lib/python2.7/site-packages/scrapy/core/engine.py", line 176, in schedule
    return self.slots[spider].scheduler.enqueue_request(request)
  File "/usr/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 48, in enqueue_request
    if not request.dont_filter and self.df.request_seen(request):
exceptions.AttributeError: 'NoneType' object has no attribute 'dont_filter'
BTW this worked in version 0.14
Here is the code:
class MySpider(CrawlSpider):
    name = 'alrroya'

    NEW_IGNORED_EXTENSIONS = list(IGNORED_EXTENSIONS)
    NEW_IGNORED_EXTENSIONS.remove('pdf')

    download_delay = 0.05
    # Stay within these domains when crawling
    allowed_domains = []
    all_domains = {}
    start_urls = []

    # Add our callback which will be called for every found link
    rules = [
        Rule(SgmlLinkExtractor(deny_extensions=NEW_IGNORED_EXTENSIONS, tags=('a', 'area', 'frame', 'iframe'), attrs=('href', 'src')), follow=True, callback='parse_crawled_page')
    ]

    # How many pages crawled
    crawl_count = 0
    # How many PDFs we have found
    pdf_count = 0

    def __init__(self, *args, **kwargs):
        CrawlSpider.__init__(self, *args, **kwargs)
        dispatcher.connect(self._spider_closed, signals.spider_closed)
        dispatcher.connect(self._spider_opened, signals.spider_opened)
        self.load_allowed_domains_and_start_urls()

    def allowed_to_start(self):
        curr_date = datetime.today()
        curr_date = datetime(curr_date.year, curr_date.month, curr_date.day)
        jobdir = self.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''

        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        day = timedelta(days=1)
        if os.path.exists(checkfile):
            f = open(checkfile, 'r')
            data = f.read()
            f.close()
            data = data.split('\n')
            reason = data[0]
            try:
                reason_date = datetime.strptime(data[1], '%Y-%m-%d')
            except Exception as ex:
                reason_date = None

            if reason_date and 'shutdown' in reason:
                reason = True
            else:
                if reason_date and reason_date + day <= curr_date and 'finished' in reason:
                    reason = True
                else:
                    reason = False
        else:
            reason = True

        return reason

    def _spider_opened(self, spider):
        if spider is not self:
            return

        curr_date = datetime.today()
        curr_date = datetime(curr_date.year, curr_date.month, curr_date.day)
        jobdir = spider.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''

        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        day = timedelta(days=1)
        if os.path.exists(checkfile):
            f = open(checkfile, 'r')
            data = f.read()
            f.close()
            data = data.split('\n')
            reason = data[0]
            try:
                reason_date = datetime.strptime(data[1], '%Y-%m-%d')
            except Exception as ex:
                reason_date = None

            if reason_date and 'shutdown' in reason:
                f = open(checkfile, 'w')
                f.write('started\n')
                f.write(str(date.today()))
                f.close()
            else:
                if reason_date and reason_date + day <= curr_date and 'finished' in reason:
                    f = open(checkfile, 'w')
                    f.write('started\n')
                    f.write(str(date.today()))
                    f.close()
                else:
                    crawler.engine.close_spider(self, 'finished')
                    if jobdir and os.path.exists(jobdir):
                        shutil.rmtree(jobdir)
                    f = open(checkfile, 'w')
                    f.write('finished\n')
                    f.write(str(date.today()))
                    f.close()
                    os._exit(1)
        else:
            f = open(checkfile, 'w')
            f.write('started\n')
            f.write(str(date.today()))
            f.close()

    def _spider_closed(self, spider, reason):
        if spider is not self:
            return

        jobdir = spider.settings['JOBDIR']
        if jobdir:
            mnt = os.path.dirname(os.path.normpath(jobdir))
        else:
            mnt = ''

        checkfile = os.path.join(mnt, '%s.crawlercheck' % self.__class__.name)
        if 'shutdown' in reason:
            f = open(checkfile, 'w')
            f.write('shutdown\n')
            f.write(str(date.today()))
            f.close()
        else:
            if jobdir and os.path.exists(jobdir):
                shutil.rmtree(jobdir)
                f = open(checkfile, 'w')
                f.write('finished\n')
                f.write(str(date.today()))
                f.close()

    def _requests_to_follow(self, response):
        if getattr(response, 'encoding', None) != None:
            return CrawlSpider._requests_to_follow(self, response)
        else:
            return []

    def make_requests_from_url(self, url):
        http_client = httplib2.Http()
        try:
            headers = {
                'content-type': 'text/html',
                'user-agent': random.choice(USER_AGENT_LIST)
            }
            response, content = http_client.request(url, method='HEAD', headers=headers)
            #~ if 'pdf' in response['content-type'].lower() or (url.endswith('.pdf') and 'octet-stream' in response['content-type'].lower()):
            if 'pdf' in response['content-type'].lower() or 'octet-stream' in response['content-type'].lower():
                if self.allowed_to_start():
                    self.get_pdf_link(url)
            else:
                return CrawlSpider.make_requests_from_url(self, url)
        except Exception as ex:
            return CrawlSpider.make_requests_from_url(self, url)

    def get_pdf_link(self, url):
        source = self.__class__.name
        parsed_url = urlparse(url)
        url_domain = parsed_url.netloc
        url_path = parsed_url.path
        if url_domain:
            for domain, paths in self.__class__.all_domains[source]['allow_domains'].iteritems():
                if url_domain.endswith(domain):
                    pre_and = False
                    pre_or = False
                    and_cond = True
                    or_cond = False
                    for path in paths:
                        if path[0:1] == '!':
                            pre_and = True
                            if path[1:] not in url_path:
                                and_cond = and_cond and True
                            else:
                                and_cond = and_cond and False
                        else:
                            pre_or = True
                            if path in url_path:
                                or_cond = or_cond or True
                            else:
                                or_cond = or_cond or False

                    if pre_and and pre_or:
                        if and_cond and or_cond:
                            self.pdf_process(source, url)
                            return
                    elif pre_and:
                        if and_cond:
                            self.pdf_process(source, url)
                            return
                    elif pre_or:
                        if or_cond:
                            self.pdf_process(source, url)
                            return
                    else:
                        self.pdf_process(source, url)
                        return

    def parse_crawled_page(self, response):
        self.__class__.crawl_count += 1
        crawl_count = self.__class__.crawl_count
        if crawl_count % 100 == 0:
            print 'Crawled %d pages' % crawl_count

        if 'pdf' in response.headers.get('content-type', '').lower():
            self.get_pdf_link(response.url)

        return Item()

    def load_allowed_domains_and_start_urls(self):
        day = timedelta(days=1)
        currdate = date.today()
        alrroya = ('http://epaper.alrroya.com/currentissues.php?editiondt=' + currdate.strftime('%Y/%m/%d'),)
        self.__class__.all_domains = {
            'alrroya': {
                'start_urls': alrroya,
                'allow_domains': {
                    'epaper.alrroya.com': frozenset(()),
                }
            }
        }
        for domain in self.__class__.all_domains[self.__class__.name]['allow_domains']:
            self.__class__.allowed_domains.append(domain)

        self.__class__.start_urls.extend(self.__class__.all_domains[self.__class__.name]['start_urls'])

    def pdf_process(self, source, url):
        print '!!! ' + source + ' ' + url
This appears to be a bug in Scrapy. The current version doesn't seem to accept lists returned from make_requests_from_url(). I was able to modify the Scrapy code in the following way to work around the issue.
In the file Scrapy-0.16.5-py2.7.egg/scrapy/spider.py
Change:
def start_requests(self):
    for url in self.start_urls:
        yield self.make_requests_from_url(url)
To:
def start_requests(self):
    for url in self.start_urls:
        requests = self.make_requests_from_url(url)
        if type(requests) is list:
            for request in requests:
                yield request
        else:
            yield requests
I expect that the official Scrapy people will fix this eventually.
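
As an alternative to editing the installed package (my suggestion, not part of the original answer), the same guard can live in the spider itself by overriding start_requests(). Note that the spider's own make_requests_from_url() can also return None (the PDF branch has no return statement), so skipping None values here covers that case as well:

class MySpider(CrawlSpider):
    # ... existing attributes and methods from the question ...

    def start_requests(self):
        # Same idea as the patch above, but kept inside the spider so the
        # installed Scrapy code stays untouched.
        for url in self.start_urls:
            requests = self.make_requests_from_url(url)
            if requests is None:
                continue
            if isinstance(requests, list):
                for request in requests:
                    yield request
            else:
                yield requests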