Remove a line of text from a JSON file - json

I am programming an economy bot with items, an inventory, currency and much more. When I sell an item from my inventory, it still shows up in the inventory embed. What I want is that once the amount of an item reaches 0 after a sale, the item is no longer displayed in the inventory embed, i.e. it is removed from the JSON file.
My code for the sell command:
@client.command()
async def sell(ctx, item, amount=1):
    await open_account(ctx.author)
    res = await sell_this(ctx.author, item, amount)
    em1 = discord.Embed(title=f"{ctx.author.name}",
                        description="Das item konnte nicht in deinem Inventar gefunden werden",
                        color=0xe67e22)
    em1.set_thumbnail(url=ctx.author.avatar_url)
    em2 = discord.Embed(title=f"{ctx.author.name}",
                        description=f"Du hast keine {amount} {item} in deinem inventar",
                        color=0xe67e22)
    em2.set_thumbnail(url=ctx.author.avatar_url)
    em3 = discord.Embed(title=f"{ctx.author.name}",
                        description=f"Du hast das Item: **{item}** nicht in deinem Inventar",
                        color=0xe67e22)
    em3.set_thumbnail(url=ctx.author.avatar_url)
    em4 = discord.Embed(title=f"{ctx.author.name}",
                        description=f"Du hast {amount} {item} gekauft",
                        color=0xe67e22)
    em4.set_thumbnail(url=ctx.author.avatar_url)
    if not res[0]:
        if res[1] == 1:
            await ctx.send(embed=em1)
            return
        if res[1] == 2:
            await ctx.send(embed=em2)
            return
        if res[1] == 3:
            await ctx.send(embed=em3)
            return
    await ctx.send(embed=em4)
async def sell_this(user, item_name, amount, price=None):
    item_name = item_name.lower()
    name_ = None
    for item in mainshop:
        name = item["name"].lower()
        if name == item_name:
            name_ = name
            if price == None:
                price = 0.9 * item["price"]
            break
    if name_ == None:
        return [False, 1]
    cost = price * amount
    users = await get_bank_data()
    bal = await update_bank(user)
    try:
        index = 0
        t = None
        for thing in users[str(user.id)]["bag"]:
            n = thing["item"]
            if n == item_name:
                old_amt = thing["amount"]
                new_amt = old_amt - amount
                if new_amt < 0:
                    return [False, 2]
                users[str(user.id)]["bag"][index]["amount"] = new_amt
                t = 1
                break
            index += 1
        if t == None:
            return [False, 3]
    except:
        return [False, 3]
    with open("Bank.json", "w") as f:
        json.dump(users, f)
    await update_bank(user, cost, "wallet")
    return [True, "Worked"]
I hope someone can help me

Use del to delete the item in question from the json data:
try:
    index = 0
    t = None
    for thing in users[str(user.id)]["bag"]:
        n = thing["item"]
        if n == item_name:
            old_amt = thing["amount"]
            new_amt = old_amt - amount
            if new_amt < 0:
                return [False, 2]
            elif new_amt == 0:  # Check if amount is 0
                del users[str(user.id)]["bag"][index]  # Delete item from bag
            else:
                users[str(user.id)]["bag"][index]["amount"] = new_amt
            t = 1
            break
        index += 1
    if t == None:
        return [False, 3]
except:
    return [False, 3]
This completely removes the item and amount from the 'bag'.
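For illustration (the item names are made up, and the other keys in Bank.json may differ), selling the last unit of an item changes a user's entry like this:
# Before selling the last "fishingrod":
{"bag": [{"item": "fishingrod", "amount": 1}, {"item": "laptop", "amount": 2}]}

# After the del (instead of leaving {"item": "fishingrod", "amount": 0} behind):
{"bag": [{"item": "laptop", "amount": 2}]}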

Related

Get row and column when a checkbox is clicked in a QTableView

I have a QTableView with data from a file. I added 3 columns with checkboxes, but now I need to know which checkbox was clicked (row, column, data). This works for the cell itself, but when clicking the checkbox it gives the latest data, or -1 and None if there is no data.
I tried to remove a lot of code, so I hope this is not too much or too little.
self.model = TableModel([headers, newRows])
self.proxy_model.setSourceModel(self.model)
self.tableView.setModel(self.proxy_model)
self.tableView.clicked.connect(self.cellClicked)

def cellClicked(self):
    try:
        index = self.tableView.selectionModel().currentIndex()
        row = index.row()
        col = index.column()
        data = index.data()
        # print(index.sibling(row,col).data())
        if isinstance(data, QtWidgets.QCheckBox):
            print(f'Child: {index.child.row()}')
            data = data.text()
            print(data.isChecked())
        print(f'Row:\t{row}\nColumn:\t{col}\nData:\t{data}\n')
    except Exception as e:
        print(e)
class TableModel(QAbstractTableModel):
    def __init__(self, data):
        super().__init__()
        self.checks = {}
        self.headers = data[0]
        self.rows = data[1]

    def data(self, index, role):
        try:
            if role == Qt.ItemDataRole.DisplayRole or role == Qt.ItemDataRole.EditRole:
                return self.rows[index.row()][index.column()]
            elif role == Qt.ItemDataRole.CheckStateRole and (index.column() == 0 or index.column() == 6 or index.column() == 7):
                return self.checkState(QPersistentModelIndex(index))
        except IndexError:  # added so the trimmed snippet is valid Python
            return None

    def setData(self, index, value, role=Qt.ItemDataRole.EditRole):
        if value is not None and role == Qt.ItemDataRole.EditRole:
            self.rows[index.row()][index.column()] = value
            # self.dataChanged.emit(index, index)
            return True
        elif not index.isValid():
            return False
        elif role == Qt.ItemDataRole.CheckStateRole:
            self.checks[QPersistentModelIndex(index)] = value
            return True
        return False

    def checkState(self, index):
        if index in self.checks.keys():
            return self.checks[index]
        else:
            return Qt.CheckState.Unchecked

    def flags(self, index):
        col = index.column()
        if col == 0 or col == 6 or col == 7:
            return Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsEditable | Qt.ItemFlag.ItemIsUserCheckable
        else:
            return Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable | Qt.ItemFlag.ItemIsEditable
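No answer was posted for this one, but as a sketch of one direction (built on the TableModel above, with the checkbox columns 0, 6 and 7 taken from flags()): index.data() returns the DisplayRole value, never a QCheckBox widget, so the isinstance check can never match. The clicked signal already passes the clicked index, and the check state can be read from that index under CheckStateRole:
def cellClicked(self, index):
    # The clicked signal hands over the clicked (proxy) index directly,
    # so there is no need to go through selectionModel().currentIndex().
    row = index.row()
    col = index.column()
    data = index.data()  # DisplayRole value of the cell, never a QCheckBox
    if col in (0, 6, 7):  # the checkbox columns declared in flags()
        state = index.data(Qt.ItemDataRole.CheckStateRole)
        # The stored value may be an int or a Qt.CheckState depending on the binding,
        # so normalize it through Qt.CheckState before comparing.
        checked = state is not None and Qt.CheckState(state) == Qt.CheckState.Checked
        print(f'Row: {row}  Column: {col}  Checked: {checked}  Data: {data}')
    else:
        print(f'Row: {row}  Column: {col}  Data: {data}')
Since the view is fed through a proxy model, the index received here is a proxy index, but its data() call is forwarded to the source model, so checkState() should still be consulted.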

dictionary value is dict but printing as string in json dump

I have a script that is working fine except for this tiny issue. My script loops over list items, builds up a JSON structure in a loop, and then does a json dump to a file.
My JSON structure:
main_json = {"customer": {"main_address": "","billing_address": "","invoice_reference": "","product": []}}
main loop:
for row in result:
    account_id = ACCOUNTID_DATA_CACHE.get(row['customer.main_address.customer_id'])
    if account_id is None or account_id != row['customer.main_address.customer_id']:
        if main_json:
            results.append(main_json)
        main_json = {"customer": {"main_address": "","billing_address": "","invoice_reference": "","product": []}}
    main_address = {}
    billing_address = {}
    for key,value in row.items():
        if key.startswith('customer.main_address'):
            main_address[key.split(".")[2]] = value
        if key.startswith('customer.billing_address'):
            billing_address[key.split(".")[2]] = value
    billing_address_copy = billing_address.copy()
    for mkey,mvalue in main_address.items():
        for bkey,bvalue in billing_address_copy.items():
            if str(bvalue) == str(mvalue):
                bvalue = ''
                billing_address_copy[bkey] = bvalue
    if all(value == '' for value in billing_address_copy.values()) is True:
        main_json['customer']['billing_address'] = ''
    else:
        main_json['customer']['billing_address'] = billing_address
    main_json['customer']['main_address'] = main_address
    product = parse_products(row)
    main_json['customer']['product'].append(product)
...
def parse_products(row):
    product = {}
    x = {}
    for key,value in row.items():
        if key.startswith('customer.product'):
            product[key.split(".")[2]] = value
        if key.startswith('customer.product.custom_attributes'):
            x['domain'] = value
            print(x)
            product[key.split(".")[2]] = x
        if key == 'start_date' or 'renewal_date':
            value = str(value)
            product[key] = value
    return product
In the part below, how do I make sure that the value is not a string when dumped?
if key.startswith('customer.product.custom_attributes'):
    x['domain'] = value
    print(x)
    product[key.split(".")[2]] = x
Because in the output I'm getting:
{
    "custom_attributes": "{'domain': 'somedomain.com'}",
    "description": "some_description",
    "discount": "0.00"
}
When what I really want is:
{
    "custom_attributes": {"domain": "somedomain.com"},
    "description": "some_description",
    "discount": "0.00"
}
EDIT: how I'm dumping:
with open('out.json', 'w') as jsonout:
    json.dump(main_json, jsonout, sort_keys=True, indent=4)
Well, this if is flawed and always true:
if key == 'start_date' or 'renewal_date':
So you are converting everything to str()
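A minimal fix for that condition (just a sketch of the membership test, everything else unchanged) is to check the key against both date fields explicitly, so only those two values are stringified and the custom_attributes dict reaches json.dump as a dict:
if key in ('start_date', 'renewal_date'):
    product[key] = str(value)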

How to add extra fields in ValueQuerySet (Django)?

Basically, I want to convert the query_set to JSON. But I also want to add one more field, something like size = <some number>, which is not present in the query_set attributes (it is a computed attribute). Can you tell me how to do it?
query_set = PotholeCluster.objects.all().values('bearing', 'center_lat', 'center_lon', 'grid_id')
return JsonResponse(list(query_set), safe=False)
I tried the code below. It works, but I would like to know if there is any cleaner way to do this.
query_set = PotholeCluster.objects.all()
response_list = []
for pc in query_set:
    d = {}
    d['bearing'] = pc.get_bearing()
    d['center_lat'] = pc.center_lat
    d['center_lon'] = pc.center_lon
    d['grid_id'] = pc.grid_id
    d['size'] = pc.pothole_set.all().count()
    response_list.append(d)
serialized = json.dumps(response_list)
return HttpResponse(serialized, content_type='application/json')
class PotholeCluster(models.Model):
    center_lat = models.FloatField(default=0)
    center_lon = models.FloatField(default=0)
    snapped_lat = models.FloatField(default=0)
    snapped_lon = models.FloatField(default=0)
    size = models.IntegerField(default=-1)
    # avgspeed in kmph
    speed = models.FloatField(default=-1)
    # in meters
    accuracy = models.FloatField(default=-1)
    # avg bearing in degree
    bearing = models.FloatField(default=-1)
    grid = models.ForeignKey(
        Grid,
        on_delete=models.SET_NULL,
        null=True,
        blank=True
    )

    def __str__(self):
        raw_data = serialize('python', [self])
        output = json.dumps(raw_data[0]['fields'])
        return "pk = {}|{}".format(self.id, output)

    def get_bearing(self):
        if self.bearing != -1:
            return self.bearing
        potholes = self.pothole_set.all()
        bearings = [pothole.location.bearing for pothole in potholes]
        bearings.sort()
        i = 0
        if bearings[-1] >= 350:
            while bearings[-1] - bearings[i] >= 340:
                if bearings[i] <= 10:
                    bearings[i] += 360
                i += 1
        self.bearing = sum(bearings) / len(bearings) % 360
        self.save()
        return self.bearing

    def get_size(self):
        if self.size != -1:
            return self.size
        self.size = len(self.pothole_set.all())
        self.save()
        return self.size
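No answer was posted here, but a cleaner variant of the first snippet (a sketch, untested against this project) is to let the database compute the count with annotate() and include the alias in values(). Note the alias cannot be called size because the model already has a size field, and this reads the stored bearing column instead of calling get_bearing(); the view function name is hypothetical:
from django.db.models import Count
from django.http import JsonResponse

def pothole_clusters(request):  # hypothetical view name
    query_set = (
        PotholeCluster.objects
        .annotate(pothole_count=Count('pothole'))  # counted per cluster by the database
        .values('bearing', 'center_lat', 'center_lon', 'grid_id', 'pothole_count')
    )
    return JsonResponse(list(query_set), safe=False)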

Scrapy MySQL pipeline: spider closed before pipeline finished

I use scrapy to crawl a page which contains a list of items, and I save each item in a MySQL database.
But the problem is that the spider closes before all items are stored in MySQL. Each time I run the spider the result count is different.
Could you please let me know how to solve this?
Below is my sample code:
Spider
class FutianSpider(scrapy.Spider):
    name = 'futian_soufang'
    allowed_domain = ["fang.com"]
    start_urls = []

    def __init__(self, category=None, *args, **kwargs):
        self.count = 0
        pass

    def closed(self, reason):
        print "*" * 20 + str(self.count)

    def start_requests(self):
        url = "http://fangjia.fang.com/pghouse-c0sz/a085-h321-i3{}/"
        response = requests.get(url.format(1))
        response.encoding = 'gb2312'
        strpages = Selector(text=response.text).xpath('//p[contains(@class, "pages")]/span[last()]/a/text()').extract()
        # print response.text
        pages = int(strpages[0])
        for num in range(1, pages + 1):
            yield scrapy.Request(url.format(num), callback=self.parse_page)

    def parse_page(self, response):
        houses = response.xpath("//div[@class='list']//div[@class='house']")
        for house in houses:
            # house = Selector(house.decode("UTF-8", 'ignore'))
            self.count += 1
            housespan_hyperlink = house.xpath(".//span[@class='housetitle']/a")
            house_title = housespan_hyperlink.xpath("text()").extract()[0].strip()
            house_link_rel = housespan_hyperlink.xpath("@href").extract()[0].strip()
            house_link = response.urljoin(house_link_rel)
            # if isinstance(house_link_rel, list) and len(house_link_rel) > 0:
            #     house_link = response.urljoin(house_link_rel)
            address = house.xpath(".//span[@class='pl5']/text()").extract()[0].strip()
            esf_keyword = u'二手房'
            esf_span = house.xpath(".//span[contains(text(),'%s')]" % (esf_keyword))
            esf_number = esf_span.xpath("./a/text()").extract()[0].strip()
            esf_number = int(re.findall(r"\d+", esf_number)[0])
            esf_link = esf_span.xpath("./a/@href").extract()[0].strip()
            zf_hyperlink = house.xpath(".//span[@class='p110']/a")
            zf_number = zf_hyperlink.xpath("text()").extract()[0].strip()
            zf_number = int(re.findall(r"\d+", zf_number)[0])
            zf_link = zf_hyperlink.xpath("@href").extract()[0].strip()
            price = 0
            try:
                price = int(house.xpath(".//span[@class='price']/text()").extract()[0].strip())
            except:
                None
            change = 0.0
            try:
                increase_span = house.xpath(".//span[contains(@class, 'nor')]")
                changetext = increase_span.xpath("text()").extract()[0].strip()
                change = float(changetext[:changetext.index('%')])
                if len(increase_span.css(".green-down")) > 0:
                    change *= -1
            except:
                None
            print house_title, house_link, address, esf_number, esf_link, zf_number, zf_link, price, change
            item = XiaoquItem(
                title=house_title,
                url=house_link,
                address=address,
                esf_number=esf_number,
                esf_link=esf_link,
                zf_number=zf_number,
                zf_link=zf_link,
                price=price,
                change=change
            )
            yield item
Item:
class XiaoquItem(Item):
    # define the fields for your item here like:
    title = Field()
    url = Field()
    address = Field()
    esf_number = Field()
    esf_link = Field()
    zf_number = Field()
    zf_link = Field()
    price = Field()
    change = Field()
Pipeline:
class MySQLPipeLine(object):
    def __init__(self):
        settings = get_project_settings()
        dbargs = settings.get('DB_CONNECT')
        db_server = settings.get('DB_SERVER')
        dbpool = adbapi.ConnectionPool(db_server, **dbargs)
        self.dbpool = dbpool

    def close_spider(self, spider):
        self.dbpool.close()

    def process_item(self, item, spider):
        if isinstance(item, XiaoquItem):
            self._process_plot(item)
        elif isinstance(item, PlotMonthlyPriceItem):
            self._process_plot_price(item)
        return item

    def _process_plot(self, item):
        # run db query in thread pool
        query = self.dbpool.runInteraction(self._conditional_insert, item)
        query.addErrback(self._handle_error, item)
        # query.addBoth(lambda _: item)

    def _conditional_insert(self, conn, item):
        # create record if doesn't exist.
        # all this block run on it's own thread
        conn.execute("select * from houseplot where title = %s", item["title"])
        result = conn.fetchone()
        if result:
            log.msg("Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            conn.execute(
                "insert into houseplot(title, url, address, esf_number, esf_link, zf_number, zf_link, price, price_change, upsert_time) values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                (item["title"], item["url"], item["address"], int(item["esf_number"]), item["esf_link"], item["zf_number"], item["zf_link"], item["price"], item["change"], datetime.datetime.now())
            )
            log.msg("Item stored in db: %s" % item, level=log.DEBUG)

    def _handle_error(self, e):
        log.err(e)

    def _process_plot_price(self, item):
        query = self.dbpool.runInteraction(self._conditional_insert_price, item)
        query.addErrback(self._handle_error, item)

    def _conditional_insert_price(self, conn, item):
        # create record if doesn't exist.
        # all this block run on it's own thread
        conn.execute("select * from houseplot_monthly_price where title = %s and price_date= %s", (item["title"], item["price_date"]))
        result = conn.fetchone()
        if result:
            log.msg("Price Item already stored in db: %s" % item, level=log.DEBUG)
        else:
            conn.execute(
                "insert into houseplot_monthly_price(title, price_date, price) values (%s, %s, %s)", (item["title"], item["price_date"], item["price"])
            )
            log.msg("Price Item stored in db: %s" % item, level=log.DEBUG)

Scrapy returns no output - just a [

I'm trying to run the spider found in this crawler, and for simplicity's sake I'm using this start_url because it is just a list of 320 movies (so the crawler won't run for 5 hours as stated on the github page).
I crawl using scrapy crawl imdb -o output.json, but the output.json file contains nothing. It has just a [ in it.
import scrapy
from texteval.items import ImdbMovie, ImdbReview
import urlparse
import math
import re

class ImdbSpider(scrapy.Spider):
    name = "imdb"
    allowed_domains = ["imdb.com"]
    start_urls = [
        # "http://www.imdb.com/chart/top",
        # "http://www.imdb.com/chart/bottom"
        "http://www.imdb.com/search/title?countries=csxx&sort=moviemeter,asc"
    ]
    DOWNLOADER_MIDDLEWARES = {
        'scrapy.contrib.downloadermiddleware.robotstxt.ROBOTSTXT_OBEY': True,
    }
    base_url = "http://www.imdb.com"

    def parse(self, response):
        movies = response.xpath("//*[@id='main']/table/tr/td[3]/a/@href")
        for i in xrange(len(movies)):
            l = self.base_url + movies[i].extract()
            print l
            request = scrapy.Request(l, callback=self.parse_movie)
            yield request
        next = response.xpath("//*[@id='right']/span/a")[-1]
        next_url = self.base_url + next.xpath(".//@href")[0].extract()
        next_text = next.xpath(".//text()").extract()[0][:4]
        if next_text == "Next":
            request = scrapy.Request(next_url, callback=self.parse)
            yield request
        '''
        for sel in response.xpath("//table[@class='chart']/tbody/tr"):
            url = urlparse.urljoin(response.url, sel.xpath("td[2]/a/@href").extract()[0].strip())
            request = scrapy.Request(url, callback=self.parse_movie)
            yield request
        '''

    def parse_movie(self, response):
        movie = ImdbMovie()
        i1 = response.url.find('/tt') + 1
        i2 = response.url.find('?')
        i2 = i2 - 1 if i2 > -1 else i2
        movie['id'] = response.url[i1:i2]
        movie['url'] = "http://www.imdb.com/title/" + movie['id']
        r_tmp = response.xpath("//div[@class='titlePageSprite star-box-giga-star']/text()")
        if r_tmp is None or r_tmp == "" or len(r_tmp) < 1:
            return
        movie['rating'] = int(float(r_tmp.extract()[0].strip()) * 10)
        movie['title'] = response.xpath("//span[@itemprop='name']/text()").extract()[0]
        movie['reviews_url'] = movie['url'] + "/reviews"
        # Number of reviews associated with this movie
        n = response.xpath("//*[@id='titleUserReviewsTeaser']/div/div[3]/a[2]/text()")
        if n is None or n == "" or len(n) < 1:
            return
        n = n[0].extract().replace("See all ", "").replace(" user reviews", "")\
            .replace(" user review", "").replace(",", "").replace(".", "").replace("See ", "")
        if n == "one":
            n = 1
        else:
            n = int(n)
        movie['number_of_reviews'] = n
        r = int(math.ceil(n / 10))
        for x in xrange(1, r):
            start = x * 10 - 10
            url = movie['reviews_url'] + "?start=" + str(start)
            request = scrapy.Request(url, callback=self.parse_review)
            request.meta['movieObj'] = movie
            yield request

    def parse_review(self, response):
        ranks = response.xpath("//*[@id='tn15content']/div")[0::2]
        texts = response.xpath("//*[@id='tn15content']/p")
        del texts[-1]
        if len(ranks) != len(texts):
            return
        for i in xrange(0, len(ranks) - 1):
            review = ImdbReview()
            review['movieObj'] = response.meta['movieObj']
            review['text'] = texts[i].xpath("text()").extract()
            rating = ranks[i].xpath(".//img[2]/@src").re("-?\\d+")
            if rating is None or rating == "" or len(rating) < 1:
                return
            review['rating'] = int(rating[0])
            yield review
Can someone tell me where I am going wrong?
In my opinion, this web site loads the list of movies using JavaScript. First, I suggest you check the output of: movies = response.xpath("//*[@id='main']/table/tr/td[3]/a/@href"). If you need the JavaScript-generated content, you can use webkit in Scrapy as a downloader middleware.
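For example, a quick way to check that selector is Scrapy's interactive shell (using the start URL from the question):
scrapy shell "http://www.imdb.com/search/title?countries=csxx&sort=moviemeter,asc"
# then, inside the shell:
>>> response.xpath("//*[@id='main']/table/tr/td[3]/a/@href").extract()
# If this returns an empty list, the selector does not match the HTML that Scrapy
# downloads (no JavaScript is executed), so parse() yields nothing and no items
# are written after the opening "[".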