Delete item selected from treeview in tkinter - json

I have a JSON file whose contents are displayed in a treeview. Currently I have a function that deletes the selected item, but it only removes it from the treeview and the JSON file is not updated.
My question is how to read the JSON inside this function so the selected item is also deleted from the file.
This function deletes the item in the treeview:
def borrar_select():
    borrar1 = json_tree.selection()[0]
    json_tree.delete(borrar1)
I tried opening the JSON inside the function, reading it, and writing it back:
def borrar_select():
    with open('prueba1.json', "r") as f:
        data = json.load(f)
        for record in data['Clientes']:
            borrar1 = json_tree.selection()[0]
            json_tree.delete(borrar1)
    with open('prueba1.json', "w") as f:
        json.dump(record, f, indent=4)
It does delete the selected row in the treeview, but in the console I get the following error:
PS C:\Users\*\Desktop\Tkinter> & E:/Prog/Python3/Python311/python.exe c:/Users/*/Desktop/Tkinter/test1.py
Exception in Tkinter callback
Traceback (most recent call last):
  File "E:\Prog\Python3\Python311\Lib\tkinter\__init__.py", line 1948, in __call__
    return self.func(*args)
           ^^^^^^^^^^^^^^^^
  File "c:\Users\*\Desktop\Tkinter\test1.py", line 102, in borrar_select
    borrar1 = json_tree.selection()[0]
              ~~~~~~~~~~~~~~~~~~~~~^^^
IndexError: tuple index out of range
Consequently, the change is not saved to the JSON file either.
I have only been using Python for a short time, so I would appreciate a hand solving this.
This is an example of how it works:
import json
from tkinter import ttk
from tkinter import *
import tkinter as tk
ventana = tk.Tk()
ventana.title("Test")
ventana.geometry("1000x600")
frame1 = tk.Frame(ventana, bg="green", height=300, width=700)
frame1.grid(row=1, column=0)
frame2 = tk.Frame(ventana, bg="yellow", height=300, width=700)
frame2.grid(row=2, column=0)
frame_entry = tk.Frame(frame2) #Frame for the entry widgets
frame_entry.pack(pady=20)
tree_frame = tk.Frame(frame1) #Frame for the tree
tree_frame.pack(pady=20)
#style del tree
style = ttk.Style()
style.theme_use("clam")
style.configure("Treeview", background="#c7c7c7", foreground="black", rowheight=25,fieldbackground="#a1a1a1")
style.map("Treeview", background=[('selected','green')])
tree_scroll = Scrollbar(tree_frame) #Scrollbar for the tree
tree_scroll.pack(side=RIGHT, fill=Y)
#Treeview list
json_tree = ttk.Treeview(tree_frame, yscrollcommand=tree_scroll.set)
json_tree.pack()
#config scroll
tree_scroll.config(command=json_tree.yview)
#Define columns
json_tree['column'] = ("Logo", "Name", "Last Name", "Something")
#Column format
json_tree.column("#0", width=0, minwidth=0)#Phantom column
json_tree.column("Logo", anchor="w", width=120)
json_tree.column("Name", anchor="w", width=120)
json_tree.column("Last Name", anchor="w", width=120)
json_tree.column("Something", anchor="w", width=120)
#headings
json_tree.heading("#0", text="", anchor="w")#Columna Fantasma
json_tree.heading("Logo", text="Logo", anchor="w")
json_tree.heading("Name", text="Name", anchor="w")
json_tree.heading("Last Name", text="Last Name", anchor="w")
json_tree.heading("Something", text="Something", anchor="w")
#color rows
json_tree.tag_configure('par', background="#fff")
json_tree.tag_configure('inpar', background="#d6d6d6")
#Open and read the JSON to access the properties of the objects
with open('prueba1.json', "r") as f:
    data = json.load(f)
    count = 0
    for record in data['Clientes']:
        if count % 2 == 0:
            json_tree.insert(parent='', index="end", iid=count, text="", values=(record['Logo'], record['Name'], record['Last Name'], record['Something']), tags=('par',))
        else:
            json_tree.insert(parent='', index="end", iid=count, text="", values=(record['Logo'], record['Name'], record['Last Name'], record['Something']), tags=('inpar',))
        count += 1
#entries
l1 = Label( frame_entry, text="Logo")
l1.grid(row=0, column=0)
logo_lb = Entry( frame_entry)
logo_lb.grid(row=1, column=0)
l2 = Label( frame_entry, text="Name")
l2.grid(row=0, column=1)
name_lb = Entry(frame_entry)
name_lb.grid(row=1, column=1)
l3 = Label( frame_entry, text="Last Name")
l3.grid(row=0, column=2)
lastname_lb = Entry(frame_entry)
lastname_lb.grid(row=1, column=2)
l4 = Label( frame_entry, text="Something")
l4.grid(row=0, column=3,)
something_lb = Entry(frame_entry)
something_lb.grid(row=1, column=3)
#button functions
def borrar_select():
    with open('prueba1.json', "r") as f:
        data = json.load(f)
        for record in data['Clientes']:
            borrar1 = json_tree.selection()[0]
            json_tree.delete(borrar1)
    with open('prueba1.json', "w") as f:
        json.dump(record, f, indent=4)
    #Clear the entry boxes
    logo_lb.delete(0, END)
    name_lb.delete(0, END)
    lastname_lb.delete(0, END)
    something_lb.delete(0, END)
borrar_btn = tk.Button(frame2, text="Delete", command=borrar_select)
borrar_btn.pack(side=RIGHT, ipadx=30, pady=10)
def select_record():
    #Clear the entry boxes
    logo_lb.delete(0, END)
    name_lb.delete(0, END)
    lastname_lb.delete(0, END)
    something_lb.delete(0, END)
    selected = json_tree.focus() #iid of the selected record in the tree
    values = json_tree.item(selected, 'values') #get the values of the selected item
    logo_lb.insert(0, values[0])
    name_lb.insert(0, values[1])
    lastname_lb.insert(0, values[2])
    something_lb.insert(0, values[3])
select_btn = tk.Button(frame2, text="Select", command=select_record)
select_btn.pack(side=LEFT, ipadx=30,)
ventana.mainloop()
{
    "Clientes": [
        {
            "Logo": "C:/Users/*/Desktop/Tkinter/logos/selavalacarita.png",
            "Name": "2",
            "Last Name": "3",
            "Something": "4"
        },
        {
            "Logo": "C:/Users/*/Desktop/Tkinter/logos/selavalacarita.png",
            "Name": "1",
            "Last Name": "4",
            "Something": "7"
        }
    ]
}

The below code inside borrar_select():
with open('prueba1.json', "r") as f:
    data = json.load(f)
    for record in data['Clientes']:
        borrar1 = json_tree.selection()[0]
        json_tree.delete(borrar1)
will loop through all records in the JSON file and try to delete the first selected item in the treeview on each iteration. If no item is selected, it will raise an exception because json_tree.selection() returns an empty tuple. Even if one item is selected, an exception will still be raised on the second iteration because the selected item has already been deleted.
You can simply remove the selected items using json_tree.delete(...) and then save the remaining items to file:
# button functions
def borrar_select():
    selections = json_tree.selection()
    if selections:
        # remove selected items from the treeview
        json_tree.delete(*selections)
        # get the records from the treeview as a list of dictionaries
        fields = ["Logo", "Name", "Last Name", "Something"]
        records = [dict(zip(fields, json_tree.item(iid, "values"))) for iid in json_tree.get_children()]
        # save the list of dictionaries to file
        with open('prueba1.json', 'w') as f:
            json.dump({'Clientes': records}, f, indent=4)
    # Are the lines below necessary?
    # Clear the entry boxes
    logo_lb.delete(0, END)
    name_lb.delete(0, END)
    lastname_lb.delete(0, END)
    something_lb.delete(0, END)
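A small variation, in case prueba1.json ever holds keys other than 'Clientes': load the existing JSON first and replace only that key before writing the file back. This is just a sketch under that assumption; with your sample file the version above is already enough.

def borrar_select():
    selections = json_tree.selection()
    if selections:
        json_tree.delete(*selections)
        fields = ["Logo", "Name", "Last Name", "Something"]
        records = [dict(zip(fields, json_tree.item(iid, "values"))) for iid in json_tree.get_children()]
        with open('prueba1.json', 'r') as f:
            data = json.load(f)        # keep whatever else the file contains
        data['Clientes'] = records     # replace only the client list
        with open('prueba1.json', 'w') as f:
            json.dump(data, f, indent=4)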

Related

Loop through list of dictionaries and append to csv

I'm currently trying to collect tweets with the Twitter API. I want to merge two lists of dictionaries into a csv. The ['data'] list consists of ID and tweet text; the second list, ['includes']['users'], consists of username and location. I have tried two for loops to merge these elements, one for ['data'] and one for ['includes']['users'], but I end up with the exact same tweet and ID for every user in my csv output.
print(json.dumps(json_response, indent=4, sort_keys=True))
My data looks like this (not real tweets):
{"data": [{"author_id": "1234","id": "9999","text": "This is tweet number 1"},{"author_id": "9876","id": "1111","text": "This is another tweet"},],"includes": {"users": [{"id": "9999","location": "Earth","name": "George Huston","username": "George_Huston"},{"id": "1111","name": "Adam Sandler,"username": "adam_sandler"}]
json_response['includes']['users']
[{'name': 'George Huston', 'location': 'Earth', 'id': '9876', 'username': 'George_Huston'},
 {'name': 'Adam Sandler', 'id': '9999', 'username': 'adam_sandler'}]
Creating a csv:
# Create file
csvFile = open("data.csv", "a", newline="", encoding='utf-8')
csvWriter = csv.writer(csvFile)
#Create headers for the data you want to save, in this example, we only want save these columns in our dataset
csvWriter.writerow(['id', 'username', 'text', 'location'])
csvFile.close()
def append_to_csv(json_response, fileName):
    #A counter variable
    counter = 0
    #Open OR create the target CSV file
    csvFile = open(fileName, "a", newline="", encoding='utf-8')
    csvWriter = csv.writer(csvFile)
    #Loop through each tweet
    for tweet in json_response['data']:
        tweet_id = tweet['id']
        text = tweet['text']
    for element in json_response['includes']['users']:
        username = element['username']
        if ('location' in tweet):
            location = element['location']
        else:
            location = " "
        # Assemble all data in a list
        res = [tweet_id, username, text, location]
        # Append the result to the CSV file
        csvWriter.writerow(res)
        counter += 1
    # When done, close the CSV file
    csvFile.close()
    # Print the number of tweets for this iteration
    print("# of Tweets added from this response: ", counter)

append_to_csv(json_response, "data.csv")
But I get this csv output:
id,username,text,location
9999,George_Huston,"This is tweet number 1",
9999,adam_sandler,"This is tweet number 1",
The id, text, and location are always the same, while the username is different. How can I solve this problem?
In your for tweet in json_response['data'] loop you overwrite tweet_id and text as the loop goes on. The output you see is whatever they were set to in the last iteration of the loop.
It seems from the Twitter API that you can get usernames from the Tweet Objects as well, without the json_response['includes']['users'] that you used.
Does this do what you want?
# Create file
fileName = 'data.csv'
csvFile = open("data.csv", "w", newline="", encoding='utf-8')
csvWriter = csv.writer(csvFile)
#Create headers for the data you want to save; in this example, we only want to save these columns in our dataset
csvWriter.writerow(['id', 'username', 'text', 'location'])
csvFile.close()

def append_to_csv(json_response, fileName):
    #A counter variable
    counter = 0
    #Open OR create the target CSV file
    csvFile = open(fileName, "a", newline="", encoding='utf-8')
    csvWriter = csv.writer(csvFile)
    #Loop through each tweet
    for tweet in json_response['data']:
        tweet_id = tweet['id']
        text = tweet['text']
        username = tweet['username']
        if ('location' in tweet):
            location = tweet['location']
        else:
            location = " "
        # Assemble all data in a list
        res = [tweet_id, username, text, location]
        # Append the result to the CSV file
        csvWriter.writerow(res)
        counter += 1
    # When done, close the CSV file
    csvFile.close()
    # Print the number of tweets for this iteration
    print("# of Tweets added from this response: ", counter)

append_to_csv(json_response, "data.csv")
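If you do want to keep using json_response['includes']['users'] (for example because location only appears on the user objects, as in your sample data), another option is to build a lookup from user id to user and join each tweet to its author explicitly. This is only a sketch; it assumes, as in Twitter v2 responses, that the id of each entry in includes.users matches a tweet's author_id:

def append_to_csv(json_response, fileName):
    # map user id -> user object so each tweet can be joined to its author
    users_by_id = {user['id']: user for user in json_response['includes']['users']}
    counter = 0
    with open(fileName, "a", newline="", encoding='utf-8') as csvFile:
        csvWriter = csv.writer(csvFile)
        for tweet in json_response['data']:
            user = users_by_id.get(tweet['author_id'], {})
            # location is optional on user objects, so fall back to a blank value
            res = [tweet['id'], user.get('username', ''), tweet['text'], user.get('location', ' ')]
            csvWriter.writerow(res)
            counter += 1
    print("# of Tweets added from this response: ", counter)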

BeautifulSoup4 & Python - multiple pages into DataFrame

I have some code which collects the description, price, and old price (if on sale) from online retailers over multiple pages. I'm looking to export this into a DataFrame; I've had a go but run into the following error:
ValueError: Shape of passed values is (1, 3210), indices imply (3, 3210).
from bs4 import BeautifulSoup
import requests
import time
import pandas as pd

# Start Timer
then = time.time()

# Headers
headers = {"User-Agent": "Mozilla/5.0"}

# Set HTTPCode = 200 and Counter = 1
Code = 200
i = 1
scraped_data = []

while Code == 200:
    # Put url together
    url = "https://www.asos.com/women/jumpers-cardigans/cat/?cid=2637&page="
    url = url + str(i)
    # Request URL
    r = requests.get(url, allow_redirects=False, headers=headers)  # No redirects to allow infinite page count
    data = r.text
    Code = r.status_code
    # Soup
    soup = BeautifulSoup(data, 'lxml')
    # For loop each product then scroll through title price, old price and description
    divs = soup.find_all('article', attrs={'class': '_2qG85dG'})  # want to cycle through each of these
    for div in divs:
        # Get Description
        Description = div.find('div', attrs={'class': '_3J74XsK'})
        Description = Description.text.strip()
        scraped_data.append(Description)
        # Fetch TitlePrice
        NewPrice = div.find('span', attrs={'data-auto-id': 'productTilePrice'})
        NewPrice = NewPrice.text.strip("£")
        scraped_data.append(NewPrice)
        # Fetch OldPrice
        try:
            OldPrice = div.find('span', attrs={'data-auto-id': 'productTileSaleAmount'})
            OldPrice = OldPrice.text.strip("£")
            scraped_data.append(OldPrice)
        except AttributeError:
            OldPrice = ""
            scraped_data.append(OldPrice)
    print('page', i, 'scraped')
    # Print Array
    #array = {"Description": str(Description), "CurrentPrice": str(NewPrice), "Old Price": str(OldPrice)}
    #print(array)
    i = i + 1
else:
    i = i - 2

now = time.time()
pd.DataFrame(scraped_data, columns=["A", "B", "C"])
print('Parse complete with', i, 'pages' + ' in', now - then, 'seconds')
Right now your data is appended to a list based on an algorithm that I can describe like this:
Load the web page
Append to list value A
Append to list value B
Append to list value C
What this creates for each run through the dataset is:
[A1, B1, C1, A2, B2, C2]
There exists only one column with data, which is what pandas is telling you. To construct the dataframe properly, either you need to swap it into a format where you have, on each row entry, a tuple of three values (heh) like:
[
    (A1, B1, C1),
    (A2, B2, C2)
]
Or, in my preferred way because it's far more robust to coding errors and inconsistent lengths to your data: creating each row as a dictionary of columns. Thus,
rowdict_list = []
for row in data_source:
    a = extract_a()
    b = extract_b()
    c = extract_c()
    rowdict_list.append({'column_a': a, 'column_b': b, 'column_c': c})
And the data frame is constructed easily without having to explicitly specify columns in the constructor with df = pd.DataFrame(rowdict_list).
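Applied to the scraper in the question, the product loop could look something like this sketch (same class names as in your code, with rowdict_list initialised before the while loop; only the way results are collected changes):

rowdict_list = []
while Code == 200:
    ...
    for div in divs:
        description_tag = div.find('div', attrs={'class': '_3J74XsK'})
        new_price_tag = div.find('span', attrs={'data-auto-id': 'productTilePrice'})
        old_price_tag = div.find('span', attrs={'data-auto-id': 'productTileSaleAmount'})
        # build one dictionary per product; missing tags become empty strings
        rowdict_list.append({
            'Description': description_tag.text.strip() if description_tag else '',
            'CurrentPrice': new_price_tag.text.strip("£") if new_price_tag else '',
            'Old Price': old_price_tag.text.strip("£") if old_price_tag else '',
        })
    ...

df = pd.DataFrame(rowdict_list)  # columns come from the dictionary keys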
You can create a DataFrame using the array dictionary.
You would want to set the values of the array dict to empty lists so that you can append the values from the webpage into the correct list. Also, move the array variable outside of the while loop.
array = {"Description": [], "CurrentPrice": [], "Old Price": []}
scraped_data = []
while Code == 200:
    ...
On the line where you were previously defining the array variable, you would then want to append the description, price and old price values like so.
array['Description'].append(str(Description))
array['CurrentPrice'].append(str(NewPrice))
array['Old Price'].append(str(OldPrice))
Then you can create a DataFrame using the array variable:
pd.DataFrame(array)
So the final solution would look something like
array = {"Description": [], "CurrentPrice": [], "Old Price": []}
scraped_data = []
while Code == 200:
...
# For loop
for div in divs:
# Get Description
Description = div.find('h3', attrs={'class': 'product__title'})
Description = Description.text.strip()
# Fetch TitlePrice
try:
NewPrice = div.find('div', attrs={'class': 'price product__price--current'})
NewPrice = NewPrice.text.strip()
except AttributeError:
NewPrice = div.find('p', attrs={'class': 'price price--reduced'})
NewPrice = NewPrice.text.strip()
# Fetch OldPrice
try:
OldPrice = div.find('p', attrs={'class': 'price price--previous'})
OldPrice = OldPrice.text.strip()
except AttributeError:
OldPrice = ""
array['Description'].append(str(Description))
array['CurrentPrice'].append(str(NewPrice))
array['Old Price'].append(str(OldPrice))
# Print Array
print(array)
df = pd.DataFrame(array)
i = i + 1
else:
i = i - 2
now = time.time()
print('Parse complete with', i, 'pages' + ' in', now - then, 'seconds')
Finally make sure you've imported pandas at the top of the module
import pandas as pd

I cannot access dropdown widget output in a loop

I have been going in circles for hours.
I am trying to get the dropdown selection into the checks at the end of the script, to make sure the result is correct.
I get the dropdown list, but the output is None.
If I select 'DEV' or "DEV", it prints DEV, yet the output (w) is None and the checks fall through to else rather than the if.
The Python code (Jupyter):
source = ["Select Source", "DEV", "TEMP", "PROD"]
source_ = widgets.Dropdown(
options=source,
value=source[0],
description='Select variable:',
disabled=False,
button_style=''
)
def sourceURL(b):
clear_output()
print(source_.value)
### Drop Down
print("Drop Down")
display(source_)
w = source_.observe(sourceURL)
## print("output: ")
print(w) ### output is None
#### LOOP
if w == 'DEV':
print("This is Dev")
elif w == "TEST":
print("This is TEST")
else:
print("This is PROD")
When you do source_.observe(sourceURL), there is no return value from this call. Hence this is equivalent to w = None.
To get the behaviour you want, I think you would need to move the code at the end of your script into your sourceURL function.
import ipywidgets as widgets
from IPython.display import clear_output

source = ["DEV", "TEMP", "PROD"]

source_ = widgets.Dropdown(
    options=source,
    value=source[0],
    description='Select variable:',
    disabled=False,
    button_style=''
)

def sourceURL(b):
    # clear_output()
    w = b['new']
    if w == 'DEV':
        print("This is Dev")
    elif w == "TEMP":
        print("This is TEMP")
    else:
        print("This is PROD")

### Drop Down
print("Drop Down")
display(source_)
w = source_.observe(sourceURL, names='value')
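For reference, the argument passed to sourceURL is a traitlets change dictionary, which is why the handler reads b['new']. A quick way to see what it contains is to print it inside the handler; this is just a sketch, and the exact repr depends on your ipywidgets/traitlets version:

def sourceURL(b):
    print(b)      # e.g. {'name': 'value', 'old': 'DEV', 'new': 'TEMP', 'owner': Dropdown(...), 'type': 'change'}
    w = b['new']  # the newly selected option
    print("Selected:", w)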

Is there a Python function to separate a line delimited by ',' into different columns?

I'm extracting data from Twitter and putting it into a csv. How do I separate it into different columns (userName, text) instead of having everything in a single row?
This is for Python 3.6.
import csv
import tweepy

consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)

search = tweepy.Cursor(api.search, q="#XXXXX", lang="es").items(5)
tweet = [[item.user.name.encode("utf-8"), item.text.encode("utf-8")] for item in search]
print(tweet)

with open('Hashtag_tweets.csv', 'w', encoding='utf-8', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(["userName", "text"])
    writer.writerows(tweet)
    pass
I expect the output of
LINE OF EXCEL:
"b'Irene Cuesta',b'RT #deustoEmprende: \xc2\xa1Tenemos nueva cr\xc3\xb3nica! Descubre c\xc3\xb3mo transcurrieron las sesiones sobre #gestiontiempo en #masterdual #emprendimientoen\xe2\x80\xa6'
to be
LINE 1 COLUMN 1:
Irene Cuesta
LINE 1 COLUMN 2:
RT #deustoEmprende: \xc2\xa1Tenemos nueva cr\xc3\xb3nica! Descubre c\xc3\xb3mo transcurrieron las sesiones sobre #gestiontiempo en #masterdual #emprendimientoen\xe2\x80\xa6

Wrong comparison result in python

I'm trying to compare a date from a JSON extract and a date in a CSV file.
Even though I print both dates and their types and they are the same, the comparison always says they are different when they aren't.
I've already tried many things but nothing worked.
The line doing the comparison is:
if (item_in["Data_Hora"].encode('utf-8')).strip <> Last_Date[1].strip:
Here's the full code:
import json
import requests
import csv
import os
import itertools

def get_last_row(In_file):
    with open(In_file, 'rb') as f:
        reader = csv.reader(f)
        lastline = reader.next()
        for line in reader:
            lastline = line
            print type(line)
        return lastline

params = {
    'api_key': 'tz-XmMtuYEVeeRjIdk6cqW1z',
}
r = requests.get(
    'https://www.parsehub.com/api/v2/projects/tw5xOi-cVrMG-_vAZC_cX1QX/last_ready_run/data',
    params=params)
json_object_in = r.content
data_in = json.loads(json_object_in)

for item_in in data_in["Estacao_Pluviometrica"]:
    if item_in["Regiao"] != "Santa Felicidade":  # Santa Felicidade has two stations; excluded from the process until they can be told apart
        # Change the path below to the directory where the files will be stored: "{directory}/%s.csv"
        Path = "/tmp/csv2/%s.csv" % item_in["Regiao"]
        if os.path.isfile(Path):  # Check if a file for the region already exists
            OutFile = open(Path, 'a+')
            Last_Date = get_last_row(Path)
            # Check if the last date equals the current one - Error is here - comparison is always different even though the data is the same.
            if (item_in["Data_Hora"].encode('utf-8')).strip <> Last_Date[1].strip:
                print "Nova entrada para %s" % item_in["Regiao"]
                fieldnames = ["Regiao", "Data e Hora", "Chuva (mm)"]
                CSVFile = csv.DictWriter(OutFile, fieldnames=fieldnames, delimiter=',', lineterminator='\n')
                CSVFile.writerow({"Regiao": item_in["Regiao"].encode('latin-1'), "Data e Hora": item_in["Data_Hora"], "Chuva (mm)": item_in["Chuva_mm"]})
            else:  # When the entry is the same as before
                print "Entrada repetida para %s" % item_in["Regiao"]
            OutFile.close()
        else:  # In case the file doesn't exist yet, create a new one
            print "Criando novo arquivo para regiao %s em %s." % (item_in["Regiao"], Path)
            OutFile = open(Path, 'w')
            fieldnames = ["Regiao", "Data e Hora", "Chuva (mm)"]
            CSVFile = csv.DictWriter(OutFile, fieldnames=fieldnames, delimiter=',', lineterminator='\n')
            CSVFile.writeheader()
            CSVFile.writerow({"Regiao": item_in["Regiao"].encode('latin-1'), "Data e Hora": item_in["Data_Hora"], "Chuva (mm)": item_in["Chuva_mm"]})
            OutFile.close()
I'm using Python 2.7.10 and IDLE
Thanks.