I am trying to convert CSV files into a JSON file using Python 3. I keep getting a FileNotFoundError even though the CSV files exist in the directory. Please help me fix the issue; below is the code I tried. I would also be grateful if anyone could suggest how to export a MongoDB database to a JSON file using Python 3.
import csv, json, os

# get all csv files from the directory
dir_path = r'C:\Users\USER\Desktop\output_files'
inputfile = [file for file in os.listdir(dir_path) if file.endswith('.csv')]
print(inputfile)

data = {}
for file in inputfile:
    with open(file, "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            id = row['ID']
            data[id] = row
I write the data out using this code:

outputfile = "data.json"  # output file path
with open(outputfile, "a") as jsonfile:
    jsonfile.write(json.dumps(data, indent=4))
Produces the following:
['adult_diapers.csv', 'groceries.csv', 'health_supplements.csv', 'mobility_aids.csv']
Here's my error in more detail:
---------------------------------------------------------------------------
FileNotFoundError                         Traceback (most recent call last)
<ipython-input-17-1aac06308031> in <module>
      6 print(inputfile)
      7 for file in inputfile:
----> 8     with open(file, "r") as csvfile:
      9         reader = csv.DictReader(csvfile)
     10         for row in reader:
FileNotFoundError: [Errno 2] No such file or directory: 'adult_diapers.csv'
Is the full path specified? It looks like you are opening just the filename, not the full path to the file. Prepend dir_path, using os.path.join() to concatenate the directory and the filename, as follows:
with open(os.path.join(dir_path, file), "r") as csvfile:
    reader = csv.DictReader(csvfile)
And your final code becomes:
import csv, json, os

# get all csv files from the directory
dir_path = r'C:\Users\USER\Desktop\output_files'
inputfile = [file for file in os.listdir(dir_path) if file.endswith('.csv')]
print(inputfile)

outputfile = "data.json"  # path for the combined json output
data = {}
for file in inputfile:
    with open(os.path.join(dir_path, file), "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            id = row['ID']
            data[id] = row

with open(outputfile, "a") as jsonfile:
    jsonfile.write(json.dumps(data, indent=4))
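As for the MongoDB part of your question: with pymongo you can read the documents out of a collection and serialize them with bson.json_util, which handles MongoDB-specific types such as ObjectId that plain json.dumps chokes on. A minimal sketch, assuming a local server; the database and collection names are placeholders:

from pymongo import MongoClient
from bson import json_util

client = MongoClient("mongodb://localhost:27017/")  # assumed connection string
collection = client["mydb"]["mycollection"]         # hypothetical names
docs = list(collection.find())                      # fetch every document

with open("mongodb_backup.json", "w") as jsonfile:
    jsonfile.write(json_util.dumps(docs, indent=4))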
My code reads a bunch of JSON files from a directory, extracts "frequency" and "attenuation" data from those files, and writes them to a CSV file. Now I want to save that CSV file in a different directory. The code executes without any error but saves in the current directory. Can anyone help resolve this issue?
import csv
import glob
import json
import os

site = 'alpha'
frequency_to_check = '196050.000'
json_dir_name = 'V:/temp/test/'
json_pattern = os.path.join(json_dir_name, '*.json')
total_files = glob.glob(json_pattern)
atten = []
timestamp = []

save_path = 'V:/python/result/'
if not os.path.isdir(save_path):
    os.makedirs(save_path)

filename = f'{site}-{frequency_to_check}.csv'
with open(filename, 'w', newline='') as csv_file:
    for file in total_files:
        with open(file) as json_file:
            output_json = json.load(json_file)
            for key in output_json:
                if key['start-freq'] == frequency_to_check:
                    csv.writer(csv_file).writerow([key['start-freq'], key['attenuation']])
    save_file = os.path.join(save_path, filename)
    csv_file.close()

print(f'Total files processed {len(total_files)}')
The issue, as far as I can deduce, is here:

csv.writer(csv_file).writerow([key['start-freq'], key['attenuation']])

csv_file is the file object you opened with the bare filename, so every time this line executes you are writing rows into that already-open file in the current directory. After that you are just creating a new path:

save_file = os.path.join(save_path, filename)

which is never actually used, since you then close the file.
To fix this, build save_file first and open that path as the csv file:
import csv
import glob
import json
import os

site = 'alpha'
frequency_to_check = '196050.000'
json_dir_name = 'V:/temp/test/'
json_pattern = os.path.join(json_dir_name, '*.json')
total_files = glob.glob(json_pattern)
atten = []
timestamp = []

save_path = 'V:/python/result/'
if not os.path.isdir(save_path):
    os.makedirs(save_path)

filename = f'{site}-{frequency_to_check}.csv'
save_file = os.path.join(save_path, filename)

with open(save_file, 'w', newline='') as csv_file:
    for file in total_files:
        with open(file) as json_file:
            output_json = json.load(json_file)
            for key in output_json:
                if key['start-freq'] == frequency_to_check:
                    csv.writer(csv_file).writerow([key['start-freq'], key['attenuation']])
    # no explicit close() needed: the with block closes the file

print(f'Total files processed {len(total_files)}')
I guess this should work.
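A small aside, unrelated to the bug: csv.writer(csv_file) constructs a new writer object for every matching row. Hoisting the writer out of the loops is cheaper and more idiomatic; a sketch of just the writing part, under the same names as above:

with open(save_file, 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)  # create the writer once
    for file in total_files:
        with open(file) as json_file:
            output_json = json.load(json_file)
            for key in output_json:
                if key['start-freq'] == frequency_to_check:
                    writer.writerow([key['start-freq'], key['attenuation']])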
I am struggling to convert a JSON file to a CSV file. Any help would be appreciated. I am using Python 3.
Code
import json
import urllib.request

url = 'https://api.coingecko.com/api/v3/coins/bitcoin/market_chart?vs_currency=usd&days=1&interval=daily&sparkline=false'
req = urllib.request.Request(url)

## parsing response
myfile = open("coingecko1.csv", "w", encoding="utf8")
headers = "Prices,MrkCap,TolVol \n"
myfile.write(headers)

r = urllib.request.urlopen(req).read()
cont = json.loads(r.decode('utf-8'))
print(cont)  # just to check the json result

for market in cont:
    prices = (cont["prices"])
    market_caps = (cont["market_caps"])
    total_volumes = (cont["total_volumes"])
    content = prices+","+str(market_caps)+","+str(total_volumes)+" \n"
    myfile.write(content)

print("job complete")
Python Result
{'prices': [[1629331200000, 45015.46554608543], [1629361933000, 44618.52978218442]], 'market_caps': [[1629331200000, 847143004614.999], [1629361933000, 837151985590.3453]], 'total_volumes': [[1629331200000, 34668999387.83819], [1629361933000, 33367392889.386738]]}
Traceback (most recent call last):
  File "ma1.py", line 22, in <module>
    content = prices+","+str(market_caps)+","+str(total_volumes)+" \n"
TypeError: can only concatenate list (not "str") to list
CSV Result: (screenshot of the malformed CSV omitted)
Thank You
Your JSON is nested: each value is a list of lists. To write it out as CSV easily, you must flatten it first.
I've reworked the code to dump to CSV; check below.
import csv
import json
import urllib.request

url = 'https://api.coingecko.com/api/v3/coins/bitcoin/market_chart?vs_currency=usd&days=1&interval=daily&sparkline=false'
req = urllib.request.Request(url)
r = urllib.request.urlopen(req).read()
cont = json.loads(r.decode('utf-8'))

# flatten the JSON data so it can be written as csv rows,
# keyed by timestamp: {timestamp: {'prices': ..., 'market_caps': ..., 'total_volumes': ...}}
flatten_data = {}
for key in cont:
    for value in cont[key]:
        if value[0] not in flatten_data:
            flatten_data[value[0]] = {}
        flatten_data[value[0]].update({key: value[1]})

# write csv with DictWriter (newline='' avoids blank rows on Windows)
with open('coingecko1.csv', 'w', encoding='utf-8', newline='') as csvfile:
    headers = ['Item', 'Prices', 'MrkCap', 'TolVol']
    writer = csv.DictWriter(csvfile, fieldnames=headers)
    writer.writeheader()
    for k, v in flatten_data.items():
        v.update({'Item': k})
        # rename the columns as required
        v['Prices'] = v.pop('prices')
        v['MrkCap'] = v.pop('market_caps')
        v['TolVol'] = v.pop('total_volumes')
        writer.writerow(v)

print("job complete")
I am trying to convert the CSV files in a folder into a single JSON file. The code below does the job, but the JSON file ends up with the first CSV written into it several times. I guess I am going wrong with assigning the data variable. Help me fix it.
import csv, json, os

dir_path = 'C:/Users/USER/Desktop/output_files'
inputfiles = [file for file in os.listdir(dir_path) if file.endswith('.csv')]
outputfile = "data_backup1.json"

for file in inputfiles:
    filepath = os.path.join(dir_path, file)
    data = {}
    with open(filepath, "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            id = row['ID']
            data[id] = row
    with open(outputfile, "a") as jsonfile:
        jsonfile.write(json.dumps(data, indent=4))
Expected output: the JSON file needs to have each CSV written into it only once.
If your .csv files, and all of their rows, have distinct ['ID'] values, your dictionary keys will be unique, and data grows by one entry per row that the reader yields.
You have to change the indentation so data = {} is created once, before the loop, and jsonfile.write() runs once, after the loop, as shown below; that produces just one combined .json object. Opening the output with "w" instead of "a" also keeps duplicates from piling up when you re-run the script. To sort your entries you can add sort_keys=True to json.dumps().
data = {}
for file in inputfiles:
    filepath = os.path.join(dir_path, file)
    with open(filepath, "r") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            id = row['ID']
            data[id] = row

# write once, after all csv files have been merged into data
with open(outputfile, "w") as jsonfile:
    jsonfile.write(json.dumps(data, indent=4, sort_keys=True))
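One caveat with merging everything into a single dict: rows from different CSV files silently overwrite each other whenever they share an ID. If that can happen in your data, a variant that nests each file's rows under its filename avoids the collisions; a sketch, reusing the dir_path, inputfiles, and outputfile definitions from the question:

data = {}
for file in inputfiles:
    filepath = os.path.join(dir_path, file)
    with open(filepath, "r") as csvfile:
        reader = csv.DictReader(csvfile)
        # group rows under the csv filename, so IDs only need to be
        # unique within a single file
        data[file] = {row['ID']: row for row in reader}

with open(outputfile, "w") as jsonfile:
    json.dump(data, jsonfile, indent=4, sort_keys=True)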
I am able to execute the code below in Python 2.7 and merge all the CSV files into a single Excel workbook, but when I try to execute it in Python 3.4 I get an error. Let me know if anyone has faced this issue and sorted it out.
Code:
import glob, csv, xlwt, os

wb = xlwt.Workbook()
for filename in glob.glob(r'E:\BMCSoftware\Datastore\utility\BPM_Datastore_Utility\*.csv'):
    #print (filename)
    (f_path, f_name) = os.path.split(filename)
    #print (f_name)
    (f_short_name, f_extension) = os.path.splitext(f_name)
    #print (f_short_name)
    ws = wb.add_sheet(f_short_name)
    #print (ws)
    with open(filename, 'rU') as f:
        spamReader = csv.reader(f)
        for rowx, row in enumerate(spamReader):
            for colx, value in enumerate(row):
                ws.write(rowx, colx, value)
wb.save("f:\find_acs_errors_ALL_EMEA.xls")
Error:
Traceback (most recent call last):
  File "E:\BMCSoftware\Python34\Copy of DataStore.py", line 16, in <module>
    wb.save("f:\find_acs_errors_ALL_EMEA.xls")
  File "E:\BMCSoftware\Python34\lib\site-packages\xlwt-1.0.0-py3.4.egg\xlwt\Workbook.py", line 696, in save
    doc.save(filename_or_stream, self.get_biff_data())
  File "E:\BMCSoftware\Python34\lib\site-packages\xlwt-1.0.0-py3.4.egg\xlwt\CompoundDoc.py", line 262, in save
    f = open(file_name_or_filelike_obj, 'w+b')
FileNotFoundError: [Errno 2] No such file or directory: 'f:\x0cind_acs_errors_ALL_EMEA.xls'
You should use either double backslashes or single forward slashes in

wb.save("f:\find_acs_errors_ALL_EMEA.xls")

because "\f" in a plain string is the form-feed escape character, which is the \x0c you can see in the traceback. I.e., use one of these:

wb.save("f:\\find_acs_errors_ALL_EMEA.xls")
wb.save("f:/find_acs_errors_ALL_EMEA.xls")

Hope that helps!
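A raw string works too, and is the usual idiom for Windows paths in Python (the glob pattern in the question already uses one):

wb.save(r"f:\find_acs_errors_ALL_EMEA.xls")  # r"" disables backslash escapes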
I would like to import a CSV file into Python with a FileChooser and display it as a dataframe. Here is the code; it doesn't work. Thanks for your kind help.
def get_open_filename(self):
    filename = None
    chooser = gtk.FileChooserDialog("Open File...", self.window,
                                    gtk.FILE_CHOOSER_ACTION_OPEN,
                                    (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                     gtk.STOCK_OPEN, gtk.RESPONSE_OK))
    response = chooser.run()
    if response == gtk.RESPONSE_OK:
        with open(chooser.get_filename(), 'rb') as csvfile:
            don = DataFrame.from_csvfile(csvfile)  ## I am confused here !!!
            print don
    chooser.destroy()
    return filename
I believe from_csvfile takes a filename, not an open file object, per the docs.
Try replacing
with open(chooser.get_filename(), 'rb') as csvfile:
    don = DataFrame.from_csvfile(csvfile)  ## I am confused here !!!
    print don
with
don = DataFrame.from_csvfile(chooser.get_filename())
print don
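If the goal is simply to get the chosen CSV into a dataframe, and you are not tied to that particular DataFrame class, pandas is the usual route; a sketch under that assumption (note that print becomes a function if you move to Python 3):

import pandas as pd

# read the file selected in the dialog straight into a pandas DataFrame
don = pd.read_csv(chooser.get_filename())
print(don)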