My semester project is about classification using Naive Bayes. I've decided to use the Yelp dataset. While turning the JSON file into a CSV file, I ran into a couple of problems, such as:
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
It's because of the wrong usage of json.loads(). I tried a couple of different usages of the function to handle this part of the program. Unfortunately, none of them worked. I've put my code below; if you have any idea how to handle this, could you please explain it to me?
import json
import pandas as pd
from glob import glob
import codecs

global df
global s
global count

def convert(x):
    ob = json.loads(x)
    for k, v in ob.items():
        if isinstance(v, list):
            ob[k] = ','.join(v)
        elif isinstance(v, dict):
            for kk, vv in v.items():
                ob['%s_%s' % (k, kk)] = vv
            del ob[k]
    return ob

s = ""
count = 0

for json_filename in glob('*.json'):
    csv_filename = '%s.csv' % json_filename[:-5]
    print('Converting %s to %s' % (json_filename, csv_filename))
    with open('yelp_dataset_challenge_round9.json', 'rb') as f:  # open in binary mode
        for line in f:
            for cp in ('cp1252', 'cp850'):
                try:
                    if count is 0:
                        count = 1
                    else:
                        s = str(line.decode('utf-8'))
                except UnicodeDecodeError:
                    pass
    df = pd.DataFrame([convert(s)])
    df.to_csv(csv_filename, encoding='utf-8', index=False)
Thanks in advance :)
I have a Python transform in Code Workbooks that is running this code:
import pandas as pd

def contents(dataset_with_files):
    fs = dataset_with_files.filesystem()
    filenames = [f.path for f in fs.ls()]
    fp = fs.hadoop_path + "/" + filenames[0]
    with open(fp, 'r') as f:
        t = f.read()
    rows = {"text": [t]}
    return pd.DataFrame(rows)
But I am getting the error FileNotFoundError: [Errno 2] No such file or directory:
My understanding is that this is the correct way to access a file in HDFS. Is this a Code Repositories versus Code Workbooks limitation?
This documentation helped me figure it out:
https://www.palantir.com/docs/foundry/code-workbook/transforms-unstructured/
It was actually a pretty small change. If you are using filesystem(), you only need the relative path.
import pandas as pd

def contents_old(pycel_test):
    fs = pycel_test.filesystem()
    filenames = [f.path for f in fs.ls()]
    with fs.open(filenames[0], 'r') as f:
        value = ...
    rows = {"values": [value]}
    return pd.DataFrame(rows)
There is also this option, but I found it 10x slower.
from pyspark.sql import Row

def contents(dataset_with_files):
    fs = dataset_with_files.filesystem()  # This is the FileSystem object.
    MyRow = Row("column")

    def process_file(file_status):
        with fs.open(file_status.path, 'r') as f:
            ...

    rdd = fs.files().rdd
    rdd = rdd.flatMap(process_file)
    df = rdd.toDF()
    return df
I have a simple script which returns a lot of errors:
import numpy as np

def test(array):
    ncol = np.shape(array)[1]
    return ncol
which is supposed to return the number of columns of array. What is wrong with it? array is a NumPy array. Here is the output:
ncol=np.shape(array)[1]
Display all 195 possibilities? (y or n)
ArithmeticError( continue
AssertionError( copyright(
AttributeError( credits(
BaseException( def
BlockingIOError( del
You need to add a try/except around ncol = np.shape(array)[1] because it fails when the array is 1-dimensional:
import numpy as np

arr = np.random.normal(size=10)
arr1 = np.random.normal(size=(10, 5))

def test(array):
    try:
        ncol = np.shape(array)[1]
        return ncol
    except Exception as e:
        print("no columns in array")
        return None

print(test(arr))
# output:
# no columns in array
# None

print(test(arr1))
# output:
# 5
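If you would rather avoid a blanket except, a hedged alternative (a sketch that is not part of the original answer) is to check ndim before indexing into the shape:

import numpy as np

def ncols(array):
    # Return the number of columns, or None for a 1-D input.
    arr = np.asarray(array)
    return arr.shape[1] if arr.ndim >= 2 else None

print(ncols(np.random.normal(size=10)))       # None
print(ncols(np.random.normal(size=(10, 5))))  # 5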
Is there a way to extract scalar summaries to CSV (preferably from within tensorboard) from tfevents files?
Example code
The following code generates tfevent files in a summary_dir within the same directory. Suppose you let it run and you find something interesting. You want to get the raw data for further investigation. How would you do that?
#!/usr/bin/env python
"""A very simple MNIST classifier."""
import argparse
import sys

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

ce_with_logits = tf.nn.softmax_cross_entropy_with_logits

FLAGS = None


def inference(x):
    """
    Build the inference graph.

    Parameters
    ----------
    x : placeholder

    Returns
    -------
    Output tensor with the computed logits.
    """
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.matmul(x, W) + b
    return y


def loss(logits, labels):
    """
    Calculate the loss from the logits and the labels.

    Parameters
    ----------
    logits : Logits tensor, float - [batch_size, NUM_CLASSES].
    labels : Labels tensor, int32 - [batch_size]
    """
    cross_entropy = tf.reduce_mean(ce_with_logits(labels=labels,
                                                  logits=logits))
    return cross_entropy


def training(loss, learning_rate=0.5):
    """
    Set up the training Ops.

    Parameters
    ----------
    loss : Loss tensor, from loss().
    learning_rate : The learning rate to use for gradient descent.

    Returns
    -------
    train_op: The Op for training.
    """
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_step = optimizer.minimize(loss)
    return train_step


def main(_):
    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])
    y = inference(x)

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])
    loss_ = loss(logits=y, labels=y_)
    train_step = training(loss_)

    # Test trained model
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.name_scope('accuracy'):
        tf.summary.scalar('accuracy', accuracy)
    merged = tf.summary.merge_all()

    sess = tf.InteractiveSession()
    train_writer = tf.summary.FileWriter('summary_dir/train', sess.graph)
    test_writer = tf.summary.FileWriter('summary_dir/test', sess.graph)
    tf.global_variables_initializer().run()

    for train_step_i in range(100000):
        if train_step_i % 100 == 0:
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.test.images,
                                               y_: mnist.test.labels})
            test_writer.add_summary(summary, train_step_i)
            summary, acc = sess.run([merged, accuracy],
                                    feed_dict={x: mnist.train.images,
                                               y_: mnist.train.labels})
            train_writer.add_summary(summary, train_step_i)
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                        y_: mnist.test.labels}))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir',
                        type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
While the answer here does what was requested within TensorBoard, it only lets you download a CSV for a single run of a single tag.
If you have, for example, 10 tags and 20 runs (which is not at all much), you would need to repeat the above step 200 times (that alone will probably take you more than an hour).
If you then for some reason want to actually do something with the data across all runs for a single tag, you would need to write some awkward CSV-accumulation script or copy everything by hand (which will probably cost you more than a day).
Therefore I would like to add a solution that extracts one CSV file per tag with all runs contained. Column headers are the run path names and row indices are the run step numbers.
import os
import numpy as np
import pandas as pd

from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    summary_iterators = [EventAccumulator(os.path.join(dpath, dname)).Reload() for dname in os.listdir(dpath)]

    tags = summary_iterators[0].Tags()['scalars']

    for it in summary_iterators:
        assert it.Tags()['scalars'] == tags

    out = defaultdict(list)
    steps = []

    for tag in tags:
        steps = [e.step for e in summary_iterators[0].Scalars(tag)]

        for events in zip(*[acc.Scalars(tag) for acc in summary_iterators]):
            assert len(set(e.step for e in events)) == 1

            out[tag].append([e.value for e in events])

    return out, steps


def to_csv(dpath):
    dirs = os.listdir(dpath)

    d, steps = tabulate_events(dpath)
    tags, values = zip(*d.items())
    np_values = np.array(values)

    for index, tag in enumerate(tags):
        df = pd.DataFrame(np_values[index], index=steps, columns=dirs)
        df.to_csv(get_file_path(dpath, tag))


def get_file_path(dpath, tag):
    file_name = tag.replace("/", "_") + '.csv'
    folder_path = os.path.join(dpath, 'csv')
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    return os.path.join(folder_path, file_name)


if __name__ == '__main__':
    path = "path_to_your_summaries"
    to_csv(path)
My solution builds upon: https://stackoverflow.com/a/48774926/2230045
EDIT:
I created a more sophisticated version and released it on GitHub: https://github.com/Spenhouet/tensorboard-aggregator
This version aggregates multiple tensorboard runs and is able to save the aggregates to a new tensorboard summary or as a .csv file.
Just check the "Data download links" option on the upper-left in TensorBoard, and then click on the "CSV" button that will appear under your scalar summary.
Here is my solution, which builds on the previous solutions but can scale up.
import os
import numpy as np
import pandas as pd

from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator


def tabulate_events(dpath):
    final_out = {}
    for dname in os.listdir(dpath):
        print(f"Converting run {dname}", end="")
        ea = EventAccumulator(os.path.join(dpath, dname)).Reload()
        tags = ea.Tags()['scalars']

        out = {}

        for tag in tags:
            tag_values = []
            wall_time = []
            steps = []

            for event in ea.Scalars(tag):
                tag_values.append(event.value)
                wall_time.append(event.wall_time)
                steps.append(event.step)

            out[tag] = pd.DataFrame(data=dict(zip(steps, np.array([tag_values, wall_time]).transpose())),
                                    columns=steps, index=['value', 'wall_time'])

        if len(tags) > 0:
            df = pd.concat(out.values(), keys=out.keys())
            df.to_csv(f'{dname}.csv')
            print("- Done")
            final_out[dname] = df  # only record runs that actually contained scalars
        else:
            print('- No scalars to write')

    return final_out


if __name__ == '__main__':
    path = "your/path/here"
    steps = tabulate_events(path)
    pd.concat(steps.values(), keys=steps.keys()).to_csv('all_result.csv')
Very minimal example:
import pandas as pd
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
log_dir = "lightning_logs/version_1"
event_accumulator = EventAccumulator(log_dir)
event_accumulator.Reload()
events = event_accumulator.Scalars("train_loss")
x = [x.step for x in events]
y = [x.value for x in events]
df = pd.DataFrame({"step": x, "train_loss": y})
df.to_csv("train_loss.csv")
print(df)
step train_loss
0 0 700.491516
1 1 163.593246
2 2 146.365448
3 3 153.830215
...
Plotting loss vs epochs example:
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
log_dir = "lightning_logs/version_1"
y_key = "val_loss"
event_accumulator = EventAccumulator(log_dir)
event_accumulator.Reload()
steps = {x.step for x in event_accumulator.Scalars("epoch")}
x = list(range(len(steps)))
y = [x.value for x in event_accumulator.Scalars(y_key) if x.step in steps]
df = pd.DataFrame({"epoch": x, y_key: y})
df.to_csv(f"{y_key}.csv")
fig, ax = plt.subplots()
sns.lineplot(data=df, x="epoch", y=y_key)
fig.savefig("plot.png", dpi=300)
Just to add to @Spen's answer, in case you want to export the data when you have varying numbers of steps.
This will make one large CSV file.
You might need to change the keys around for it to work for you.
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import glob

listOutput = glob.glob("*/")

listDF = []

for tb_output_folder in listOutput:
    print(tb_output_folder)
    x = EventAccumulator(path=tb_output_folder)
    x.Reload()
    x.FirstEventTimestamp()
    keys = ['loss', 'mean_absolute_error', 'val_loss', 'val_mean_absolute_error']

    listValues = {}

    steps = [e.step for e in x.Scalars(keys[0])]
    wall_time = [e.wall_time for e in x.Scalars(keys[0])]
    index = [e.index for e in x.Scalars(keys[0])]
    count = [e.count for e in x.Scalars(keys[0])]
    n_steps = len(steps)
    listRun = [tb_output_folder] * n_steps
    printOutDict = {}

    data = np.zeros((n_steps, len(keys)))
    for i in range(len(keys)):
        data[:, i] = [e.value for e in x.Scalars(keys[i])]

    printOutDict = {keys[0]: data[:, 0], keys[1]: data[:, 1], keys[2]: data[:, 2], keys[3]: data[:, 3]}

    printOutDict['Name'] = listRun

    DF = pd.DataFrame(data=printOutDict)

    listDF.append(DF)

df = pd.concat(listDF)
df.to_csv('Output.csv')
I am a student doing my master's thesis. As part of my thesis, I am working with Python. I am reading a log file in .csv format and writing the extracted data to another .csv file in a well-formatted way. However, when the file is read, I am getting this error:
Traceback (most recent call last):
  File "C:\Users\SGADI\workspace\DAB_Trace\my_code\trace_parcer.py", line 19, in <module>
    for row in reader:
  File "C:\Users\SGADI\Desktop\Python-32bit-3.4.3.2\python-3.4.3\lib\encodings\cp1252.py", line 23, in decode
    return codecs.charmap_decode(input,self.errors,decoding_table)[0]
UnicodeDecodeError: 'charmap' codec can't decode byte 0x8d in position 7240: character maps to <undefined>
import csv
import re
#import matplotlib
#import matplotlib.pyplot as plt
import datetime
#import pandas
#from dateutil.parser import parse

#def parse_csv_file():
timestamp = datetime.datetime.strptime('00:00:00.000', '%H:%M:%S.%f')
timestamp_list = []
snr_list = []
freq_list = []
rssi_list = []
dab_present_list = []
counter = 0

f = open("output.txt", "w")

with open('test_log_20150325_gps.csv') as csvfile:
    reader = csv.reader(csvfile, delimiter=';')
    for row in reader:
        #timestamp = datetime.datetime.strptime(row[0], '%M:%S.%f')
        #timestamp.split(" ",1)
        timestamp = row[0]
        timestamp_list.append(timestamp)
        #timestamp = row[0]
        details = row[-1]
        counter += 1
        print(counter)
        #if(counter > 25000):
        #    break
        #timestamp = datetime.datetime.strptime(row[0], '%M:%S.%f')
        #timestamp_list.append(float(timestamp))

        #search for SNRLevel=\d+
        snr = re.findall('SNRLevel=(\d+)', details)
        if snr == []:
            snr = 0
        else:
            snr = snr[0]
        snr_list.append(int(snr))

        #search for Frequency=09ABC
        freq = re.findall('Frequency=([0-9a-fA-F]+)', details)
        if freq == []:
            freq = 0
        else:
            freq = int(freq[0], 16)
        freq_list.append(int(freq))

        #search for RSSI=\d+
        rssi = re.findall('RSSI=(\d+)', details)
        if rssi == []:
            rssi = 0
        else:
            rssi = rssi[0]
        rssi_list.append(int(rssi))

        #search for DABSignalPresent=\d+
        dab_present = re.findall('DABSignalPresent=(\d+)', details)
        if dab_present == []:
            dab_present = 0
        else:
            dab_present = dab_present[0]
        dab_present_list.append(int(dab_present))

        f.write(str(timestamp) + "\t")
        f.write(str(freq) + "\t")
        f.write(str(snr) + "\t")
        f.write(str(rssi) + "\t")
        f.write(str(dab_present) + "\n")
        print(timestamp, freq, snr, rssi, dab_present)
        #print (index+1)
        #print(timestamp,freq,snr)
    #print (counter)
    #print(timestamp_list,freq_list,snr_list,rssi_list)
    '''if snr != []:
        if freq != []:
            timestamp_list.append(timestamp)
            snr_list.append(snr)
            freq_list.append(freq)
            f.write(str(timestamp_list) + "\t")
            f.write(str(freq_list) + "\t")
            f.write(str(snr_list) + "\n")
            print(timestamp_list,freq_list,snr_list)'''

f.close()
I searched for the special character and did not find any. I searched the Internet, which suggested changing the encoding: I tried utf8, latin1 and a few other encodings, but I am still getting this error. Can you please help me solve this with pandas as well? I also tried with pandas but I am still getting the error.
I even removed a line in the log file, but the error just occurs on the next line.
Please help me find a solution, thank you.
I have solved this issue.
We can use this code:
import codecs

types_of_encoding = ["utf8", "cp1252"]
for encoding_type in types_of_encoding:
    with codecs.open(filename, encoding=encoding_type, errors='replace') as csvfile:
        your code
        ....
        ....
I have solved this issue by simply adding a parameter to open():
with open(filename, encoding='cp850') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
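The question also asks about pandas. A minimal sketch, assuming the log is cp1252-encoded, semicolon-delimited, has no header row, and has a consistent number of columns per row (all assumptions based on the question's code), is to pass the encoding straight to read_csv:

import pandas as pd

# Assumptions: cp1252 encoding, ';' delimiter, no header row.
df = pd.read_csv('test_log_20150325_gps.csv', sep=';', encoding='cp1252', header=None)
# latin1 never raises a UnicodeDecodeError because it maps every possible byte,
# so it is a common fallback when the exact codec is unknown:
# df = pd.read_csv('test_log_20150325_gps.csv', sep=';', encoding='latin1', header=None)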
with open('input.tsv', 'rb') as f:
    for ln in f:
        decoded = False
        line = ''
        for cp in ('cp1252', 'cp850', 'utf-8', 'utf8'):
            try:
                line = ln.decode(cp)
                decoded = True
                break
            except UnicodeDecodeError:
                pass
        if decoded:
            pass  # use 'line' here
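To tie this back to the csv.reader loop from the original question, one hedged way to use this fallback is to wrap it in a generator that yields decoded lines; csv.reader accepts any iterable of strings (the file name and codec order below are assumptions):

import csv

def decoded_lines(path, codecs_to_try=('cp1252', 'cp850', 'utf-8')):
    # Yield each line decoded with the first codec that succeeds,
    # silently skipping lines that none of the codecs can decode.
    with open(path, 'rb') as f:
        for raw in f:
            for cp in codecs_to_try:
                try:
                    yield raw.decode(cp)
                    break
                except UnicodeDecodeError:
                    continue

for row in csv.reader(decoded_lines('test_log_20150325_gps.csv'), delimiter=';'):
    print(row)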
I'm trying to read a dataframe created via df.to_json() back in with pd.read_json, but I'm getting a ValueError. I think it may have to do with the fact that the index is a MultiIndex, but I'm not sure how to deal with that.
The original dataframe of 55k rows is called psi and I created test.json via:
psi.head().to_json('test.json')
Here is the output of print psi.head().to_string() if you want to use that.
When I do it on this small set of data (5 rows), I get a ValueError.
! wget --no-check-certificate https://gist.githubusercontent.com/olgabot/9897953/raw/c270d8cf1b736676783cc1372b4f8106810a14c5/test.json
import pandas as pd
pd.read_json('test.json')
Here's the full stack:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-14-1de2f0e65268> in <module>()
1 get_ipython().system(u' wget https://gist.githubusercontent.com/olgabot/9897953/raw/c270d8cf1b736676783cc1372b4f8106810a14c5/test.json')
2 import pandas as pd
----> 3 pd.read_json('test.json')
/home/obot/virtualenvs/envy/lib/python2.7/site-packages/pandas/io/json.pyc in read_json(path_or_buf, orient, typ, dtype, convert_axes, convert_dates, keep_default_dates, numpy, precise_float, date_unit)
196 obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
197 keep_default_dates, numpy, precise_float,
--> 198 date_unit).parse()
199
200 if typ == 'series' or obj is None:
/home/obot/virtualenvs/envy/lib/python2.7/site-packages/pandas/io/json.pyc in parse(self)
264
265 else:
--> 266 self._parse_no_numpy()
267
268 if self.obj is None:
/home/obot/virtualenvs/envy/lib/python2.7/site-packages/pandas/io/json.pyc in _parse_no_numpy(self)
481 if orient == "columns":
482 self.obj = DataFrame(
--> 483 loads(json, precise_float=self.precise_float), dtype=None)
484 elif orient == "split":
485 decoded = dict((str(k), v)
ValueError: No ':' found when decoding object value
> /home/obot/virtualenvs/envy/lib/python2.7/site-packages/pandas/io/json.py(483)_parse_no_numpy()
482 self.obj = DataFrame(
--> 483 loads(json, precise_float=self.precise_float), dtype=None)
484 elif orient == "split":
But when I do it on the whole dataframe (55k rows) then I get an invalid pointer error and the IPython kernel dies. Any ideas?
EDIT: added how the json was generated in the first place.
This is not implemented ATM, see the issue here: https://github.com/pydata/pandas/issues/4889.
You can simply reset the index first, e.g.
df.reset_index().to_json(...)
and it will work.
Or you can just write the JSON with orient='table':
df.to_json(path_or_buf='test.json', orient='table')
Read the MultiIndex JSON back:
pd.read_json('test.json', orient='table')
If you want to get the MultiIndex structure back:
# save MultiIndex indexes names
indexes_names = df.index.names
df.reset_index().to_json('dump.json')
# return back MultiIndex structure:
loaded_df = pd.read_json('dump.json').set_index(indexes_names)
This was my simple, dirty fix for encoding/decoding a MultiIndex pandas dataframe, which also seems to work for datetimes in the index/columns... not optimized!
Here is the encoder to JSON - I encode the dataframe, index and columns into a dict to create the JSON:
import json
import pandas as pd

def to_json_multiindex(df):
    dfi = df.index.to_frame()
    dfc = df.columns.to_frame()
    d = dict(
        df=df.to_json(),
        di=dfi.to_json(),
        dc=dfc.to_json()
    )
    return json.dumps(d)
Meanwhile, here is the decoder, which reads the JSON dict and re-creates the dataframe:
def read_json_multiindex(j):
    d = json.loads(j)
    di = pd.read_json(d['di'])
    if di.shape[1] > 1:
        di = pd.MultiIndex.from_frame(di)
    else:
        _name = di.columns[0]
        di = di.index
        di.name = _name

    dc = pd.read_json(d['dc'])
    if dc.shape[1] > 1:
        dc = pd.MultiIndex.from_frame(dc)
    else:
        _name = dc.columns[0]
        dc = dc.index
        dc.name = _name

    df = pd.read_json(d['df']).values

    return pd.DataFrame(
        data=df,
        index=di,
        columns=dc,
    )
And here is a test for MultiIndex columns and index... it seems to preserve the dataframe. A couple of issues: 1) it's probably inefficient, and 2) it does not seem to work for datetimes in a MultiIndex (but works when the index isn't a MultiIndex).
df = pd.DataFrame(
    data=[[0, 1, 2], [2, 3, 4], [5, 6, 7]],
    index=pd.MultiIndex.from_tuples(
        (('aa', 'bb'), ('aa', 'cc'), ('bb', 'cc')),
        names=['AA', 'BB']),
    columns=pd.MultiIndex.from_tuples(
        (('XX', 'YY'), ('XX', 'ZZ'), ('YY', 'ZZ')),
        names=['YY', 'ZZ'])
)

j = to_json_multiindex(df)
d = read_json_multiindex(j)