Could anyone give me a clue as to what is wrong with the JSON syntax listed below? According to JsonLint, the error begins in front of the word "csv". I cannot see the error in the syntax, but it must be there. Could someone just tell me the principle behind my error, please?
{
"lists": {
"csv": "function(head, req) {
var row,
first = true;
// output HTTP headers
start({
headers: {
'Content-Type': 'text/csv'
},
});
// iterate through the result set
while (row = getRow()) {
// get the doc (include_docs=true)
var doc = row.doc;
// if this is the first row
if (first) {
// output column headers
send(Object.keys(doc).join(',') + 'n');
first = false;
}
// build up a line of output
var line = '';
// iterate through each row
for (var i in doc) {
// comma separator
if (line.length > 0) {
line += ',';
}
// output the value, ensuring values that themselves
// contain commas are enclosed in double quotes
var val = doc[i];
if (typeof val == 'string' && val.indexOf(',') > -1) {
line += '" ' + val.replace(/"/g, ' "" ') + ' "';
} else {
line += val;
}
}
line += 'n';
// send the line
send(line);
}
}
"
}
}
EDIT:
Full code (CouchDB view/list):
{
"_id": "_design/comptno",
"_rev": "2-4531ba9fd5bcd6b7fbc5bc8555f0bfe3",
"views": {
"list_example": {
"map": "function(doc) {\r\n if (doc.compartment.number) {\r\n emit(doc.compartment.number, null);\r\n }\r\n};"
},
"list_example2": {
"map": "function(doc) {\r\n if (doc.compartment.number) {\r\n emit(doc.compartment.number, null);\r\n }\r\n};"
}
},
"lists":{"csv":"function(head, req) { var row, first = true; // output HTTP headers start({ headers: { 'Content-Type': 'text/csv' }, }); // iterate through the result set while (row = getRow()) { // get the doc (include_docs=true) var doc = row.doc; // if this is the first row if (first) { // output column headers send(Object.keys(doc).join(',') + 'n'); first = false; } // build up a line of output var line = ''; // iterate through each row for (var i in doc) { // comma separator if (line.length > 0) { line += ','; } // output the value, ensuring values that themselves // contain commas are enclosed in double quotes var val = doc[i]; if (typeof val == 'string' && val.indexOf(',') > -1) { line += '"' + val.replace(/"/g, '""') + '"'; } else { line += val; } } line += 'n'; // send the line send(line); }}"},
"language": "javascript"
}
Here it is corrected, without multi-line strings (a JSON string value cannot contain literal line breaks, so the function body has to be escaped into one line or split up like this):
{
"lists": {
"csv": [
"function(head, req) {",
"var row,",
"first = true;",
"",
"// output HTTP headers",
"start({",
"headers: {",
"'Content-Type': 'text/csv'",
"},",
"});",
"",
"// iterate through the result set",
"while (row = getRow()) {",
"// get the doc (include_docs=true)",
"var doc = row.doc;",
"// if this is the first row",
"if (first) {",
"// output column headers",
"send(Object.keys(doc).join(',') + 'n');",
"first = false;",
"}",
"// build up a line of output",
"var line = '';",
"// iterate through each row",
"for (var i in doc) {",
"// comma separator",
"if (line.length > 0) {",
"line += ',';",
"}",
"// output the value, ensuring values that themselves",
"// contain commas are enclosed in double quotes",
"var val = doc[i];",
"if (typeof val == 'string' && val.indexOf(',') > -1) {",
"line += '\" ' + val.replace(/\"/g, ' \"\" ') + ' ';",
"} else {",
"line += val;",
"}",
"}",
"line += 'n';",
"// send the line",
"send(line);",
"}",
"}",
""
]
}
}
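If you would rather let an encoder do the escaping for you, here is a minimal sketch (assuming Node.js and a hypothetical local file csv-list.js holding the list-function source; neither is part of the question):
// Sketch only: let JSON.stringify produce the escaped, single-line string value.
// "csv-list.js" is a hypothetical file containing the multi-line list function.
const fs = require('fs');

const source = fs.readFileSync('csv-list.js', 'utf8');

const designDocFragment = {
  lists: {
    csv: source   // newlines and double quotes are escaped automatically on output
  }
};

console.log(JSON.stringify(designDocFragment, null, 2));
The printed JSON can then be pasted into the design document as usual.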
I have the following data in a text file:
Details_A
name: A1
Valid: A1_Value
name: A2
Valid: A2_Value
Details_A2
name: A2
Valid: A2_Value
name: A2
Valid: A2_Value
which I am trying to convert into the HTML table below:
(rendered table omitted; the "Details" lines appear as header rows spanning both columns, with each name/Valid pair as a two-column row)
You can use awk like this:
awk 'BEGIN {
x = 0;
print "<table border=\"1\">"
}
{
# single-field lines such as "Details_A" become a full-width header row
if (NF == 1){
print "<tr><td colspan=\"2\">" $0 "</td>";
print "</tr>"
} else {
# other lines alternate between the first and the second cell of a row
if (x == 0){
x++;
print "<tr><td>" $0 "</td>"
} else {
x = 0;
print "<td>" $0 "</td></tr>"
}
}
}
END {
print "</table>"
}' input.txt > table.html
Feel free to add any additional styling.
For older versions of awk, you can use the following, tested on an awk implementation from 2009-11-26 (one-true-awk):
awk 'BEGIN {
x = 0;
y = 0;
print "<table border="1">"
}
{
for (i = 1; i<=NF ; i++){
if (NF == 1){
print "<tr ><td colspan="2">"$i"</td></tr>";
} else {
if (x == 0 && y == 0){
print "<tr><td>"$i" ";
x++;
}
else if (x == 0 && y == 1){
print "<td>"$i" ";
x++;
}
else if (x==(NF-1)){
x = 0;
y++;
if (y == 2){
y = 0;
print ""$i"</td></tr>";
}
else{
print ""$i"</td>";
}
}
else {
print ""$i" ";
x++;
}
}
}
}
END {
print "</table>"
}' input.txt > table.html
For this last version, x is incremented at each space-delimited word until it reaches NF-1, the last word on the line, at which point the closing </td> is emitted. Whether the closing </tr> is also emitted depends on y, which is incremented on each line and reset when the maximum number of <td> cells per <tr> is reached (here, 2 <td> per <tr>).
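For comparison, the same pairing idea is easy to see in JavaScript. This is only an illustrative sketch (Node.js assumed, two cells per row as in the sample data), not part of the awk answer:
// Sketch only: group input lines into a two-column HTML table, mirroring the awk logic.
// Single-field lines become full-width header rows; other lines alternate into cells.
const fs = require('fs');

const lines = fs.readFileSync('input.txt', 'utf8')
  .split('\n')
  .filter(l => l.trim() !== '');

let html = '<table border="1">\n';
let cellsInRow = 0;               // plays the role of x in the awk script

for (const line of lines) {
  if (line.trim().split(/\s+/).length === 1) {
    // header line such as "Details_A": spans both columns
    if (cellsInRow === 1) { html += '</tr>\n'; cellsInRow = 0; }
    html += `<tr><td colspan="2">${line}</td></tr>\n`;
  } else if (cellsInRow === 0) {
    html += `<tr><td>${line}</td>`;    // first cell of the row
    cellsInRow = 1;
  } else {
    html += `<td>${line}</td></tr>\n`; // second cell closes the row
    cellsInRow = 0;
  }
}

html += '</table>\n';
fs.writeFileSync('table.html', html);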
I am new to this and have been working with JSON recently. I need to convert JSON to CSV.
I have a JSON file with the following data. How do I convert it to a CSV file?
[
{
"name":"Search&Navigator",
"region":"F&R",
"checkins":[[2000,100],[2001,200],[2002,300],[2003,275],[2004,222],[2005,280],[2006,281],[2007,400],[2008,55],[2009,300]],
"teamsize":[[2000,10],[2001,7],[2002,7],[2003,12],[2004,5],[2005,3],[2006,10],[2007,12],[2008,12],[2009,10]],
"Checkintimes":[[2000,40],[2001,50],[2002,60],[2003,50],[2004,40],[2005,30],[2006,30],[2007,35],[2008,30],[2009,30]]
},
{
"name":"Cobalt",
"region":"Legal",
"checkins":[[2000,121],[2001,339],[2002,124],[2003,255],[2004,325],[2005,460],[2006,177],[2007,221],[2008,122],[2009,120]],
"teamsize":[[2000,12],[2001,12],[2002,12],[2003,12],[20041,2],[2005,12],[2006,11],[2007,3],[2008,7],[2009,7]],
"Checkintimes":[[2000,20],[2001,40],[2002,60],[2003,50],[2004,40],[2005,30],[2006,35],[2007,30],[2008,30],[2009,30]]
}
]
Any links to a similar requirement would be fine to start with.
Here is a fiddle I used in the past for this:
http://jsfiddle.net/sturtevant/vUnF9/
The relevant function:
function JSON2CSV(objArray) {
// accept either a JSON string or an already-parsed array of objects
var array = typeof objArray != 'object' ? JSON.parse(objArray) : objArray;
var str = '';
var line = '';
// optional header row, controlled by the #labels checkbox in the fiddle
if ($("#labels").is(':checked')) {
var head = array[0];
if ($("#quote").is(':checked')) {
// quoted headers: embedded double quotes are escaped by doubling them
for (var index in array[0]) {
var value = index + "";
line += '"' + value.replace(/"/g, '""') + '",';
}
} else {
for (var index in array[0]) {
line += index + ',';
}
}
// drop the trailing comma and terminate the header line
line = line.slice(0, -1);
str += line + '\r\n';
}
// one CSV line per object in the array
for (var i = 0; i < array.length; i++) {
var line = '';
if ($("#quote").is(':checked')) {
for (var index in array[i]) {
var value = array[i][index] + "";
line += '"' + value.replace(/"/g, '""') + '",';
}
} else {
for (var index in array[i]) {
line += array[i][index] + ',';
}
}
line = line.slice(0, -1);
str += line + '\r\n';
}
return str;
}
Of course, depending on how you want the conversion to be structured, you could write your own.
I hope this helps.
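For data shaped like the sample above, writing your own can also be quite short. This is just a sketch (the function name toCsv and the choice to join each year/value list into a single quoted cell are my assumptions, not a fixed requirement):
// Sketch only: flatten each record into one CSV row.
// Nested arrays such as "checkins" are serialised into a single quoted cell.
function toCsv(records) {
  const quote = v => '"' + String(v).replace(/"/g, '""') + '"';
  const headers = Object.keys(records[0]);
  const rows = records.map(rec =>
    headers.map(key => {
      const value = rec[key];
      if (Array.isArray(value)) {
        // join the [[year, count], ...] pairs into one cell: "2000:100; 2001:200; ..."
        return quote(value.map(pair => pair.join(':')).join('; '));
      }
      return quote(value);
    }).join(',')
  );
  return [headers.map(quote).join(','), ...rows].join('\r\n');
}
Call it with the parsed array, for example toCsv(JSON.parse(jsonText)).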
Miller (mlr) is particularly suitable for parsing JSON and converting it to CSV, TSV, or DKVP (key-value pairs):
https://miller.readthedocs.io/en/latest/file-formats/
Here is the command that will generate the desired output:
mlr --ijson --ocsv cat myfile.json
I have a list of data points for an old DCS system that I would like to migrate into a more manageable organization (a CSV, preferably). I would like to make each entry type into its own column. In the end I would also like to convert it back to its original format, given the modified CSV file. Here is an example of the entries:
{SYSTEM ENTITY 95UA114( ) }
&T DIGINHG
&N 95UA114
UNIT = FD
PTDESC ="95C-101 COMPRESSOR S/D "
KEYWORD ="C101 S/D"
PRIMMOD = HPFD01G
ASSOCDSP ="HPFD01~1"
$CDETAIL =" "
HWYNUM = 08
PNTBOXTY = DHP
BOXNUM = 8
PTDISCL = FULL
LOADDEST = HG_HIWAY
SLOTNUM = 12
INPTSSLT = 10
NMBRINPT = 1
$AUXUNIT = --
$REALARM = 0
DIGALFMT = STATE2
DLYTIME = 0
CHOFSTPR = NOACTION
CNFERRPR = EMERGNCY
OFFNRMPR = EMERGNCY
CRITSCAN = OFF
CCRANK = NEITHER
EIPPCODE = --
EIPEVENT = ANY
EIPENB = ENABLE
ALENBST = ENABLE
STATE2 ="S/D_BYP "
STATE1 ="NORMAL "
UBOXCLR = RED
LBOXCLR = GREEN
OVERVAL = ON
INPTDIR = DIRECT
PNTBOXIN = 1
PNTPCTY = MODICON
PCADDRI1 = 2097
SPECIFI1 = 1
The first line (SYSTEM ENTITY) is not needed, as the info is redundant. The columns (UNIT, PTDESC) are not always the same, however. I was planning on using AutoHotkey to do this, but if anyone has a better suggestion I'm all ears. Right now I have code that reads the file, separates each entity, and splits each line at the = to determine the value of each column, but lining the values up under the right columns is proving to be a challenge. The only way I can think of to handle it would be a 2D array, but it will be cumbersome to write, and I'm sure there is a better/more efficient way (the file is about 21k lines / 500 entities).
numEntries = 0
AutoTrim, Off
outFile = test.csv
filedelete, %outfile%
filereadline, columns, columns.txt, 1
fileappend, TAG`,NAME`,%columns%`r`n, %outfile%
stringsplit, columns, columns,`,
numcolumns=%columns0%
msgbox %numcolumns%
Loop, Read, H3ALL.EB
{
ifinstring, A_LoopReadLine,=
{
i++
data%i%=%A_LoopReadLine%
continue
}
ifinstring, A_LoopReadLine,SYSTEM ENTITY
{
numEntries+=1
if(numEntries > 1)
{
fileappend,`r`n,%outfile%
Loop %i%
{
element := data%A_Index%
stringsplit, element, element,=
Loop %numcolumns%
{
test1=%element1%
test2:=columns%A_Index%
if (test1=test2)
{
;add to correct column
}
}
}
data=
i=0
}
continue
}
ifinstring, A_LoopReadLine,&T
{
stringsplit, line, A_LoopReadLine,%A_SPACE%
tag=%line2%
fileappend,%tag%`,,%outfile%
;msgbox the tag is %tag%
continue
}
ifinstring, A_LoopReadLine,&N
{
stringsplit, line, A_LoopReadLIne,%A_SPACE%
name=%line2%
fileappend,%name%`,, %outfile%
;msgbox the name is %name%
continue
}
}
msgbox DONE!
got it working:
i=0
j=0
outFile = test.csv
filedelete, %outfile%
AutoTrim, off
filereadline, columns, columns.txt, 1
fileappend,%columns%`r`n,%outfile%
stringsplit, columns, columns,`,
numColumns=%columns0%
Loop, Read, H3ALL.EB
{
ifinstring, A_LoopReadLine,=
{
i++
stringsplit, line, A_LoopReadLine,=
loop %numColumns% {
test1:=columns%A_Index%
test2=%line1%
if(test1=test2) {
dataArray%A_Index%_%j%=%line2%
;msgbox column %test1% (%A_Index%) contains %line2%
}
}
continue
}
ifinstring, A_LoopReadLine,SYSTEM ENTITY
{
j++
i=0
continue
}
ifinstring, A_LoopReadLine,&T
{
i++
stringsplit, line, A_LoopReadLine,%A_SPACE%
dataArray%i%_%j%=%line2%
continue
}
ifinstring, A_LoopReadLine,&N
{
i++
stringsplit, line, A_LoopReadLIne,%A_SPACE%
dataArray%i%_%j%=%line2%
continue
}
}
outerIndex=0
Loop %j% {
outerIndex++
Loop %numColumns% {
cell:=dataArray%A_Index%_%outerIndex%
fileappend,%cell%`,,%outfile%
}
fileappend,`r`n,%outfile%
}
Here is an alternate solution for you (I started writing it before you posted your own solution). I didn't realise you had a columns file, so I extract the column headings from the data file itself.
outFile = test.csv
FileRead, test, H3ALL.EB
filedelete, %outfile%
columns := Object()
;get all the columns names
temp := RegExReplace(test, "`a)(=.*|{SYSTEM ENTITY .*)")
;sort and make unique
sort, temp, U
;make it an array
loop, parse, temp, `n
{ if(A_LoopField == "")
{ continue
}
columns.Insert(A_loopfield)
}
;write the columns to file
for key, val in columns
{ FileAppend, % val ",", % outfile
}
FileAppend, `r`n, % outfile
;split the entries up
test := RegExReplace(test, "`a)\{SYSTEM ENTITY .*", "``")
entries := StrSplit(test , "``")
;write each entry as a row in the csv
for key, val in entries
{ if(val = "")
{ continue
}
row := Object()
loop, parse, val, `n
{ StringSplit, data, A_LoopField, =
row[data1] := data2
}
for key2, val2 in columns
{ FileAppend, % row[val2] ",", % outfile
}
FileAppend, `r`n, % outfile
}
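The solution above keeps one key/value map per entity and only fixes the column order when the rows are written, which is what makes the alignment problem go away. For reference, here is the same idea as a rough JavaScript sketch (Node.js assumed; the column list below is a made-up subset, not the real columns.txt):
// Sketch only: one object per SYSTEM ENTITY, keyed by field name,
// then emitted in a fixed column order so the rows always line up.
const fs = require('fs');

const columns = ['TAG', 'NAME', 'UNIT', 'PTDESC', 'KEYWORD']; // assumed column list
const entities = [];
let current = null;

for (const raw of fs.readFileSync('H3ALL.EB', 'utf8').split(/\r?\n/)) {
  const line = raw.trim();
  if (line.startsWith('{SYSTEM ENTITY')) {
    current = {};                 // start a new record; the header line itself is discarded
    entities.push(current);
  } else if (!current) {
    continue;                     // ignore anything before the first entity
  } else if (line.startsWith('&T')) {
    current.TAG = line.split(/\s+/)[1];
  } else if (line.startsWith('&N')) {
    current.NAME = line.split(/\s+/)[1];
  } else if (line.includes('=')) {
    const idx = line.indexOf('=');
    current[line.slice(0, idx).trim()] = line.slice(idx + 1).trim();
  }
}

// missing fields become empty cells; proper CSV quoting is omitted for brevity
const csv = [columns.join(',')]
  .concat(entities.map(e => columns.map(c => e[c] || '').join(',')))
  .join('\r\n');

fs.writeFileSync('test.csv', csv);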
How can I take the output of hg history and convert it into a dot file?
You are looking for this extension.
I wrote a script to do this (and called it hghistory2dot.pl). See its usage below the code:
#!/usr/bin/perl
print "digraph {\n";
$first = 1;
@cset = ();
sub printedge {
my $one = csetstr(shift(@_));
my $two = csetstr(shift(@_));
print $one, " -> ", $two, ";\n";
}
sub csetstr {
my $csetid = shift(@_);
$csetid =~ s/\s//;
$csetid =~ s/\\n//;
return "cset_" . $csetid;
}
while($line = <> ) {
if (!($line eq "\n") ) {
$line =~ s/\n/\\n/;
push(@cset, $line);
}
else {
print csetstr($current), " [shape=record label=\"", @cset, "\"];\n";
@cset = ();
}
if( $line =~ m/^changeset/ ) {
@arr = split(/:/, $line);
$arr[2] =~ s/\s//;
if( ! $parent_found && ! $first) {
#previous changeset had no defined parent; therefore this one is the implied parent.
printedge($current, $arr[2]);
}
$current = $arr[2];
$parent_found = 0;
$first = 0;
}
elsif($line =~ m/^parent/) {
$parent_found = 1;
@arr = split(/:/, $line);
$arr[2] =~ s/\s//;
printedge($current, $arr[2]);
}
}
print "}\n";
hg history | hghistory2dot.pl | dot -Tpng > tree.png