There are a bunch of files in a directory, each containing JSON-formatted entries, one per line. The files range in size from 5 KB to 200 MB. I have this code to go through each file, parse out the data I am looking for from the JSON, and finally build a data frame. The script takes a very long time to finish; in fact, it never finishes.
Is there any way to speed it up so that I can read the files faster?
Code:
library(jsonlite)
library(data.table)

setwd("C:/Files/")
#data <- lapply(readLines("test.txt"), fromJSON)

df <- data.frame(Timestamp = factor(), Source = factor(), Host = factor(), Status = factor())
filenames <- list.files("Json_files", pattern = "*.txt", full.names = TRUE)

for (i in filenames) {
  print(i)
  data <- lapply(readLines(i), fromJSON)
  myDf <- do.call("rbind", lapply(data, function(d) {
    data.frame(TimeStamp = d$payloadData$timestamp,
               Source    = d$payloadData$source,
               Host      = d$payloadData$host,
               Status    = d$payloadData$status)
  }))
  df <- rbind(df, myDf)
}
This is a sample entry, but there are thousands of entries like this in the file:
{"senderDateTimeStamp":"2016/04/08 10:53:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB01","servermember":"test"},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}
{"senderDateTimeStamp":"2016/04/08 10:54:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB02","servermember":""},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}
{"senderDateTimeStamp":"2016/04/08 10:55:18","senderHost":null,"senderAppcode":"app","senderUsecase":"appinternalstats_prod","destinationTopic":"app_appinternalstats_realtimedata_topic","correlatedRecord":false,"needCorrelationCacheCleanup":false,"needCorrelation":false,"correlationAttributes":null,"correlationRecordCount":0,"correlateTimeWindowInMills":0,"lastCorrelationRecord":false,"realtimeESStorage":true,"receiverDateTimeStamp":1460127623591,"payloadData":{"timestamp":"2016-04-08T10:53:18.169","status":"get","source":"STREAM","fund":"JVV","client":"","region":"","evetid":"","osareqid":"","basis":"","pricingdate":"","content":"","msgname":"","recipient":"","objid":"","idlreqno":"","host":"WEB02","servermember":""},"payloadDataText":"","key":"app:appinternalstats_prod","destinationTopicName":"app_appinternalstats_realtimedata_topic","hdfsPath":"app/appinternalstats_prod","esindex":"app","estype":"appinternalstats_prod","useCase":"appinternalstats_prod","appCode":"app"}
With your example data in "c:/tmp.txt":
> df <- jsonlite::fromJSON(paste0("[",paste0(readLines("c:/tmp.txt"),collapse=","),"]"))$payloadData[c("timestamp","source","host","status")]
> df
timestamp source host status
1 2016-04-08T10:53:18.169 STREAM WEB01 get
2 2016-04-08T10:53:18.169 STREAM WEB02 get
3 2016-04-08T10:53:18.169 STREAM WEB02 get
So to adapt your code to get a list of dataframes:
dflist <- lapply(filenames, function(i) {
jsonlite::fromJSON(
paste0("[",
paste0(readLines(i),collapse=","),
"]")
)$payloadData[c("timestamp","source","host","status")]
})
The idea is to transform your lines (from readLines) into one big JSON array and then create the data frame by parsing that array as JSON.
As lmo already showcased, using lapply on your filenames list provides you with a list of data frames. If you really want only one data frame at the end, you can load the data.table package and then use rbindlist on dflist to get a single data frame.
Or if you're short on memory, this thread may help you.
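A minimal sketch of that last step, assuming dflist was built as above:
library(data.table)
# stack the per-file data frames into a single data.table
df <- rbindlist(dflist)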
One speed-up is to replace your for loop with lapply and drop the final rbind. The gain is that R no longer has to repeatedly copy an increasingly large data frame, df, as it grows over your "bunch" of files. The result is stored in a convenient list that you can either use as is or convert to a data.frame in one go:
# create processing function
getData <- function(i) {
  print(i)
  data <- lapply(readLines(i), fromJSON)
  do.call("rbind", lapply(data, function(d) {
    data.frame(TimeStamp = d$payloadData$timestamp,
               Source    = d$payloadData$source,
               Host      = d$payloadData$host,
               Status    = d$payloadData$status)
  }))
}
# lapply over files
myDataList <- lapply(filenames, getData)
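To get a single data frame from that list in one go, a minimal sketch:
# bind the list of per-file data frames into one data frame
finalDf <- do.call(rbind, myDataList)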
I have some JSON that looks like this:
"total_rows":141,"offset":0,"rows":[
{"id":"1","key":"a","value":{"SP$Sale_Price":"240000","CONTRACTDATE$Contract_Date":"2006-10-26T05:00:00"}},
{"id":"2","key":"b","value":{"SP$Sale_Price":"2000000","CONTRACTDATE$Contract_Date":"2006-08-22T05:00:00"}},
{"id":"3","key":"c","value":{"SP$Sale_Price":"780000","CONTRACTDATE$Contract_Date":"2007-01-18T06:00:00"}},
...
In R, what would be the easiest way to produce a scatter-plot of SP$Sale_Price versus CONTRACTDATE$Contract_Date?
I got this far:
install.packages("rjson")
library("rjson")
json_file <- "http://localhost:5984/testdb/_design/sold/_view/sold?limit=100"
json_data <- fromJSON(file=json_file)
install.packages("plyr")
library(plyr)
asFrame <- do.call("rbind.fill", lapply(json_data, as.data.frame))
but now I'm stuck...
> plot(CONTRACTDATE$Contract_Date, SP$Sale_Price)
Error in plot(CONTRACTDATE$Contract_Date, SP$Sale_Price) :
object 'CONTRACTDATE' not found
How to make this work?
Suppose you have the following JSON file:
txt <- '{"total_rows":141,"offset":0,"rows":[
{"id":"1","key":"a","value":{"SP$Sale_Price":"240000","CONTRACTDATE$Contract_Date":"2006-10-26T05:00:00"}},
{"id":"2","key":"b","value":{"SP$Sale_Price":"2000000","CONTRACTDATE$Contract_Date":"2006-08-22T05:00:00"}},
{"id":"3","key":"c","value":{"SP$Sale_Price":"780000","CONTRACTDATE$Contract_Date":"2007-01-18T06:00:00"}}]}'
Then you can read it as follows with the jsonlite package:
library(jsonlite)
json_data <- fromJSON(txt, flatten = TRUE)
# get the needed dataframe
dat <- json_data$rows
# set convenient names for the columns
# this step is optional, it just gives you nicer column names
names(dat) <- c("id","key","sale_price","contract_date")
# convert the 'sale_price' column to numeric (it is parsed as character)
dat$sale_price <- as.numeric(dat$sale_price)
# convert the 'contract_date' column to a datetime format
dat$contract_date <- strptime(dat$contract_date, format="%Y-%m-%dT%H:%M:%S", tz="GMT")
Now you can plot:
plot(dat$contract_date, dat$sale_price)
Which gives a scatter-plot of the contract date against the sale price.
If you choose not to flatten the JSON, you can do:
json_data <- fromJSON(txt)
dat <- json_data$rows$value
sp <- strtoi(dat$`SP$Sale_Price`)
cd <- strptime(dat$`CONTRACTDATE$Contract_Date`, format="%Y-%m-%dT%H:%M:%S", tz="GMT")
plot(cd,sp)
Which gives the same plot.
I found a way that doesn't discard the field names:
install.packages("jsonlite")
install.packages("curl")
json <- fromJSON(json_file)
r <- json$rows
At this point r looks like this:
> class(r)
[1] "data.frame"
> colnames(r)
[1] "id" "key" "value"
After some more Googling and trial-and-error I landed on this:
f <- r$value
sp <- strtoi(f[["SP$Sale_Price"]])
cd <- strptime(f[["CONTRACTDATE$Contract_Date"]], format="%Y-%m-%dT%H:%M:%S", tz="GMT")
plot(cd,sp)
And the result on my full data-set...
I have a data frame where the values of the column Parameters are JSON data:
# Parameters
#1 {"a":0,"b":[10.2,11.5,22.1]}
#2 {"a":3,"b":[4.0,6.2,-3.3]}
...
I want to extract the parameters of each row and append them to the data frame as columns A, B1, B2 and B3.
How can I do it?
I would rather use dplyr if it is possible and efficient.
In your example data, each row contains a JSON object. This format is called JSON Lines (aka NDJSON), and the jsonlite package has a special function, stream_in, to parse such data into a data frame:
# Example data
mydata <- data.frame(parameters = c(
'{"a":0,"b":[10.2,11.5,22.1]}',
'{"a":3,"b":[4.0,6.2,-3.3]}'
), stringsAsFactors = FALSE)
# Parse json lines
res <- jsonlite::stream_in(textConnection(mydata$parameters))
# Extract columns
a <- res$a
b1 <- sapply(res$b, "[", 1)
b2 <- sapply(res$b, "[", 2)
b3 <- sapply(res$b, "[", 3)
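A minimal sketch of appending these back onto the original data frame as the columns A, B1, B2 and B3 that the question asks for:
# append the parsed parameters as new columns
mydata$A  <- a
mydata$B1 <- b1
mydata$B2 <- b2
mydata$B3 <- b3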
In your example, the json structure is fairly simple so the other suggestions work as well, but this solution will generalize to more complex json structures.
I actually had a similar problem where several variables in a data frame were JSON objects and many of them were NA, but I did not want to remove the rows where NAs existed. I wrote a function that is passed a data frame, an ID within the data frame (usually a record ID), and the variable name, in quotes, to parse. The function creates two subsets, one for records that contain JSON objects and another to keep track of the NA-value records for the same variable. It then joins those data frames and joins their combination back to the original data frame, replacing the former variable. Perhaps it will help you or someone else, as it has worked for me in a few cases now.
I haven't really cleaned it up much, so I apologize if my variable names are a bit confusing; this was a very ad-hoc function I wrote for work. I should also state that I used another poster's idea for replacing the former variable with the new variables created from the JSON object. You can find that here: Add (insert) a column between two columns in a data.frame
One last note: there is a package called tidyjson which would have offered a simpler solution, but apparently it cannot work with list-type JSON objects. At least that's my interpretation.
library(jsonlite)
library(stringr)
library(dplyr)

parse_var <- function(df, id, var) {
  m <- df[, var]
  p <- m[-which(is.na(m))]
  n <- df[, id]
  key <- n[-which(is.na(df[, var]))]
  # create a df for the rows whose value is NA
  key_na <- n[which(is.na(df[, var]))]
  q <- m[which(is.na(m))]
  parse_df_na <- data.frame(key_na, q, stringsAsFactors = FALSE)
  # parse the JSON values and bind them together into a dataframe
  p <- lapply(p, function(x) {
    fromJSON(x) %>% data.frame(stringsAsFactors = FALSE)
  }) %>% bind_rows()
  # bind the record IDs of the JSON values to the parsed dataframe
  # and name the columns appropriately
  parse_df <- data.frame(key, p, stringsAsFactors = FALSE)
  # the new variables begin with a capital 'X', so replace that prefix
  # with the former variable's name
  n <- names(parse_df) %>% str_replace('X', paste(var, ".", sep = ""))
  n <- n[2:length(n)]
  colnames(parse_df) <- c(id, n)
  # join the NA-value dataframe and the parsed-value dataframe,
  # then remove the NA column, q
  parse_df <- merge(parse_df, parse_df_na, by.x = id, by.y = 'key_na', all = TRUE)
  parse_df <- parse_df[, -which(names(parse_df) == 'q')]
  # replace the variable being parsed with the new parsed and named values
  new_df <- data.frame(append(df, parse_df[, -which(names(parse_df) == id)],
                              after = which(names(df) == var)),
                       stringsAsFactors = FALSE)
  new_df <- new_df[, -which(names(new_df) == var)]
  return(new_df)
}
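A minimal usage sketch; the data frame records, its ID column rec_id, and its JSON-string column payload are illustrative names, not from the original post:
# hypothetical example data: one JSON string per row, including an NA row
records <- data.frame(
  rec_id  = 1:3,
  payload = c('{"a":1,"b":2}', NA, '{"a":5,"b":6}'),
  stringsAsFactors = FALSE
)
# replaces 'payload' with the parsed columns while keeping the NA row
parsed <- parse_var(records, id = "rec_id", var = "payload")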
I'm trying to download NBA player information from Numberfire and then put that information into a data frame. However, I seem to be running into a few issues.
The following snippet downloads the information just fine:
require(RCurl)
require(stringr)
require(rjson)
#download data from numberfire
nf <- "https://www.numberfire.com/nba/fantasy/fantasy-basketball-projections"
html <- getURL(nf)
Then there is what I assume to be a JSON data structure:
#extract json variable (?)
pat <- "NF_DATA.*}}}"
jsn <- str_extract(html, pat)
jsn <- str_split(jsn, "NF_DATA = ")
parse <- newJSONParser()
parse$addData(jsn)
It seems to add the data OK, as it doesn't throw any errors, but I can't tell whether there is any data in that object, and I can't seem to get it out!
I'd paste in the jsn variable, but it's way over the character limit. Any hints as to where I'm going wrong would be much appreciated.
Adding the final line gets you a nice list format that you can transform into a data.frame:
require(RCurl); require(stringr); require(rjson)
#download data from numberfire
nf <- "https://www.numberfire.com/nba/fantasy/fantasy-basketball-projections"
html <- getURL(nf)
#extract json variable (?)
pat <- "NF_DATA.*}}}"
jsn <- str_extract(html, pat)
jsn <- str_split(jsn, "NF_DATA = ")
fromJSON(jsn[[1]][[2]])
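If you want to go further, a minimal sketch; the internal layout of NF_DATA isn't shown here, so the projections element named below is purely hypothetical:
nf_data <- fromJSON(jsn[[1]][[2]])
# inspect the top-level structure first to see where the projections live
str(nf_data, max.level = 1)
# hypothetical: if the projections are a list of per-player records,
# something like this would flatten them into a data.frame
# proj_df <- do.call(rbind, lapply(nf_data$projections, as.data.frame))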
I need to convert a JSON file into a data frame. Each line in the JSON file may have a different number of entries. For example:
{"timestamp":"2016-12-13T04:04:06.394-0500",
"test101":"2016-12-13T04:04:06.382-0500",
"error":"false","from":"xon","event":"DAT","BT":"work","cd":"E","id":"IBM",
"key":"20161213040330617511","begin_work":"2016-12-13T04:04:06.383-0500"","#version:"1","#timestamp":"2016-12-14T20:04:29.502Z"}
{"timestamp":"2016-12-13T04:04:05.318-0500","test101":"2016-12-13T04:03:46.074-0500","error":"false","from":"de","event":"cp","BT":"work","cd":"dsh","id":"appl",
"key":"142314089",
"begin_work":"2016-12-13T04:03:46.074-0500",
"refresh":"2016-12-13T04:03:45.920-0500",
"co_refresh":"2016-12-13T04:03:45.769-0500",
"test104":"2016-12-13T04:03:45.832-0500",
"test104":"2016-12-13T04:03:45.832-0500",
"test105":"2016-12-13T04:03:46.031-0500",
"test7":"2016-12-13T04:03:46.032-0500",
"t-test9":"2016-12-13T04:03:45.704-0500",
"test10_StartDateTimeStamp":"2016-12-13T04:03:45.704-0500",
"stop":"2016-12-13T04:03:50.772-0500",
"stop_again":"2016-12-13T04:03:46.091-0500",
"#version":"1","#timestamp":"2016-12-14T20:04:29.503Z"}
{"timestamp":"2016-12-13T04:04:07.113-0500","test101":"2016-12-13T04:04:07.068-0500","error":"false","from":"xon","event":"DAT","BT":"work","cd":"E","id":"3YPS","key":"20161213040318326935","begin_work":"2016-12-13T04:04:07.069-0500","#version":"1","#timestamp":"2016-12-14T20:04:29.505Z"}
I need to start parsing the file from the keyword "key" until the keyword "#version".
The data frame needs to look something like this:
key                   group       time
20161213040330617511  begin_work  2016-12-13T04:04:06.383-0500
142314089             begin_work  2016-12-13T04:03:46.074-0500
142314089             refresh     2016-12-13T04:03:45.920-0500
142314089             co_refresh  2016-12-13T04:03:45.769-0500
142314089             test104     2016-12-13T04:03:45.832-0500
etc.
I have tried something like this:
library(jsonlite)
library(data.table)
setwd("C:/file/")
filenames <- list.files("system", pattern="*json*", full.names=TRUE)
dflist <- lapply(filenames, function(i) {
jsonlite::fromJSON(
paste0("[",
paste0(readLines(i),collapse=","),
"]"),flatten=TRUE
)
})
d<-rbindlist(dflist, use.names=TRUE, fill=TRUE)
I need to put the key-value pairs into a three-column data frame. Instead, I am getting the field names after key as columns and NA as the values. Any ideas how I could convert the JSON to a data frame in R?
This is something you can try: a combination of dplyr and tidyr:
library(dplyr)
library(tidyr)
library(jsonlite)
data <- jsonlite::fromJSON("data.json")
lapply(data, function(d) as_data_frame(d)) %>%
  bind_rows() %>%
  gather(group, time, -timestamp, -key) %>%
  select(key, group, time)
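The gather step above keeps every field, not only the timestamps between "key" and "#version". A hedged refinement, assuming the fields of interest are exactly those whose values look like ISO timestamps and that the "#"-prefixed metadata fields should be dropped:
result <- lapply(data, function(d) as_data_frame(d)) %>%
  bind_rows() %>%
  gather(group, time, -timestamp, -key) %>%
  # keep rows whose value looks like an ISO timestamp; drop '#' metadata fields
  filter(grepl("^\\d{4}-\\d{2}-\\d{2}T", time), !startsWith(group, "#")) %>%
  select(key, group, time)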
BTW I had to change your json example a little bit.
Here's the json file I use:
{"x":{"timestamp":"2016-12-13T04:04:06.394-0500",
"test101":"2016-12-13T04:04:06.382-0500",
"error":"false","from":"xon","event":"DAT","BT":"work","cd":"E","id":"IBM",
"key":"20161213040330617511","begin_work":"2016-12-13T04:04:06.383-0500","#version":"1","#timestamp":"2016-12-14T20:04:29.502Z"},
"y":{"timestamp":"2016-12-13T04:04:05.318-0500","test101":"2016-12-13T04:03:46.074-0500","error":"false","from":"de","event":"cp","BT":"work","cd":"dsh","id":"appl",
"key":"142314089",
"begin_work":"2016-12-13T04:03:46.074-0500",
"refresh":"2016-12-13T04:03:45.920-0500",
"co_refresh":"2016-12-13T04:03:45.769-0500",
"test104":"2016-12-13T04:03:45.832-0500",
"test105":"2016-12-13T04:03:46.031-0500",
"test7":"2016-12-13T04:03:46.032-0500",
"t-test9":"2016-12-13T04:03:45.704-0500",
"test10_StartDateTimeStamp":"2016-12-13T04:03:45.704-0500",
"stop":"2016-12-13T04:03:50.772-0500",
"stop_again":"2016-12-13T04:03:46.091-0500",
"#version":"1","#timestamp":"2016-12-14T20:04:29.503Z"},
"z":{"timestamp":"2016-12-13T04:04:07.113-0500","test101":"2016-12-13T04:04:07.068-0500","error":"false","from":"xon","event":"DAT","BT":"work","cd":"E","id":"3YPS","key":"20161213040318326935","begin_work":"2016-12-13T04:04:07.069-0500","#version":"1","#timestamp":"2016-12-14T20:04:29.505Z"}}