I have multi-line (pretty-printed) JSON data, and I created a Hive table to load it into. I also have another JSON file that is a single-line JSON record. When I load the single-line JSON record into its Hive table and query it, it works fine. But when I load the multi-line JSON into its Hive table, I get the exception below:
Failed with exception java.io.IOException:org.apache.hadoop.hive.serde2.SerDeException: org.codehaus.jackson.JsonParseException: Unexpected end-of-input: expected close marker for OBJECT (from [Source: java.io.ByteArrayInputStream@8b89b3a; line: 1, column: 0]) at [Source: java.io.ByteArrayInputStream@8b89b3a; line: 1, column: 3]
Below is my JSON data:
{
"uploadTimeStamp" : "1486631318873",
"PDID" : "123",
"data" : [ {
"Data" : {
"unit" : "rpm",
"value" : "0"
},
"EventID" : "E1",
"PDID" : "123",
"Timestamp" : 1486631318873,
"Timezone" : 330,
"Version" : "1.0",
"pii" : { }
}, {
"Data" : {
"heading" : "N",
"loc3" : "false",
"loc" : "14.022425",
"loc1" : "78.760587",
"loc4" : "false",
"speed" : "10"
},
"EventID" : "E2",
"PDID" : "123",
"Timestamp" : 1486631318873,
"Timezone" : 330,
"Version" : "1.1",
"pii" : { }
}, {
"Data" : {
"x" : "1.1",
"y" : "1.2",
"z" : "2.2"
},
"EventID" : "E3",
"PDID" : "123",
"Timestamp" : 1486631318873,
"Timezone" : 330,
"Version" : "1.0",
"pii" : { }
}, {
"EventID" : "E4",
"Data" : {
"value" : "50",
"unit" : "percentage"
},
"Version" : "1.0",
"Timestamp" : 1486631318873,
"PDID" : "123",
"Timezone" : 330
}, {
"Data" : {
"unit" : "kmph",
"value" : "70"
},
"EventID" : "E5",
"PDID" : "123",
"Timestamp" : 1486631318873,
"Timezone" : 330,
"Version" : "1.0",
"pii" : { }
} ]
}
I am using /hive/lib/hive-hcatalog-core-0.13.0.jar
Below is my create table statement:
create table test7(
uploadtime bigint,
pdid string,
data array<
struct<Data:struct<
unit:string,
value:int>,
eventid:string,
pdid:bigint,
time:bigint,
timezone:int,
version:int,
pii:struct<pii:string>>,
struct<Data:struct<
heading:string,
Location:string,
latitude:bigint,
longitude:bigint,
Location2:string,
speed:int>,
eventid:string,
pdid:bigint,
time:bigint,
timezone:int,
version:int,
pii:struct<pii:string>>,
struct<Data:struct<
unit:string,
value:int>,
eventid:string,
pdid:bigint,
time:bigint,
timezone:int,
version:int,
pii:struct<pii:string>>,
struct<Data:struct<
x:int,
y:int,
z:int>,
eventid:string,
pdid:bigint,
time:bigint,
timezone:int,
version:int,
pii:struct<pii:string>>,
struct<Data:struct<
heading:string,
loc3:string,
latitude:bigint,
longitude:bigint,
loc4:string,
speed:int>,
eventid:string,
pdid:bigint,
time:bigint,
timezone:int,
version:int,
pii:struct<pii:string>>
>
)
ROW FORMAT SERDE
'org.apache.hive.hcatalog.data.JsonSerDe'
STORED AS TEXTFILE
LOCATION
'/xyz/abc/';
Edit:
Adding the single-line JSON and the new create table statement with its error:
{"uploadTimeStamp":"1487183800905","PDID":"123","data":[{"Data":{"unit":"rpm","value":"0"},"EventID":"event1","PDID":"123","Timestamp":1487183800905,"Timezone":330,"Version":"1.0","pii":{}},{"Data":{"heading":"N","loc1":"false","latitude":"16.032425","longitude":"80.770587","loc2":"false","speed":"10"},"EventID":"event2","PDID":"123","Timestamp":1487183800905,"Timezone":330,"Version":"1.1","pii":{}},{"Data":{"x":"1.1","y":"1.2","z":"2.2"},"event3":"AccelerometerInfo","PDID":"123","Timestamp":1487183800905,"Timezone":330,"Version":"1.0","pii":{}},{"EventID":"event4","Data":{"value":"50","unit":"percentage"},"Version":"1.0","Timestamp":1487183800905,"PDID":"123","Timezone":330},{"Data":{"unit":"kmph","value":"70"},"EventID":"event5","PDID":"123","Timestamp":1487183800905,"Timezone":330,"Version":"1.0","pii":{}}]}
create table test1(
uploadTimeStamp string,
PDID string,
data array<struct<
Data:struct<unit:string,value:int>,
EventID:string,
PDID:string,
TimeS:bigint,
Timezone:int,
Version:float,
pii:struct<>>,
struct<
Data:struct<heading:string,loc1:string,latitude:double,longitude:double,loc2:string,speed:int>,
EventID:string,
PDID:string,
TimeS:bigint,
Timezone:int,
Version:float,
pii:struct<>>,
struct<
Data:struct<x:float,y:float,z:float>,
EventID:string,
PDID:string,
TimeS:bigint,
Timezone:int,
Version:float,
pii:struct<>>,
struct<
EventID:string,
Data:struct<value:int,unit:percentage>,
Version:float,
TimeS:bigint,
PDID:string,
Timezone:int>,
struct<
Data:struct<unit:string,value:int>,
EventID:string,
PDID:string,
TimeS:bigint,
Timezone:int,
Version:float,
pii:struct<>>
>
ROW FORMAT SERDE
'org.apache.hive.hcatalog.data.JsonSerDe'
STORED AS TEXTFILE
LOCATION
'/ABC/XYZ/';
MismatchedTokenException(320!=313)
...
...
...
FAILED: ParseException line 11:10 mismatched input '<>' expecting < near 'struct' in struct type
Sample data (note: the JsonSerDe reads one JSON document per line, so the multi-line record from the question is flattened onto a single line):
{"uploadTimeStamp":"1486631318873","PDID":"123","data":[{"Data":{"unit":"rpm","value":"0"},"EventID":"E1","PDID":"123","Timestamp":1486631318873,"Timezone":330,"Version":"1.0","pii":{}},{"Data":{"heading":"N","loc3":"false","loc":"14.022425","loc1":"78.760587","loc4":"false","speed":"10"},"EventID":"E2","PDID":"123","Timestamp":1486631318873,"Timezone":330,"Version":"1.1","pii":{}},{"Data":{"x":"1.1","y":"1.2","z":"2.2"},"EventID":"E3","PDID":"123","Timestamp":1486631318873,"Timezone":330,"Version":"1.0","pii":{}},{"EventID":"E4","Data":{"value":"50","unit":"percentage"},"Version":"1.0","Timestamp":1486631318873,"PDID":"123","Timezone":330},{"Data":{"unit":"kmph","value":"70"},"EventID":"E5","PDID":"123","Timestamp":1486631318873,"Timezone":330,"Version":"1.0","pii":{}}]}
add jar /usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core.jar
create external table myjson
(
uploadTimeStamp string
,PDID string
,data array
<
struct
<
Data:struct
<
unit:string
,value:string
,heading:string
,loc3:string
,loc:string
,loc1:string
,loc4:string
,speed:string
,x:string
,y:string
,z:string
>
,EventID:string
,PDID:string
,`Timestamp`:bigint
,Timezone:smallint
,Version:string
,pii:struct<dummy:string>
>
>
)
row format serde 'org.apache.hive.hcatalog.data.JsonSerDe'
stored as textfile
location '/tmp/myjson'
;
select * from myjson
;
+------------------------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| myjson.uploadtimestamp | myjson.pdid | myjson.data |
+------------------------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| 1486631318873 | 123 | [{"data":{"unit":"rpm","value":"0","heading":null,"loc3":null,"loc":null,"loc1":null,"loc4":null,"speed":null,"x":null,"y":null,"z":null},"eventid":"E1","pdid":"123","timestamp":1486631318873,"timezone":330,"version":"1.0","pii":{"dummy":null}},{"data":{"unit":null,"value":null,"heading":"N","loc3":"false","loc":"14.022425","loc1":"78.760587","loc4":"false","speed":"10","x":null,"y":null,"z":null},"eventid":"E2","pdid":"123","timestamp":1486631318873,"timezone":330,"version":"1.1","pii":{"dummy":null}},{"data":{"unit":null,"value":null,"heading":null,"loc3":null,"loc":null,"loc1":null,"loc4":null,"speed":null,"x":"1.1","y":"1.2","z":"2.2"},"eventid":"E3","pdid":"123","timestamp":1486631318873,"timezone":330,"version":"1.0","pii":{"dummy":null}},{"data":{"unit":"percentage","value":"50","heading":null,"loc3":null,"loc":null,"loc1":null,"loc4":null,"speed":null,"x":null,"y":null,"z":null},"eventid":"E4","pdid":"123","timestamp":1486631318873,"timezone":330,"version":"1.0","pii":null},{"data":{"unit":"kmph","value":"70","heading":null,"loc3":null,"loc":null,"loc1":null,"loc4":null,"speed":null,"x":null,"y":null,"z":null},"eventid":"E5","pdid":"123","timestamp":1486631318873,"timezone":330,"version":"1.0","pii":{"dummy":null}}] |
+------------------------+-------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
select j.uploadTimeStamp
,j.PDID
,d.val.EventID
,d.val.PDID
,d.val.`Timestamp`
,d.val.Timezone
,d.val.Version
,d.val.Data.unit
,d.val.Data.value
,d.val.Data.heading
,d.val.Data.loc3
,d.val.Data.loc
,d.val.Data.loc1
,d.val.Data.loc4
,d.val.Data.speed
,d.val.Data.x
,d.val.Data.y
,d.val.Data.z
from myjson j
lateral view explode (data) d as val
;
+-------------------+--------+---------+------+---------------+----------+---------+------------+-------+---------+-------+-----------+-----------+-------+-------+------+------+------+
| j.uploadtimestamp | j.pdid | eventid | pdid | timestamp | timezone | version | unit | value | heading | loc3 | loc | loc1 | loc4 | speed | x | y | z |
+-------------------+--------+---------+------+---------------+----------+---------+------------+-------+---------+-------+-----------+-----------+-------+-------+------+------+------+
| 1486631318873 | 123 | E1 | 123 | 1486631318873 | 330 | 1.0 | rpm | 0 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
| 1486631318873 | 123 | E2 | 123 | 1486631318873 | 330 | 1.1 | NULL | NULL | N | false | 14.022425 | 78.760587 | false | 10 | NULL | NULL | NULL |
| 1486631318873 | 123 | E3 | 123 | 1486631318873 | 330 | 1.0 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | 1.1 | 1.2 | 2.2 |
| 1486631318873 | 123 | E4 | 123 | 1486631318873 | 330 | 1.0 | percentage | 50 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
| 1486631318873 | 123 | E5 | 123 | 1486631318873 | 330 | 1.0 | kmph | 70 | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL | NULL |
+-------------------+--------+---------+------+---------------+----------+---------+------------+-------+---------+-------+-----------+-----------+-------+-------+------+------+------+
I was having the same issue, so I decided to create a custom input format that can extract multi-line (pretty-printed) JSON records.
This JsonRecordReader can read a multi-line JSON record in Hive. It extracts records by balancing curly braces, { and }, so the content from the first '{' to its matching closing '}' is treated as one complete record. Below is the code snippet:
// imports needed by this snippet (place at the top of the enclosing file):
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;

public static class JsonRecordReader implements RecordReader<LongWritable, Text> {
public static final String START_TAG_KEY = "jsoninput.start";
public static final String END_TAG_KEY = "jsoninput.end";
private byte[] startTag = "{".getBytes();
private byte[] endTag = "}".getBytes();
private long start;
private long end;
private FSDataInputStream fsin;
private final DataOutputBuffer buffer = new DataOutputBuffer();
public JsonRecordReader(FileSplit split, JobConf jobConf) throws IOException {
// uncomment the below lines if you need to get the configuration
// from JobConf:
// startTag = jobConf.get(START_TAG_KEY).getBytes("utf-8");
// endTag = jobConf.get(END_TAG_KEY).getBytes("utf-8");
// open the file and seek to the start of the split:
start = split.getStart();
end = start + split.getLength();
Path file = split.getPath();
FileSystem fs = file.getFileSystem(jobConf);
fsin = fs.open(split.getPath());
fsin.seek(start);
}
@Override
public boolean next(LongWritable key, Text value) throws IOException {
if (fsin.getPos() < end) {
AtomicInteger count = new AtomicInteger(0);
if (readUntilMatch(false, count)) {
try {
buffer.write(startTag);
if (readUntilMatch(true, count)) {
key.set(fsin.getPos());
// create json record from buffer:
String jsonRecord = new String(buffer.getData(), 0, buffer.getLength());
value.set(jsonRecord);
return true;
}
} finally {
buffer.reset();
}
}
}
return false;
}
@Override
public LongWritable createKey() {
return new LongWritable();
}
@Override
public Text createValue() {
return new Text();
}
@Override
public long getPos() throws IOException {
return fsin.getPos();
}
@Override
public void close() throws IOException {
fsin.close();
}
@Override
public float getProgress() throws IOException {
return ((fsin.getPos() - start) / (float) (end - start));
}
private boolean readUntilMatch(boolean withinBlock, AtomicInteger count) throws IOException {
while (true) {
int b = fsin.read();
// end of file:
if (b == -1)
return false;
// save to buffer:
if (withinBlock)
buffer.write(b);
// check if we're matching start/end tag:
if (b == startTag[0]) {
count.incrementAndGet();
if (!withinBlock) {
return true;
}
} else if (b == endTag[0]) {
count.getAndDecrement();
if (count.get() == 0) {
return true;
}
}
// see if we've passed the stop point:
if (!withinBlock && count.get() == 0 && fsin.getPos() >= end)
return false;
}
}
}
This input format can be used along with the JSON SerDe supplied by Hive to read the multi-line JSON file.
CREATE TABLE books (id string, bookname string, properties struct<subscription:string, unit:string>) ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe' STORED AS INPUTFORMAT 'JsonInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat';
The working code with samples is here: https://github.com/unayakdev/hive-json
I've got 2 "tests", of which the one where I'm trying to decode a user works, but the one where I'm trying to decode a list of users doesn't:
import User._
import io.circe._
import io.circe.syntax._
import io.circe.parser.decode
class UserSuite extends munit.FunSuite:
test("List of users can be decoded") {
val json = """|{
| "data" : [
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| },
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
| ]
|}""".stripMargin
println(decode[List[User]](json))
}
test("user can be decoded") {
val json = """|{
| "data" : {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
|}""".stripMargin
println(decode[User](json))
}
The failing one produces
Left(DecodingFailure(List, List(DownField(data))))
despite the fact that the relevant JSON structure and the decoders (below) are the same.
final case class User(
id: String,
name: String,
username: String
)
import io.circe.generic.semiauto.deriveDecoder
object User:
given Decoder[List[User]] =
deriveDecoder[List[User]].prepare(_.downField("data"))
given Decoder[User] =
deriveDecoder[User].prepare(_.downField("data"))
As far as I understand this should work, even according to one of Travis's older replies, but it doesn't.
Is this a bug? Am I doing something wrong?
For reference, this is Scala 3.2.0 and circe 0.14.1.
The thing is that you need two different decoders for User: one expecting a data field, to decode the 2nd JSON, and one not expecting a data field, to use while deriving the decoder for a list. Otherwise the 1st JSON would have to be
"""|{
| "data" : [
| {
| "data" :
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
| },
| {
| "data" :
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
| }
| ]
|}""
It's better to be explicit now:
import io.circe.Decoder
import io.circe.generic.semiauto
import io.circe.parser.decode

final case class User(
id: String,
name: String,
username: String
)
object User {
val userDec: Decoder[User] = semiauto.deriveDecoder[User]
val preparedUserDec: Decoder[User] = userDec.prepare(_.downField("data"))
val userListDec: Decoder[List[User]] = {
implicit val dec: Decoder[User] = userDec
Decoder[List[User]].prepare(_.downField("data"))
}
}
val json =
"""|{
| "data" : [
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| },
| {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
| ]
|}""".stripMargin
decode[List[User]](json)(User.userListDec)
// Right(List(User(someId,someName,someusername), User(someId,someName,someusername)))
val json1 =
"""|{
| "data" : {
| "id" : "someId",
| "name" : "someName",
| "username" : "someusername"
| }
|}""".stripMargin
decode[User](json1)(User.preparedUserDec)
// Right(User(someId,someName,someusername))
In a Go application I make a query against the following table:
| ID | AGG_YEAR | AGG_MONTH | GENDER | AGE_RANGE | INCOME_RANGE | TOTAL |
|-----------|----------|-----------|--------|-----------|--------------|-------|
| 107502389 | 2019 | 7 | F | 18_29 | 1000_2000 | 15 |
| 107502389 | 2019 | 7 | F | 18_29 | 2000_4000 | 42 |
| 107502389 | 2019 | 7 | F | 30_44 | 1000_2000 | 25 |
| 107502389 | 2019 | 7 | F | 30_44 | 2000_4000 | 63 |
| 107502389 | 2019 | 7 | M | 18_29 | 1000_2000 | 30 |
| 107502389 | 2019 | 7 | M | 18_29 | 2000_4000 | 18 |
| 107502389 | 2019 | 7 | M | 30_44 | 1000_2000 | 36 |
| 107502389 | 2019 | 7 | M | 30_44 | 2000_4000 | 19 |
This table stores the total number of males and females with a certain wage level in a certain month. Usually, after querying the database, each record is scanned separately, one by one:
type Entry struct {
ID int `json:"id"`
AggYear int `json:"agg_year"`
AggMonth int `json:"agg_month"`
Gender string `json:"gender"`
AgeRange string `json:"age_range"`
IncomeRange string `json:"income_range"`
Total int `json:"total"`
}
var entries []Entry
rows, err := database.Query("***")
if err != nil {
fmt.Println(err)
return
}
defer rows.Close()
for rows.Next() {
var entry Entry
if err = rows.Scan(&entry.ID, &entry.AggYear, &entry.AggMonth, &entry.Gender, &entry.AgeRange, &entry.IncomeRange, &entry.Total); err != nil {
fmt.Println(err)
return
}
entries = append(entries, entry)
}
type IncomeDetails struct {
IncomeRange string `json:"income_range"`
Total int `json:"total"`
}
type AgeDetails struct {
AgeRange string `json:"age_range"`
Details []IncomeDetails `json:"details"`
}
type GenderDetails struct {
Gender string `json:"gender"`
Details []AgeDetails `json:"details"`
}
type EntryDetails struct {
AggYear int `json:"agg_year"`
AggMonth int `json:"agg_month"`
Details []GenderDetails `json:"details"`
}
type DataEntry struct {
ID int `json:"id"`
Details []EntryDetails `json:"details"`
}
var entryDetails []EntryDetails
i:= 0
for i < len(entries) {
var genderDetails []GenderDetails
aggYear := entries[i].AggYear
aggMonth := entries[i].AggMonth
for aggYear == entries[i].AggYear && aggMonth == entries[i].AggMonth {
gender := entries[i].Gender
var ageDetails []AgeDetails
for gender == entries[i].Gender {
ageRange := entries[i].AgeRange
var incomeDetails []IncomeDetails
for ageRange == entries[i].AgeRange && gender == entries[i].Gender { // <- runtime error: index out of range
incomeDetail := IncomeDetails{entries[i].IncomeRange, entries[i].Total}
incomeDetails = append(incomeDetails, incomeDetail)
i++
}
ageDetail := AgeDetails{entries[i-1].AgeRange, incomeDetails}
ageDetails = append(ageDetails, ageDetail)
i++
}
genderDetail := GenderDetails{entries[i-1].Gender, ageDetails}
genderDetails = append(genderDetails, genderDetail)
i++
}
entryDetail := EntryDetails{entries[i-1].AggYear, entries[i-1].AggMonth, genderDetails}
entryDetails = append(entryDetails, entryDetail)
i++
}
I want to know the best way to group these values into the nested shape shown in the example below. I want to understand the sequence of actions. I will be grateful for any help.
[
  {
    "id": 107502389,
    "details": [
      {
        "agg_year": 2019,
        "agg_month": 7,
        "details": [
          {
            "gender": "F",
            "details": [
              {
                "age_range": "18_29",
                "details": [
                  { "income_range": "1000_2000", "total": 15 },
                  { "income_range": "2000_4000", "total": 42 }
                ]
              },
              {
                "age_range": "30_44",
                "details": [
                  { "income_range": "1000_2000", "total": 25 },
                  { "income_range": "2000_4000", "total": 63 }
                ]
              }
            ]
          },
          {
            "gender": "M",
            "details": [
              {
                "age_range": "18_29",
                "details": [
                  { "income_range": "1000_2000", "total": 30 },
                  { "income_range": "2000_4000", "total": 18 }
                ]
              },
              {
                "age_range": "30_44",
                "details": [
                  { "income_range": "1000_2000", "total": 36 },
                  { "income_range": "2000_4000", "total": 19 }
                ]
              }
            ]
          }
        ]
      }
    ]
  }
]
Usually, I would update the structs according to the response I want. For example, in your case it would be:
type DataEntry struct {
ID int `json:"id"`
Details []EntryDetails `json:"details"`
}
type EntryDetails struct {
AggYear int `json:"agg_year"`
AggMonth int `json:"agg_month"`
Details []GenderDetails `json:"details"`
}
type GenderDetails struct {
Gender string `json:"gender"`
Details []AgeDetails `json:"details"`
}
type AgeDetails struct {
AgeRange string `json:"age_range"`
Details []IncomeDetails `json:"details"`
}
type IncomeDetails struct {
IncomeRange string `json:"income_range"`
Total int `json:"total"`
}
Dividing your code into smaller parts always makes it easier to read and maintain.
Next, to add the details to the structs: query according to your requirements and fill the structs one by one. For example: get the ID for the 'DataEntry' struct first, then the AggYear and AggMonth for that ID's 'EntryDetails' struct, and so on.
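As a minimal sketch of that grouping pass (my own illustration, not the linked program; it assumes the rows arrive sorted by year/month, then gender, then age range, as in the sample): the index-out-of-range error goes away once every loop is bounds-checked with i < len(entries), the outer conditions are repeated on the inner loops, and i advances only in the innermost loop.
// groupEntries converts the flat, sorted rows into the nested shape.
func groupEntries(entries []Entry) []EntryDetails {
	var result []EntryDetails
	i := 0
	for i < len(entries) {
		year, month := entries[i].AggYear, entries[i].AggMonth
		var genders []GenderDetails
		for i < len(entries) && entries[i].AggYear == year && entries[i].AggMonth == month {
			gender := entries[i].Gender
			var ages []AgeDetails
			for i < len(entries) && entries[i].AggYear == year &&
				entries[i].AggMonth == month && entries[i].Gender == gender {
				ageRange := entries[i].AgeRange
				var incomes []IncomeDetails
				for i < len(entries) && entries[i].AggYear == year &&
					entries[i].AggMonth == month && entries[i].Gender == gender &&
					entries[i].AgeRange == ageRange {
					incomes = append(incomes, IncomeDetails{entries[i].IncomeRange, entries[i].Total})
					i++ // advance only here, in the innermost loop
				}
				ages = append(ages, AgeDetails{ageRange, incomes})
			}
			genders = append(genders, GenderDetails{gender, ages})
		}
		result = append(result, EntryDetails{year, month, genders})
	}
	return result
}
// usage, assuming all rows share one ID as in the sample:
// resp := []DataEntry{{ID: entries[0].ID, Details: groupEntries(entries)}}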
You can find the full working program here : https://play.golang.org/p/_pdb5y9Wd-O
Enjoy!
I am new to Go and need some help.
In my PostgreSQL database I have 4 tables. They are called surveys, questions, options and surveys_questions_options.
They look like this:
surveys table:
| survey_id (uuid4) | survey_name (varchar) |
|--------------------------------------|-----------------------|
| 0cf1cf18-d5fd-474e-a8be-754fbdc89720 | April |
| b9fg55d9-n5fy-s7fe-s5bh-856fbdc89720 | May |
questions table:
| question_id (int) | question_text (text) |
|-------------------|------------------------------|
| 1 | What is your favorite color? |
options table:
| option_id (int) | option_text (text) |
|-------------------|--------------------|
| 1 | red |
| 2 | blue |
| 3 | grey |
| 4 | green |
| 5 | brown |
surveys_questions_options table combines data from all three previous tables:
| survey_id | question_id | option_id |
|--------------------------------------|-------------|-----------|
| 0cf1cf18-d5fd-474e-a8be-754fbdc89720 | 1 | 1 |
| 0cf1cf18-d5fd-474e-a8be-754fbdc89720 | 1 | 2 |
| 0cf1cf18-d5fd-474e-a8be-754fbdc89720 | 1 | 3 |
| b9fg55d9-n5fy-s7fe-s5bh-856fbdc89720 | 1 | 3 |
| b9fg55d9-n5fy-s7fe-s5bh-856fbdc89720 | 1 | 4 |
| b9fg55d9-n5fy-s7fe-s5bh-856fbdc89720 | 1 | 5 |
How can I make a nested JSON response in Go? I use the GORM library. I want a JSON response like this:
[
{
"survey_id": "0cf1cf18-d5fd-474e-a8be-754fbdc89720",
"survey_name": "April",
"questions": [
{
"question_id": 1,
"question_text": "What is your favorite color?",
"options": [
{
"option_id": 1,
"option_text": "red"
},
{
"option_id": 2,
"option_text": "blue"
},
{
"option_id": 3,
"option_text": "grey"
}
]
}
]
},
{
"survey_id": "b9fg55d9-n5fy-s7fe-s5bh-856fbdc89720",
"survey_name": "May",
"questions": [
{
"question_id": 1,
"question_text": "What is your favorite color?",
"options": [
{
"option_id": 3,
"option_text": "grey"
},
{
"option_id": 4,
"option_text": "green"
},
{
"option_id": 5,
"option_text": "brown"
}
]
}
]
}
]
My models look like this:
type Survey struct {
SurveyID string `gorm:"primary_key" json:"survey_id"`
SurveyName string `gorm:"not null" json:"survey_name"`
Questions []Question
}
type Question struct {
QuestionID int `gorm:"primary_key" json:"question_id"`
QuestionText string `gorm:"not null;unique" json:"question_text"`
Options []Option
}
type Option struct {
OptionID int `gorm:"primary_key" json:"option_id"`
OptionText string `gorm:"not null;unique" json:"option_text"`
}
I'm not sure about the GORM part, but for JSON you need to add struct tags on the nested objects as well:
type Survey struct {
...
Questions []Question `json:"questions"`
}
type Question struct {
...
Options []Option `json:"options"`
}
We're missing some scope from your code, so it's quite hard to point you in the right direction. Are you asking about querying GORM so you get []Survey, or are you asking about marshalling []Survey? Anyway, you should add the tag to Questions too, as slomek replied.
However, try this to fetch nested data in a many2many relation:
type Survey struct {
gorm.Model
SurveyID string `gorm:"primary_key" json:"survey_id"`
SurveyName string `gorm:"not null" json:"survey_name"`
Questions []*Question `gorm:"many2many:survey_questions;"`
}
surveys := []*model.Survey{}
db := dbSession.Where(&model.Survey{SurveyID: id}).Preload("Questions").Find(&surveys)
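As a hedged follow-up sketch (not from the original answer): once the Question model also declares its own Options collection, GORM's nested preloading can fill the whole tree in one call, and encoding it yields the desired response. dbSession and model.Survey are the same names as above:
import (
	"encoding/json"
	"log"
	"net/http"
)

// listSurveys writes all surveys, their questions, and each question's
// options as nested JSON. "Questions.Options" is GORM's nested-preload
// syntax; it assumes an Options association exists on the Question model.
func listSurveys(w http.ResponseWriter, r *http.Request) {
	surveys := []*model.Survey{}
	if err := dbSession.Preload("Questions.Options").Find(&surveys).Error; err != nil {
		log.Println(err)
		http.Error(w, "query failed", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(surveys)
}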
My Controller Class:
public function postAction(Request $request)
{
$content = $request->getContent();
$category = $this->get('jms_serializer')->deserialize($content,'AppBundle\Entity\Category','json');
$errors = $this->get('validator')->validate($category);
if (count($errors) > 0) {
return new View("NAME LENGTH MUST BE >4",Response::HTTP_BAD_REQUEST);
} else {
$em = $this->getDoctrine()->getManager();
$em->persist($category);
$em->flush();
return new View($category, Response::HTTP_OK);
}
}
Entity:
class Category
{
private $id;
private $parent;
public function getChildren()
{
return $this->children;
}
private $children;
public function __construct()
{
$this->children = new ArrayCollection();
}
//setters and getters
Doctrine.yml:
AppBundle\Entity\Category:
type: entity
oneToMany:
children:
targetEntity: AppBundle\Entity\Category
mappedBy: parent
orderBy:
name: ASC
manyToOne:
parent:
targetEntity: AppBundle\Entity\Category
inversedBy: children
joinColumn:
name: parentId
referencedColumnName: id
table: category
repositoryClass: AppBundle\Repository\CategoryRepository
id:
id:
column: id
type: integer
id: true
generator:
strategy: AUTO
fields:
name:
type: string
length: 255
When I send a POST JSON request like this:
{
"name": "Child to 8",
"parentId": "8"
}
In the MySQL table I do not receive parentId:
mysql> select * from category;
+----+--------------------+----------+
| id | name | parentId |
+----+--------------------+----------+
| 1 | Primary Category | NULL |
| 2 | Secondary Category | 1 |
| 3 | D_child | 1 |
| 4 | F_child | 1 |
| 5 | Z_child | 1 |
| 6 | Y_child | 1 |
| 7 | H_child | 1 |
| 8 | A_child | 1 |
| 9 | Child to 8 | NULL |<----- must be 8
+----+--------------------+----------+
But after deserialization I receive this:
{
"id": 9,
"name": "Child to 8"
}
I understand that id is an integer, while parentId is actually an object of class Category. But how can I make it get written as well?
How can I do this? Maybe I do not understand something...
You need to have a .yml config file for the serializer, in your case Entity.Category.yml.
In this file, add the property for the nested entity, set its type to your entity class and, to be sure, define its accessors (setter, getter).
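A hedged sketch of what that mapping might look like (the file path and serialized_name are assumptions based on the entity above; note that turning the plain parentId integer into a Category relation on deserialization may additionally require a Doctrine object constructor or a custom handler):
# src/AppBundle/Resources/config/serializer/Entity.Category.yml (assumed path)
AppBundle\Entity\Category:
    exclusion_policy: NONE
    properties:
        id:
            type: integer
        name:
            type: string
        parent:
            # map the incoming "parentId" field onto the parent relation
            serialized_name: parentId
            type: AppBundle\Entity\Category
            access_type: public_method
            accessor:
                getter: getParent
                setter: setParent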
I am getting started with sails.js and I am completely lost with my SQL queries.
I have the following tables :
genres
+-----------+--------------+------+-----+
| Field | Type | Null | Key |
+-----------+--------------+------+-----+
| id | int(6) | NO | PRI |
| name | varchar(100) | NO | |
| slug | varchar(255) | NO | |
| type | varchar(32) | NO | |
| parent_id | int(11) | YES | MUL |
+-----------+--------------+------+-----+
genres_radios
+----------+--------+------+-----+
| Field | Type | Null | Key |
+----------+--------+------+-----+
| genre_id | int(6) | NO | MUL |
| radio_id | int(6) | NO | MUL |
+----------+--------+------+-----+
radios
+-----------+--------------+------+-----+
| Field | Type | Null | Key |
+-----------+--------------+------+-----+
| id | int(5) | NO | PRI |
| name | varchar(100) | NO | |
| slug | varchar(100) | NO | |
| url | varchar(100) | NO | |
+-----------+--------------+------+-----+
I want to retrieve the radios and their associated genres. I managed to do it using Model.query("SELECT * FROM ..."), but I'd like to do it using the populate method. I had a look at the docs, but I am a bit confused by "via", "through", ...
Well, if you've followed the Sails.js model documentation and the many-to-many association docs, your models should look something like:
// api/models/genre.js
module.exports = {
attributes : {
name : {
type: 'string'
},
slug : {
type: 'string'
},
type : {
type: 'string'
},
radios : {
collection: 'radio',
via: 'genres'
}
}
}
// api/models/radio.js
module.exports = {
attributes : {
name : {
type: 'string'
},
slug : {
type: 'string'
},
url : {
type: 'string'
},
genres : {
collection: 'genre',
via: 'radios'
}
}
}
The many-to-many lookup table will be created for you internally by Waterline. All you need to do to get the genres for your radio is populate the "genres" attribute.
Radio.findOne({name:"RadioName"}).populate("genres").then(function(radio){
console.log(radio); //radio.genres will have all the genres associated with this radio.
})
I really do recommend looking at the many-to-many association docs. They have exactly what you need.
This should do it:
// api/models/Genres.js
module.exports = {
attributes : {
name : {
type: 'string'
},
slug : {
type: 'string'
},
type : {
type: 'string'
},
radios : {
collection: 'Radios',
through: 'genres_radios'
}
}
}
// api/models/Radios.js
module.exports = {
attributes : {
name : {
type: 'string'
},
slug : {
type: 'string'
},
url : {
type: 'string'
},
genres : {
collection: 'Genres',
through: 'genres_radios'
}
}
}
// api/models/Genres_radios.js
module.exports = {
attributes: {
'Genre_id': {
columnName:'genre_id',
type:'integer',
foreignKey:'true',
references:'genres',
on:'id',
via:'genres'
},
'Radio_id': {
columnName:'radio_id',
type:'integer',
foreignKey:'true',
references:'radios',
on:'id',
via:'radios'
}
}
}
And then you can make the following request:
Radio.findOne({name:"RadioName"}).populate("genres").then(function(radio){
console.log(radio);
})