Parse Nested JSON Array into Splunk Table

I have the following JSON event with a nested array in Splunk:
{
"items":
[
{
"parts":
[
{
"code":"1","var":"","pNum":"101","counter":1019
},
{
"code":"0","var":"","pNum":"102","counter":1029
}
],
"se":"A1",
"so":"111"
},
{
"parts":
[
{
"code":"1","var":"","pNum":"301","counter":3019
},
{
"code":"0","var":"","pNum":"302","counter":3029
}
],
"se":"A3",
"so":"333"
},
{
"parts":
[
{
"code":"0","var":"","pNum":"401","counter":4019
}
],
"se":"A4",
"so":"444"
},
{
"parts":
[
{
"code":"1","var":"","pNum":"501","counter":5019
}
],
"se":"A5",
"so":"555"
}
],
"id":"x.9110790",
"cr":"x-273169"
}
I would like to extract this JSON into a Splunk table with one row per part, with columns id, cr, se, so, code, var, counter, and pNum.
I tried to use spath as below, but it gives the wrong results:
|makeresults | eval _raw="{
\"items\":
[
{
\"parts\":
[
{
\"code\":\"1\",\"var\":\"\",\"pNum\":\"101\",\"counter\":1019
},
{
\"code\":\"0\",\"var\":\"\",\"pNum\":\"102\",\"counter\":1029
}
],
\"se\":\"A1\",
\"so\":\"111\"
},
{
\"parts\":
[
{
\"code\":\"1\",\"var\":\"\",\"pNum\":\"301\",\"counter\":3019
},
{
\"code\":\"0\",\"var\":\"\",\"pNum\":\"302\",\"counter\":3029
}
],
\"se\":\"A3\",
\"so\":\"333\"
},
{
\"parts\":
[
{
\"code\":\"0\",\"var\":\"\",\"pNum\":\"401\",\"counter\":4019
}
],
\"se\":\"A4\",
\"so\":\"444\"
},
{
\"parts\":
[
{
\"code\":\"1\",\"var\":\"\",\"pNum\":\"501\",\"counter\":5019
}
],
\"se\":\"A5\",
\"so\":\"555\"
}
],
\"id\":\"x.9110790\",
\"cr\":\"x-273169\"
}" |spath |rename items as * | table id, cr,items{}.*
I am parsing JSON-type Splunk logs for the first time, so any hints on how to solve this would be appreciated.
Thank you

#Kripz
Can you please try this search?
| makeresults
| eval _raw="{ \"items\": [ { \"parts\": [ { \"code\":\"1\",\"var\":\"\",\"pNum\":\"101\",\"counter\":1019 }, { \"code\":\"0\",\"var\":\"\",\"pNum\":\"102\",\"counter\":1029 } ], \"se\":\"A1\", \"so\":\"111\" }, { \"parts\": [ { \"code\":\"1\",\"var\":\"\",\"pNum\":\"301\",\"counter\":3019 }, { \"code\":\"0\",\"var\":\"\",\"pNum\":\"302\",\"counter\":3029 } ], \"se\":\"A3\", \"so\":\"333\" }, { \"parts\": [ { \"code\":\"0\",\"var\":\"\",\"pNum\":\"401\",\"counter\":4019 } ], \"se\":\"A4\", \"so\":\"444\" }, { \"parts\": [ { \"code\":\"1\",\"var\":\"\",\"pNum\":\"501\",\"counter\":5019 } ], \"se\":\"A5\", \"so\":\"555\" } ], \"id\":\"x.9110790\", \"cr\":\"x-273169\" }"
| kv
| spath path=items{} output=items
| mvexpand items
| fields cr id items
| eval _raw=items
| kv
| rename parts{}.* as Parts_*
| eval temp=mvzip(mvzip(Parts_code, Parts_counter), Parts_pNum)
| mvexpand temp
| eval Parts_code=mvindex(split(temp, ","), 0), Parts_counter=mvindex(split(temp, ","), 1), Parts_pNum=mvindex(split(temp, ","), 2)
| table id cr se so Parts_code Parts_var Parts_counter Parts_pNum
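The key trick above is the mvzip/mvexpand/mvindex pattern: zip the parallel multivalue fields into one field, expand to one row per element, then split each zipped value back apart. Here is a minimal standalone sketch of just that pattern (the field names and values are made up for illustration):
| makeresults
| eval code=split("1,0", ","), pNum=split("101,102", ",")
| eval temp=mvzip(code, pNum)
| mvexpand temp
| eval code=mvindex(split(temp, ","), 0), pNum=mvindex(split(temp, ","), 1)
| table code pNum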
See my blog post on the same topic: https://www.crestdatasys.com/blogs/how-to-extract-complex-field-from-nested-json-events-using-splunk-spl/

Related

jq - remove non-matching fields in "object-array-with-objects"

Given the following JSON-object:
{
"meta": {
"data1": {
"keep": { "key": "value" }
}
},
"detail": {
"data2": [
{
"keep1": "keep1value",
"keep2": "keep2value",
"nokeep1": "abc"
}
],
"data3": [
{
"keep1": "keep1value",
"keep2": "keep2value",
"nokeep2": { "abc": "def" }
}
]
},
"drop" : "this"
}
I'm trying to clean it up by removing unwanted fields, like "drop", "nokeep1" and "nokeep2".
However, the objects in the "data2" and "data3" arrays might contain more fields than the example "nokeepX" ones, but they will always contain "keep1" and "keep2", which I want to keep.
My desired output is the following JSON:
{
"meta": { "data1": { "keep": { "key": "value" } } },
"detail": {
"data2": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
],
"data3": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
]
}
}
I've managed to remove the "drop" field with this query:
jq 'def pick($paths): . as $root | reduce ($paths[]|[.]|flatten(1)) as $path ({}; . + setpath($path; $root|getpath($path))); pick([["meta"], ["detail", "data2"], ["detail", "data3"]])'
However, I've been struggling to figure out how to remove the "nokeepX" fields. Is it possible to accomplish this?
If you have only a limited set of properties, it could be easier not to remove the unwanted fields, but to build the output from the required fields only:
{
meta,
detail: .detail | {
data2: .data2 | map({ keep1, keep2 }),
data3: .data3 | map({ keep1, keep2 })
}
}
Output:
{
"meta": {
"data1": {
"keep": {
"key": "value"
}
}
},
"detail": {
"data2": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
],
"data3": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
]
}
}
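As a complete invocation (assuming the input is saved as input.json):
jq '{ meta, detail: .detail | { data2: .data2 | map({ keep1, keep2 }), data3: .data3 | map({ keep1, keep2 }) } }' input.json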
The approach can be combined with dropping certain fields:
{
meta,
detail: .detail | {
data2: .data2 | map(del(.nokeep1)),
data3: .data3 | map(del(.nokeep2))
}
}
producing the same output as above.
Just provide all the concrete paths to del:
del(
.detail.data2[0].nokeep1,
.detail.data3[0].nokeep2,
.drop
)
Or generalize by e.g. traversing all array items (not just the first) using [] without indices:
del(
.detail.data2[].nokeep1,
.detail.data3[].nokeep2,
.drop
)
Or go arbitrarily deep using .., and just provide the deepest field names:
del(.. | objects | .nokeep1, .nokeep2, .drop)
Output:
{
"meta": {
"data1": {
"keep": "true"
}
},
"detail": {
"data2": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
],
"data3": [
{
"keep1": "keep1value",
"keep2": "keep2value"
}
]
}
}
For the other way round, you could list all the leaf paths using paths(scalars), filter out those whose deepest level .[-1] does not match your criteria, and use delpaths to remove the remaining leaves. Note that with the given input the scalar leaf under "keep" is named "key", so that is the name to put on the keep-list:
delpaths([paths(scalars) | select(
  .[-1] | IN("keep", "key", "keep1", "keep2") | not
)])
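For intuition, these are the leaf paths paths(scalars) emits for the question's input; every path whose last element is not on the keep-list gets deleted:
["meta","data1","keep","key"]
["detail","data2",0,"keep1"]
["detail","data2",0,"keep2"]
["detail","data2",0,"nokeep1"]
["detail","data3",0,"keep1"]
["detail","data3",0,"keep2"]
["detail","data3",0,"nokeep2","abc"]
["drop"]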

Recursively replace value with $path in JQ

Assume I have a complex JSON object:
{
"parent": {
"name": "value",
"child": {
"child_value1": "value",
"child_value2": "value",
"child_value3": ["value1","value2"],
"child_value4": {
"child_child_value1":"value"
}
}
}
}
I want to replace each value with the path of its key, joined with underscores and prefixed with $:
{
"parent": {
"name": "$name",
"child": {
"child_value1": "$child_child_value1",
"child_value2": "$child_child_value2",
"child_value3": ["$child_child_value3_0","$child_child_value3_1"],
"child_value4": {
"child_child_value1":"$child_child_value4_child_child_value1"
}
}
}
}
Is there a way to do it recursively?
EDIT
This is the configuration file I am currently using:
{
"apis": {
"order": {
"base_url": "$mapping_base_url"
},
"payment": {
"base_url": "$admin_base_url"
}
},
"features": {
"authentication": {
"authProviders": true,
"registration": false
}
},
"availableLocales":["en","es"]
}
This is the result of using the recommended jq expression:
. |= reduce paths(strings) as $p (.; setpath($p; "$" + ($p | join("_"))))
{
"apis": {
"order": {
"base_url": "$apis_order_base_url"
},
"payment": {
"base_url": "$apis_payment_base_url"
}
},
"features": {
"authentication": {
"authProviders": true,
"registration": false
}
},
"availableLocales": [
"$availableLocales_0",
"$availableLocales_1"
]
}
You're looking for something like this:
.parent |=
reduce paths(strings) as $p (.;
setpath($p; "$" + ($p | join("_")))
)
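As a complete invocation for the whole document from the EDIT (assuming it is saved as config.json):
jq '. |= reduce paths(strings) as $p (.; setpath($p; "$" + ($p | join("_"))))' config.json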

Find the average value in MongoDB from JSON

In my MongoDB (imported from a JSON file) I have a database "dab" with documents structured like this:
id:"1"
datetime:"2020-05-08 5:09:56"
name:"namea"
lat:55.826738
lon:45.0423412
analysis:"[{"0":0.36965591924860347},{"5":0.10391287134268598},{"10":0.086884394..."
I'm using this database for Spark analysis via the MongoDB Spark Connector.
My problem is the "analysis" field: I need the average of the values across every interval ("0", "5", "10", ..., "1000"), so I have to sum 0.36965591924860347 + 0.10391287134268598 + 0.086884394 + ..., divide by the number of intervals (there are 200 intervals in every column), and finally multiply the result by 100.
My solution would be this one:
db.collection.aggregate([
  // turn each { "<interval>": <value> } element into its [ { k, v } ] pair array
  {
    $set: {
      analysis: {
        $map: {
          input: "$analysis",
          in: { $objectToArray: "$$this" }
        }
      }
    }
  },
  // keep only the numeric value of each interval
  {
    $set: {
      analysis: {
        $map: {
          input: "$analysis",
          in: { $first: "$$this.v" }
        }
      }
    }
  },
  // average the values and scale by 100
  { $set: { average: { $multiply: [ { $avg: "$analysis" }, 100 ] } } }
])
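Note that $first as an array expression requires MongoDB 4.4+; on older versions the second stage can use $arrayElemAt instead, e.g. this drop-in sketch:
{
  $set: {
    analysis: {
      $map: {
        input: "$analysis",
        in: { $arrayElemAt: [ "$$this.v", 0 ] } // same as { $first: "$$this.v" }
      }
    }
  }
}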
You can use $reduce on that array, sum the values, then divide by the number of elements and multiply by 100:
db.collection.aggregate([
  {
    "$addFields": {
      "average": {
        "$multiply": [
          {
            "$divide": [
              {
                // sum the single value held by each { "<interval>": <value> } element
                "$reduce": {
                  "input": "$analysis",
                  "initialValue": 0,
                  "in": {
                    "$let": {
                      "vars": { "sum": "$$value", "data": "$$this" },
                      "in": {
                        "$add": [
                          "$$sum",
                          {
                            // $objectToArray turns { "<k>": <v> } into [ { k, v } ];
                            // the $map reshapes that to [ [ "<k>", <v> ] ], and the two
                            // $arrayElemAt calls pick out the value <v>
                            "$arrayElemAt": [
                              {
                                "$arrayElemAt": [
                                  {
                                    "$map": {
                                      "input": { "$objectToArray": "$$data" },
                                      "as": "m",
                                      "in": [ "$$m.k", "$$m.v" ]
                                    }
                                  },
                                  0
                                ]
                              },
                              1
                            ]
                          }
                        ]
                      }
                    }
                  }
                }
              },
              { "$size": "$analysis" }
            ]
          },
          100
        ]
      }
    }
  }
])
But this code has one problem: you store the data as documents, and MongoDB doesn't have a function like get(document, $$k). The new MongoDB v5.0 has $getField, but it still accepts only constants, not variables;
in your case that means we can't do getField(doc, "5").
So we pay the cost of converting each document to an array.

Reconstructing JSON with jq

I have a JSON file like this (sample.json):
{
"sheet1": [
{
"hostname": "sv001",
"role": "web",
"ip1": "172.17.0.3"
},
{
"hostname": "sv002",
"role": "web",
"ip1": "172.17.0.4"
},
{
"hostname": "sv003",
"role": "db",
"ip1": "172.17.0.5",
"ip2": "172.18.0.5"
}
],
"sheet2": [
{
"hostname": "sv004",
"role": "web",
"ip1": "172.17.0.6"
},
{
"hostname": "sv005",
"role": "db",
"ip1": "172.17.0.7"
},
{
"hostname": "vsv006",
"role": "db",
"ip1": "172.17.0.8"
}
],
"sheet3": []
}
I want to extract data like this from sheet1:
jq '(something command)' sample.json
{
"web": {
"hosts": [
"172.17.0.3",
"172.17.0.4"
]
},
"db": {
"hosts": [
"172.17.0.5"
]
}
}
Is it possible to perform the reconstruction with jq map?
(I will reuse the result as an Ansible inventory.)
Here's a short, straightforward, and efficient solution; efficient in part because it avoids group_by, courtesy of the following generic helper function:
def add_by(f;g): reduce .[] as $x ({}; .[$x|f] += [$x|g]);
.sheet1
| add_by(.role; .ip1)
| map_values( {hosts: .} )
This produces the required output:
{
"web": {
"hosts": [
"172.17.0.3",
"172.17.0.4"
]
},
"db": {
"hosts": [
"172.17.0.5"
]
}
}
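For reference, the complete invocation against the file from the question:
jq 'def add_by(f;g): reduce .[] as $x ({}; .[$x|f] += [$x|g]);
    .sheet1 | add_by(.role; .ip1) | map_values({hosts: .})' sample.json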
If the goal is to regroup the IPs by their roles within each sheet, you could do this:
map_values(
reduce group_by(.role)[] as $g ({};
.[$g[0].role].hosts = [$g[] | del(.hostname, .role)[]]
)
)
Which produces something like this:
{
"sheet1": {
"db": {
"hosts": [
"172.17.0.5",
"172.18.0.5"
]
},
"web": {
"hosts": [
"172.17.0.3",
"172.17.0.4"
]
}
},
"sheet2": {
"db": {
"hosts": [
"172.17.0.7",
"172.17.0.8"
]
},
"web": {
"hosts": [
"172.17.0.6"
]
}
},
"sheet3": {}
}
https://jqplay.org/s/3VpRc5l4_m
If you want to flatten everything into a single object, keeping only unique IPs, you can keep the approach mostly the same; you just need to flatten the inputs prior to grouping and remove the map_values/1 call. (Pass sample.json as the file argument, since with -n the inputs builtin consumes the whole input stream.)
$ jq -n '
reduce ([inputs[][]] | group_by(.role)[]) as $g ({};
.[$g[0].role].hosts = ([$g[] | del(.hostname, .role)[]] | unique)
)
'
{
"db": {
"hosts": [
"172.17.0.5",
"172.17.0.7",
"172.17.0.8",
"172.18.0.5"
]
},
"web": {
"hosts": [
"172.17.0.3",
"172.17.0.4",
"172.17.0.6"
]
}
}
https://jqplay.org/s/ZGj1wC8hU3

Mongodb insert with multiple conditions

I have multiple documents in a collection, and each document has this data structure:
{
_id: "some object id",
data1: [
{
data2_id : 13233,
data2: [
{
sub_data1: "text1",
sub_data2: "text2",
sub_data3: "text3",
},
{
sub_data1: "text4",
sub_data2: "text5",
sub_data3: "text6",
}
]
},
{
data2_id : 53233,
data2: [
{
sub_data1: "text4",
sub_data2: "text5",
sub_data3: "text6",
}
...
]
},
{
data2_id : 56233,
data2: [
{
sub_data1: "text7",
sub_data2: "text8",
sub_data3: "text9",
}
...
]
},
{
data2_id : 53236,
data2: [
{
sub_data1: "text10",
sub_data2: "text22",
sub_data3: "text33",
}
...
]
}
]
}
I'd like to update a set of documents whose ids match some condition, updating only the matching sub-object within each document.
I've tried this:
db.collection.update({
"$and": [
{
"_id": {
"$in": [
{
"$id": "54369aca9bc25af3ca8b4568"
},
{
"$id": "54369aca9bc25af3ca8b4562"
}
]
}
},
{
"data1.data2": {
"$elemMatch": {
"sub_data1": "text4",
"sub_data2": "text5"
}
}
}
]
},
{
"data1.data2.$.sub_data3" : "text updated"
}
)
But I get the following error:
Update of data into MongoDB failed: dev.**.com:27017: cannot use the part (data2 of data1.data2.0.sub_data3) to traverse the element...
Any ideas?
There is an open MongoDB issue that imposes a limitation when trying to update elements of an array nested within another array.
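If you are on MongoDB 3.6 or later, the filtered positional operator $[<identifier>] together with arrayFilters lifts this limitation. A sketch of what that could look like for your update (the ObjectId form of your two ids is assumed):
db.collection.update(
  { "_id": { "$in": [ ObjectId("54369aca9bc25af3ca8b4568"), ObjectId("54369aca9bc25af3ca8b4562") ] } },
  // $[] walks every data1 element; $[elem] matches only the data2 entries
  // selected by the arrayFilters condition below
  { "$set": { "data1.$[].data2.$[elem].sub_data3": "text updated" } },
  { "arrayFilters": [ { "elem.sub_data1": "text4", "elem.sub_data2": "text5" } ], "multi": true }
)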
Besides, there are some improvements you can do here:
For your query you don't need the $and:
db.collection.update(
{
"_id": {
"$in": [
{"$id": "54369aca9bc25af3ca8b4568"},
{"$id": "54369aca9bc25af3ca8b4562"}
]},
"data1.data2": {
"$elemMatch": {
"sub_data1": "text4",
"sub_data2": "text5"
}
},{..update...})
You might want to use $set:
db.collection.update(query,{ $set:{"name": "Mike"} })
Otherwise, you might lose the rest of the data within your document.
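For example (with a hypothetical _id), without $set the matched document is replaced wholesale:
db.collection.update({ "_id": 1 }, { "name": "Mike" })
// the document is now just { "_id": 1, "name": "Mike" }: every other field is gone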