Dynatrace API - Extract request count(success, 4xx, 5xx) for each service by service name - dynatrace

I am trying to extract the request count (success, 4xx, 5xx) for each web service under my project. I tried the query below, but it is not giving what I expected.
builtin:service.requestCount.total metric key
https://www.dynatrace.xx.com/e/12345-6539-4ea3-aab8-a9985ae713d4/api/v2/metrics/query?metricSelector=builtin:service.requestCount.total&entitySelector=type(SERVICE)&from=now-6h&resolution=3w&api-token=<token_name>
builtin:service.keyRequest.total.count metric key
https://www.dynatrace.xx.com/e/1234-6539-4ea3-aab8-a9985ae713d4/api/v2/metrics/query?metricSelector=builtin:service.keyRequest.total.count&entitySelector=type(SERVICE)&from=now-6h&resolution=3w&api-token=<token_name>
In multi dimensional analysis chart I could see the count correctly
Can someone help me with the API query, where I can pass the service name as parameter in request and get the request count associated with that service request.
Edited :
Please find below API response for the query
> {
> "totalCount": 2,
> "nextPageKey": null,
> "resolution": "3m",
> "result": [
> {
> "metricId": "builtin:service.requestCount.total:filter(and(in(\"dt.entity.service\",entitySelector(\"type(service),entityName(~\"ControllerName~\")\")))):splitBy(\"dt.entity.service\"):avg:auto:sort(value(avg,descending)):limit(100)",
> "data": [
> {
> "dimensions": [
> "SERVICE-376AE4987839A1D7"
> ],
> "dimensionMap": {
> "dt.entity.service": "SERVICE-376AE4987839A1D7"
> },
> "timestamps": [
> 1645673400000,
> 1645673580000,
> 1645673760000,
> 1645673940000,
> 1645674120000,
> 1645674300000,
> 1645674480000,
> 1645674660000,
> 1645674840000,
> 1645675020000,
> 1645675200000,
> 1645675380000,
> 1645675560000,
> 1645675740000,
> 1645675920000,
> 1645676100000,
> 1645676280000,
> 1645676460000,
> 1645676640000,
> 1645676820000,
> 1645677000000,
> 1645677180000,
> 1645677360000,
> 1645677540000,
> 1645677720000,
> 1645677900000,
> 1645678080000,
> 1645678260000,
> 1645678440000,
> 1645678620000,
> 1645678800000,
> 1645678980000,
> 1645679160000,
> 1645679340000,
> 1645679520000,
> 1645679700000,
> 1645679880000,
> 1645680060000,
> 1645680240000,
> 1645680420000,
> 1645680600000,
> 1645680780000,
> 1645680960000,
> 1645681140000,
> 1645681320000,
> 1645681500000,
> 1645681680000,
> 1645681860000,
> 1645682040000,
> 1645682220000,
> 1645682400000,
> 1645682580000,
> 1645682760000,
> 1645682940000,
> 1645683120000,
> 1645683300000,
> 1645683480000,
> 1645683660000,
> 1645683840000,
> 1645684020000,
> 1645684200000,
> 1645684380000,
> 1645684560000,
> 1645684740000,
> 1645684920000,
> 1645685100000,
> 1645685280000,
> 1645685460000,
> 1645685640000,
> 1645685820000,
> 1645686000000,
> 1645686180000,
> 1645686360000,
> 1645686540000,
> 1645686720000,
> 1645686900000,
> 1645687080000,
> 1645687260000,
> 1645687440000,
> 1645687620000,
> 1645687800000,
> 1645687980000,
> 1645688160000,
> 1645688340000,
> 1645688520000,
> 1645688700000,
> 1645688880000,
> 1645689060000,
> 1645689240000,
> 1645689420000,
> 1645689600000,
> 1645689780000,
> 1645689960000,
> 1645690140000,
> 1645690320000,
> 1645690500000,
> 1645690680000,
> 1645690860000,
> 1645691040000,
> 1645691220000,
> 1645691400000,
> 1645691580000,
> 1645691760000,
> 1645691940000,
> 1645692120000,
> 1645692300000,
> 1645692480000,
> 1645692660000,
> 1645692840000,
> 1645693020000,
> 1645693200000,
> 1645693380000,
> 1645693560000,
> 1645693740000,
> 1645693920000,
> 1645694100000,
> 1645694280000,
> 1645694460000,
> 1645694640000,
> 1645694820000,
> 1645695000000
> ],
> "values": [
> 6,
> null,
> null,
> 1,
> 1,
> null,
> null,
> 2,
> null,
> null,
> 2,
> null,
> null,
> 2,
> 1,
> null,
> null,
> 5,
> 1,
> 4,
> 1,
> 2,
> 6,
> 2,
> 5,
> 7,
> 1,
> 3,
> 3,
> 4,
> null,
> 5,
> 9,
> 8,
> 3,
> 3,
> 4,
> null,
> 3,
> 1,
> 2,
> 3,
> 1,
> 5,
> 2,
> 5,
> null,
> 1,
> 4,
> 1,
> null,
> 2,
> 2,
> 1,
> 1,
> 2,
> 1,
> null,
> 1,
> null,
> null,
> 5,
> 1,
> null,
> null,
> 1,
> 1,
> 4,
> 1,
> null,
> null,
> 2,
> 3,
> null,
> null,
> 2,
> null,
> null,
> 2,
> null,
> 1,
> null,
> 2,
> 2,
> null,
> 2,
> 7,
> 1,
> 1,
> null,
> null,
> 1,
> 1,
> 1,
> 4,
> null,
> null,
> 1,
> null,
> null,
> null,
> 4,
> null,
> 1,
> null,
> 1,
> null,
> null,
> 1,
> null,
> null,
> 5,
> 2,
> 6,
> 5,
> null,
> null,
> 3,
> 2,
> 3,
> null
> ]
> },
> {
> "dimensions": [
> "SERVICE-BC7B97C170AF3FD6"
> ],
> "dimensionMap": {
> "dt.entity.service": "SERVICE-BC7B97C170AF3FD6"
> },
> "timestamps": [
> 1645673400000,
> 1645673580000,
> 1645673760000,
> 1645673940000,
> 1645674120000,
> 1645674300000,
> 1645674480000,
> 1645674660000,
> 1645674840000,
> 1645675020000,
> 1645675200000,
> 1645675380000,
> 1645675560000,
> 1645675740000,
> 1645675920000,
> 1645676100000,
> 1645676280000,
> 1645676460000,
> 1645676640000,
> 1645676820000,
> 1645677000000,
> 1645677180000,
> 1645677360000,
> 1645677540000,
> 1645677720000,
> 1645677900000,
> 1645678080000,
> 1645678260000,
> 1645678440000,
> 1645678620000,
> 1645678800000,
> 1645678980000,
> 1645679160000,
> 1645679340000,
> 1645679520000,
> 1645679700000,
> 1645679880000,
> 1645680060000,
> 1645680240000,
> 1645680420000,
> 1645680600000,
> 1645680780000,
> 1645680960000,
> 1645681140000,
> 1645681320000,
> 1645681500000,
> 1645681680000,
> 1645681860000,
> 1645682040000,
> 1645682220000,
> 1645682400000,
> 1645682580000,
> 1645682760000,
> 1645682940000,
> 1645683120000,
> 1645683300000,
> 1645683480000,
> 1645683660000,
> 1645683840000,
> 1645684020000,
> 1645684200000,
> 1645684380000,
> 1645684560000,
> 1645684740000,
> 1645684920000,
> 1645685100000,
> 1645685280000,
> 1645685460000,
> 1645685640000,
> 1645685820000,
> 1645686000000,
> 1645686180000,
> 1645686360000,
> 1645686540000,
> 1645686720000,
> 1645686900000,
> 1645687080000,
> 1645687260000,
> 1645687440000,
> 1645687620000,
> 1645687800000,
> 1645687980000,
> 1645688160000,
> 1645688340000,
> 1645688520000,
> 1645688700000,
> 1645688880000,
> 1645689060000,
> 1645689240000,
> 1645689420000,
> 1645689600000,
> 1645689780000,
> 1645689960000,
> 1645690140000,
> 1645690320000,
> 1645690500000,
> 1645690680000,
> 1645690860000,
> 1645691040000,
> 1645691220000,
> 1645691400000,
> 1645691580000,
> 1645691760000,
> 1645691940000,
> 1645692120000,
> 1645692300000,
> 1645692480000,
> 1645692660000,
> 1645692840000,
> 1645693020000,
> 1645693200000,
> 1645693380000,
> 1645693560000,
> 1645693740000,
> 1645693920000,
> 1645694100000,
> 1645694280000,
> 1645694460000,
> 1645694640000,
> 1645694820000,
> 1645695000000
> ],
> "values": [
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> null,
> 1,
> null,
> null,
> null,
> null
> ]
> }
> ]
> }
> ] }

The metrics have a dimension service, i.e. you can filter by certain services via the filter on the service-name.
You can build, test and refine the query in the UI:
And then use the tab Code to see the metric-selector that you can also use in the REST API:

I don't know if you finally figured out, but
builtin:service.keyRequest.total.count
The response of this is an array containing each point based on a timestamp, similar to a Dynatrace chart. In order to get the total amount, you need to apply some operations:
builtin:service.keyRequest.total.count:fold(sum)
Using :fold(sum), a single value is returned (the sum/add operation is applied across the series).
Common data operations like median, average, and maximum work as well—Dynatrace calls them aggregations; check the documentation.

Related

Range access on multiple columns using an index

As per my understanding of the MySQL documentation which states:
The optimizer attempts to use additional key parts to determine the interval as long as the comparison operator is =, <=>, or IS NULL. If the operator is >, <, >=, <=, !=, <>, BETWEEN, or LIKE, the optimizer uses it but considers no more key parts.
https://dev.mysql.com/doc/refman/5.6/en/range-optimization.html#range-access-multi-part
I was trying to optimize the following query:
SELECT col3 FROM tbl2 WHERE col3 = 'fHI' AND col1 > 20 AND col4 > 0.5;
where tbl2 is defined as:
CREATE TABLE `tbl2` (
`col1` int NOT NULL,
`col2` varchar(512) NOT NULL DEFAULT '',
`col3` varchar(512) DEFAULT NULL,
`col4` double DEFAULT NULL,
PRIMARY KEY (`col2`),
KEY `cd1` (`col3`,`col1`,`col4`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
I was under the impression that the columns used would be col3 and col1 since the predicate on col1 uses the > operator. However, the optimizer plan says that it uses all three index key columns as per the plan.
{
"query_block": {
"select_id": 1,
"cost_info": {
"query_cost": "92.10"
},
"table": {
"table_name": "tbl2",
"access_type": "index",
"possible_keys": [
"cd1"
],
"key": "cd1",
"used_key_parts": [
"col3",
"col1",
"col4"
],
"key_length": "528",
"rows_examined_per_scan": 901,
"rows_produced_per_join": 881,
"filtered": "97.78",
"using_index": true,
"cost_info": {
"read_cost": "4.00",
"eval_cost": "88.10",
"prefix_cost": "92.10",
"data_read_per_join": "901K"
},
"used_columns": [
"col1",
"col3",
"col4"
],
"attached_condition": "((`test`.`tbl2`.`col3` = 'fHI') and (`test`.`tbl2`.`col1` > 20) and (`test`.`tbl2`.`col4` > 0.5))"
}
}
}
Is the documentation wrong or is col4 being used in a different way?
mysql> explain SELECT col3 FROM tbl2 WHERE col3 = 'fHI' AND col1 > 20 AND col4 > 0.5;
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
| 1 | SIMPLE | tbl2 | NULL | index | cd1 | cd1 | 528 | NULL | 1 | 100.00 | Using where; Using index |
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
I think the answer is that EXPLAIN is sloppy in multiple ways.
I cannot explain where "528" comes from.
Anyway, note that the Optimizer Trace mentions only
"ranges": [
"fHI <= col3 <= fHI AND 20 < col1"
],
Optimizer Trace:
{
"steps": [
{
"join_preparation": {
"select#": 1,
"steps": [
{
"expanded_query": "/* select#1 */ select `tbl2`.`col3` AS `col3` from `tbl2` where ((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5)) limit 33"
}
]
}
},
{
"join_optimization": {
"select#": 1,
"steps": [
{
"condition_processing": {
"condition": "WHERE",
"original_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"steps": [
{
"transformation": "equality_propagation",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
},
{
"transformation": "constant_propagation",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
},
{
"transformation": "trivial_condition_removal",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
}
]
}
},
{
"substitute_generated_columns": {
}
},
{
"table_dependencies": [
{
"table": "`tbl2`",
"row_may_be_null": false,
"map_bit": 0,
"depends_on_map_bits": [
]
}
]
},
{
"ref_optimizer_key_uses": [
{
"table": "`tbl2`",
"field": "col3",
"equals": "'fHI'",
"null_rejecting": true
}
]
},
{
"rows_estimation": [
{
"table": "`tbl2`",
"range_analysis": {
"table_scan": {
"rows": 1,
"cost": 2.45
},
"potential_range_indexes": [
{
"index": "PRIMARY",
"usable": false,
"cause": "not_applicable"
},
{
"index": "cd1",
"usable": true,
"key_parts": [
"col3",
"col1",
"col4",
"col2"
]
}
],
"best_covering_index_scan": {
"index": "cd1",
"cost": 0.35,
"chosen": true
},
"setup_range_conditions": [
],
"group_index_range": {
"chosen": false,
"cause": "not_group_by_or_distinct"
},
"skip_scan_range": {
"potential_skip_scan_indexes": [
{
"index": "cd1",
"usable": false,
"cause": "prefix_not_const_equality"
}
]
},
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "cd1",
"ranges": [
"fHI <= col3 <= fHI AND 20 < col1"
],
"index_dives_for_eq_ranges": true,
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 0.36,
"chosen": false,
"cause": "cost"
}
],
"analyzing_roworder_intersect": {
"usable": false,
"cause": "too_few_roworder_scans"
}
}
}
}
]
},
{
"considered_execution_plans": [
{
"plan_prefix": [
],
"table": "`tbl2`",
"best_access_path": {
"considered_access_paths": [
{
"access_type": "ref",
"index": "cd1",
"chosen": false,
"cause": "range_uses_more_keyparts"
},
{
"rows_to_scan": 1,
"access_type": "scan",
"resulting_rows": 1,
"cost": 0.35,
"chosen": true
}
]
},
"condition_filtering_pct": 100,
"rows_for_plan": 1,
"cost_for_plan": 0.35,
"chosen": true
}
]
},
{
"attaching_conditions_to_tables": {
"original_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"attached_conditions_computation": [
],
"attached_conditions_summary": [
{
"table": "`tbl2`",
"attached": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))"
}
]
}
},
{
"finalizing_table_conditions": [
{
"table": "`tbl2`",
"original_table_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"final_table_condition ": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))"
}
]
},
{
"refine_plan": [
{
"table": "`tbl2`"
}
]
}
]
}
},
{
"join_execution": {
"select#": 1,
"steps": [
]
}
}
]
}

Parsing JSON without key names to retrieve a column

I am loading json from data.gov that does not have key names for the values in the json data, e.g. below: the metadata is available separately.
I am able to load the json into a variant column, but cannot see how to parse and query for specific columns, e.g. Frankford below - I have tried JSONcol:data[0] which returns the entire entry, but am unable to see how to specify column 4, say.
{
data: [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\"address\": \"4509 BELAIR ROAD\", \"city\": \"Baltimore\", \"state\": \"MD\", \"zip\": \"\"}", null, null, null, true ], null, null, null ]]
}
The following code is used to create and load the snowflake table:
create or replace table snowpipe.public.snowtable(jsontext variant);
copy into snowpipe.public.snowtable
from @snowpipe.public.snowstage
file_format = (type = 'JSON')
I am not exactly sure how your variant data looks once it is loaded, but I experimented on a variant via PARSE_JSON for your object, which required doubling the backslashes (\\) to make it valid SQL.
select
PARSE_JSON('{ data: [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\\"address\\": \\"4509 BELAIR ROAD\\", \\"city\\": \\"Baltimore\\", \\"state\\": \\"MD\\", \\"zip\\": \\"\\"}", null, null, null, true ], null, null, null ]]}') as j
,j:data as jd
,jd[0] as jd0
,jd0[3] as jd0_3
,array_slice(j:data[0],3,5) as jd0_3to4
;
shows that you can use [0] notation to index arrays, and thus get the results:
J: { "data": [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\"a...
JD: [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\"address\": \"4509 BELAIR ROAD\", \"city\": \"...
JD0: [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\"address\": \"4509 BELAIR ROAD\", \"city\": \"Baltimore\", \"state\": \"MD\", \"...
JD0_3: 1486063689
JD0_3TO4: [ 1486063689, null ]
so if you have an unknown number of first-level elements in data that you want to access, then use LATERAL FLATTEN like so:
WITH data as (
select PARSE_JSON('{ data: [ [ "row-1", "0B8", 0 ],["row-2", "F94", 2],
["row-3", "EE5", 4]]}') as j
)
select f.value[0]::text as row_name
,f.value[1]::text as serial_number
,f.value[2]::number as num
from data d,
lateral flatten(input=> d.j:data) f;
gives:
ROW_NAME SERIAL_NUMBER NUM
row-1 0B8 0
row-2 F94 2
row-3 EE5 4

Retrieving sub-fields from parsed JSON in snowflake

I'm having some difficulty getting the individual components of the address component
with data as (select
PARSE_JSON('{ "data" : [
[ "row-ea6u~fkaa~32ry", "00000000-0000-0000-01B7-0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\"address\": \"4509 BELAIR ROAD\", \"city\": \"Baltimore\", \"state\": \"MD\", \"zip\": \"\"}", null, null, null, true ], null, null, null ] ]
}') as j
)
select f.value[1][0]::text
from data d,
lateral flatten(input=> d.j:data,recursive=>TRUE) f;
f.value[1][0] has a field address
{"address": "4509 BELAIR ROAD", "city": "Baltimore", "state": "MD", "zip": ""}
but
f.value[1][0].address returns null
How do I get the individual attributes of f.value[1] like address, city, etc?
The problem is that, since you have three levels of nested data, you should not be using recursive=>TRUE: the objects at each level are not the same shape, so the recursive flatten cannot produce anything of value. You need to break the different layers apart manually.
with data as (
select
PARSE_JSON('{ data: [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\\"address\\": \\"4509 BELAIR ROAD\\", \\"city\\": \\"Baltimore\\", \\"state\\": \\"MD\\", \\"zip\\": \\"\\"}", null, null, null, true ], null, null, null ]]}') as j
), data_rows as (
select f.value as r
from data d,
lateral flatten(input=> d.j:data) f
)
select dr.r[0] as v0
,dr.r[1] as v1
,dr.r[2] as v2
,dr.r[3] as v3
,f.value as addr_n
from data_rows dr,
lateral flatten(input=> dr.r[13]) f;
so this gets all the rows (of which your example has only one), then unpacks the values of interest (you will need to complete this part and give v0–vN meaning), but there is an array of addresses
V0 V1 V2 V3 ADDR_N
"row-ea6u~fkaa~32ry" "0B8F94EE5292" 0 1486063689 "{\"address\": \"4509 BELAIR ROAD\", \"city\": \"Baltimore\", \"state\": \"MD\", \"zip\": \"\"}"
"row-ea6u~fkaa~32ry" "0B8F94EE5292" 0 1486063689 null
"row-ea6u~fkaa~32ry" "0B8F94EE5292" 0 1486063689 null
"row-ea6u~fkaa~32ry" "0B8F94EE5292" 0 1486063689 null
"row-ea6u~fkaa~32ry" "0B8F94EE5292" 0 1486063689 true
Now, to decode the address as JSON, adding parse_json(f.value) as addr_n does that, so you can break it apart like:
with data as (
select
PARSE_JSON('{ data: [ [ "row-ea6u~fkaa~32ry", "0B8F94EE5292", 0, 1486063689, null, 1486063689, null, "{ }", "410", "21206", "Frankford", "2", "NORTHEASTERN", [ "{\\"address\\": \\"4509 BELAIR ROAD\\", \\"city\\": \\"Baltimore\\", \\"state\\": \\"MD\\", \\"zip\\": \\"\\"}", null, null, null, true ], null, null, null ]]}') as j
), data_rows as (
select f.value as r
from data d,
lateral flatten(input=> d.j:data) f
)
select dr.r[0] as v0
,dr.r[1] as v1
,dr.r[2] as v2
,dr.r[3] as v3
,parse_json(f.value) as addr_n
,addr_n:address::text as addr_address
,addr_n:city::text as addr_city
,addr_n:state::text as addr_state
,addr_n:zip::text as addr_zip
from data_rows dr,
lateral flatten(input=> dr.r[13]) f;
You can either keep the addr_n intermediate column or swap it out by cutting and pasting the expression like so:
,parse_json(f.value):address::text as addr_address
,parse_json(f.value):city::text as addr_city
,parse_json(f.value):state::text as addr_state
,parse_json(f.value):zip::text as addr_zip
You can follow the article for step-by-step for achieving it:
https://community.snowflake.com/s/article/Using-lateral-flatten-to-extract-data-from-JSON-internal-field
Hope this helps!

object to json in spring

Below is the code snippet from different project files
ProjectRepository.java
@Query(value=" SELECT p.id as project_id,p.project_status, t.id as test_suite_id, t.test_suite_status, ts.id as test_script_id,ts.test_script_status, tss.id as test_step_id, tss.test_step_status FROM project p LEFT OUTER JOIN test_suite t ON (p.id = t.project_id AND t.test_suite_status = 1) LEFT OUTER JOIN test_script ts ON (t.id = ts.test_suite_id AND ts.test_script_status=1) LEFT OUTER JOIN test_step tss ON (ts.id = tss.test_script_id AND tss.test_step_status=1) where p.team_id=:teamId and p.project_status=1 ",nativeQuery=true)
public List<Object> getActiveProjectsWithTeamId(@Param("teamId") Long teamId);
projectService.java
List<Object> findActiveProjectsByTeamId(Long id) throws DAOException;
projectServiceImpl.java
@Override
@Transactional(readOnly = true)
public List<Object> findActiveProjectsByTeamId(Long id) throws DAOException {
log.info("entered into ProjectServiceImpl:findOneByTeamId");
if (id != null) {
try {
// returns all projects of the specified team whose
// project_status is active
List<Object> project=projectRepository.getActiveProjectsWithTeamId(id);
return project;
} catch (Exception e) {
log.error("Exception raised while retrieving the project of the mentioned ID from database : "
+ e.getMessage());
throw new DAOException("Exception occured while retrieving the required project");
} finally {
log.info("exit from ProjectServiceImpl:findOneByTeamId");
}
}
return null;
}
I am getting below Output-
[ [ 1, true, 1, true, null, null, null, null ], [ 1, true, 2, true, null, null, null, null ], [ 1, true, 3, true, null, null, null, null ], [ 1, true, 5, true, null, null, null, null ], [ 1, true, 6, true, null, null, null, null ] ]
but I want the result in key value pair

MySQL select from view with user variables - Unexpected result

I have 2 tables and a view. In product_oper I have some products that I receive (when id_dest is 1) and that I sell (when id_src is 1). The table product_doc contains the date when the operation took place.
CREATE TABLE product_doc (
id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
doc_date date NOT NULL,
doc_no char(16) NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB;
INSERT INTO product_doc (id,doc_date,doc_no) VALUES
(1,'2009-10-07','1'),
(2,'2009-10-14','2'),
(3,'2009-10-28','4'),
(4,'2009-10-21','3');
CREATE TABLE product_oper (
id bigint(12) unsigned NOT NULL AUTO_INCREMENT,
id_document bigint(20) unsigned NOT NULL,
prod_id bigint(12) unsigned NOT NULL DEFAULT '0',
prod_quant decimal(16,4) NOT NULL DEFAULT '1.0000',
prod_value decimal(18,2) NOT NULL DEFAULT '0.00',
id_dest bigint(20) unsigned NOT NULL,
id_src bigint(20) unsigned NOT NULL,
PRIMARY KEY (id)
) ENGINE=InnoDB;
INSERT INTO product_oper (id,id_document,prod_id,prod_quant,prod_value,id_dest,id_src)
VALUES
(10,1,1,'2.0000', '5.00',1,0),
(11,3,1,'0.5000', '1.20',0,1),
(12,1,2,'3.0000','26.14',1,0),
(13,2,2,'0.5000','10.20',0,1),
(14,3,2,'0.3000', '2.60',0,1),
(15,4,2,'1.0000', '0.40',1,0);
In the view I want to see all the operations and the dates.
CREATE VIEW product_oper_view AS
SELECT product_oper.*, product_doc.doc_date AS doc_date, product_doc.doc_no AS doc_no
FROM product_oper JOIN product_doc ON product_oper.id_document = product_doc.id
WHERE 1;
Now I want to see the operations of a single product, and the amount and value at a specific date.
SET #amount=0.000, #balance=0.00;
SELECT product_oper_view.*,
IF(id_dest<>0, prod_quant, NULL) AS q_in,
IF(id_dest<>0, prod_value, NULL) AS v_in,
IF(id_src<>0, prod_quant, NULL) AS q_out,
IF(id_src<>0, prod_value, NULL) AS v_out,
#amount:=#amount + IF(id_dest<>0, 1, -1)*prod_quant AS q_amount,
#balance:=#balance + IF(id_dest<>0, 1, -1)*prod_value AS v_balance
FROM product_oper_view
WHERE prod_id=2 AND (id_dest=1 OR id_src=1)
ORDER BY doc_date;
The result I get is strange:
id, id_ prod_ prod_ id_ id_ doc_date, q_in, v_in, q_ v_
doc, quant,value,dest,src, q_out, v_out, amount, balance
12, 1, 3.0000, 26.14, 1, 0, '2009-10-07', 3.0000, 26.14, NULL , NULL, 3.000, 26.14
13, 2, 0.5000, 10.20, 0, 1, '2009-10-14', NULL , NULL, 0.5000, 10.20, 2.500, 15.94
15, 4, 1.0000, 0.40, 1, 0, '2009-10-21', 1.0000, 0.40, NULL , NULL, 3.200, 13.74
14, 3, 0.3000, 2.60, 0, 1, '2009-10-28', NULL , NULL, 0.3000, 2.60, 2.200, 13.34
The amount starts from zero,
at row 1: +3 => 3 (ok)
at row 2: -0.5 => 2.5 (ok)
at row 3: +1 => 3.2 (???)
at row 4: -0.3 => 2.2 (???)
It seems that MySQL doesn't honor the row order specified in the ORDER BY clause while evaluating the user-variable expressions, and instead processes the rows by id: note that the document with id 4 comes before the document with id 3 ('2009-10-21' < '2009-10-28').
Am I doing something wrong, or is it a bug of MySQL?
If I'm not totally wrong the ORDER-operation is one of the last things done when preparing the result set. Therefore your calculations are done before ordering the results. The correct way to circumvent this problem should be to use a subselect:
SET #amount=0.000, #balance=0.00;
SELECT p.*,
#amount:=#amount + IF(p.id_dest <> 0, 1, -1) * p.prod_quant AS q_amount,
#balance:=#balance + IF(p.id_dest <> 0, 1, -1) * p.prod_value AS v_balance
FROM (
SELECT product_oper_view.*,
IF(product_oper_view.id_dest <> 0, product_oper_view.prod_quant, NULL) AS q_in,
IF(product_oper_view.id_dest <> 0, product_oper_view.prod_value, NULL) AS v_in,
IF(product_oper_view.id_src <> 0, product_oper_view.prod_quant, NULL) AS q_out,
IF(product_oper_view.id_src <> 0, product_oper_view.prod_value, NULL) AS v_out
FROM product_oper_view
WHERE product_oper_view.prod_id = 2
AND (product_oper_view.id_dest = 1 OR product_oper_view.id_src = 1)
ORDER BY product_oper_view.doc_date
) AS p