I have a table defined like this:
CREATE TABLE data_table (
id bigserial,
"name" text NOT NULL,
"value" text NOT NULL,
CONSTRAINT data_table_pk PRIMARY KEY (id)
);
INSERT INTO data_table ("name", "value") VALUES
('key_1', 'value_1'),
('key_2', 'value_2');
I would like to get a JSON object from this table content, which will look like this:
{
"key_1":"value_1",
"key_2":"value_2"
}
Currently I'm using the client application to parse the result set into JSON format. Is it possible to accomplish this with a PostgreSQL query?
If you're on 9.4 you can do the following:
select json_object_agg("name", "value") from data_table;
json_object_agg
----------------------------------------------
{ "key_1" : "value_1", "key_2" : "value_2" }
On older versions you can build the same object manually with string_agg and to_json:
select
format(
'{%s}',
string_agg(format(
'%s:%s',
to_json("name"),
to_json("value")
), ',')
)::json as json_object
from data_table;
json_object
---------------------------------------
{"key_1":"value_1","key_2":"value_2"}
In a generic scenario you can nest more than one json_object_agg function on top of subqueries. Each inner subquery must expose at least one column that the outer query uses as keys for its json_object_agg call.
In the example below, the values of the column action produced by subquery C are used as keys by the json_object_agg in subquery B, and the values of the column role returned by subquery B are used as keys in query A.
-- query A
select json_object_agg(q1.role, q1.actions) from (
-- subquery B
select q2.role, json_object_agg(q2.action, q2.permissions) as actions from (
-- subquery C
select r.name as role, a.name as action, json_build_object (
'enabled', coalesce(a.bit & bit_and(p.actionids) <> 0, false),
'guestUnsupported', r.name = 'guest' and a."guestUnsupported"
) as permissions
from role r
left join action a on a.entity = 'route'
left join permission p on p.roleid = r.id
and a.entity = p.entityname
and (p.entityid = 1 or p.entityid is null)
where
1 = 1
and r.enabled
and r.deleted is null
group by r.name, a.id
) as q2 group by q2.role
) as q1
The result is a single row/single column with the following content:
{
"Role 1": {
"APIPUT": {
"enabled": false,
"guestUnsupported": false
},
"APIDELETE": {
"enabled": false,
"guestUnsupported": false
},
"APIGET": {
"enabled": true,
"guestUnsupported": false
},
"APIPOST": {
"enabled": true,
"guestUnsupported": false
}
},
"Role 2": {
"APIPUT": {
"enabled": false,
"guestUnsupported": false
},
"APIDELETE": {
"enabled": false,
"guestUnsupported": false
},
"APIGET": {
"enabled": true,
"guestUnsupported": false
},
"APIPOST": {
"enabled": false,
"guestUnsupported": false
}
}
}
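For reference, here is a minimal, self-contained sketch of the same nesting pattern, using hypothetical inline data instead of the role/action/permission tables above:
-- hypothetical inline data; only illustrates the nesting, not the original schema
with role_action (role, action, enabled) as (
    values ('Role 1', 'APIGET', true),
           ('Role 1', 'APIPOST', true),
           ('Role 2', 'APIGET', true),
           ('Role 2', 'APIPOST', false)
)
-- outer aggregate: one key per role
select json_object_agg(q1.role, q1.actions) as result
from (
    -- inner aggregate: one key per action within each role
    select role, json_object_agg(action, json_build_object('enabled', enabled)) as actions
    from role_action
    group by role
) as q1;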
I'm trying to query two values (DISCOUNT_TOTAL and ITEM_TOTAL) from a JSON object in a PostgreSQL database. Take the following query as reference:
SELECT
    mt.customer_order,
    totals -> 0 -> 'amount' -> 'centAmount' AS DISCOUNT_TOTAL,
    totals -> 1 -> 'amount' -> 'centAmount' AS ITEM_TOTAL
FROM
    my_table mt,
    to_jsonb(mt.my_json -> 'data' -> 'order' -> 'totals') AS totals
WHERE
    mt.customer_order in ('1000001', '1000002')
The query works just fine; the big problem is that, for some reason out of my control, the values DISCOUNT_TOTAL and ITEM_TOTAL sometimes change their positions in the JSON object from one customer_order to another.
So I cannot point at totals -> 0 -> 'amount' -> 'centAmount' and assume that it contains the value related to type: DISCOUNT_TOTAL (and the same for type: ITEM_TOTAL). Is there any workaround to get the correct centAmount for each type?
Use a path query instead of hardcoding the array positions:
with sample (jdata) as (
values (
'{
"data": {
"order": {
"email": "something",
"totals": [
{
"type": "ITEM_TOTAL",
"amount": {
"centAmount": 14990
}
},
{
"type": "DISCOUNT_TOTAL",
"amount": {
"centAmount": 6660
}
}
]
}
}
}'::jsonb)
)
select jsonb_path_query_first(
jdata,
'$.data.order.totals[*] ? (@.type == "DISCOUNT_TOTAL").amount.centAmount'
) as discount_total,
jsonb_path_query_first(
jdata,
'$.data.order.totals[*] ? (@.type == "ITEM_TOTAL").amount.centAmount'
) as item_total
from sample;
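Applied to the original table, the same path expressions look roughly like this (a sketch: my_json and customer_order are the names from the question, and my_json is assumed to be jsonb; cast it with ::jsonb if it is stored as json or text):
-- sketch against the question's table; adjust column names as needed
select mt.customer_order,
       jsonb_path_query_first(
           mt.my_json,
           '$.data.order.totals[*] ? (@.type == "DISCOUNT_TOTAL").amount.centAmount'
       ) as discount_total,
       jsonb_path_query_first(
           mt.my_json,
           '$.data.order.totals[*] ? (@.type == "ITEM_TOTAL").amount.centAmount'
       ) as item_total
from my_table mt
where mt.customer_order in ('1000001', '1000002');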
EDIT: In case your PostgreSQL version does not support JSON path queries (they require version 12 or later), you can do it by expanding the array into rows and then pivoting with CASE and SUM:
with sample (order_id, jdata) as (
values ( 1,
'{
"data": {
"order": {
"email": "something",
"totals": [
{
"type": "ITEM_TOTAL",
"amount": {
"centAmount": 14990
}
},
{
"type": "DISCOUNT_TOTAL",
"amount": {
"centAmount": 6660
}
}
]
}
}
}'::jsonb)
)
select order_id,
sum(
case
when el->>'type' = 'DISCOUNT_TOTAL' then (el->'amount'->>'centAmount')::int
else 0
end
) as discount_total,
sum(
case
when el->>'type' = 'ITEM_TOTAL' then (el->'amount'->>'centAmount')::int
else 0
end
) as item_total
from sample
cross join lateral jsonb_array_elements(jdata->'data'->'order'->'totals') as a(el)
group by order_id;
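Against the original table, the same pivot looks roughly like this (again a sketch assuming the my_json and customer_order columns from the question):
-- sketch against the question's table; assumes my_json is jsonb
select mt.customer_order,
       sum(case when el->>'type' = 'DISCOUNT_TOTAL' then (el->'amount'->>'centAmount')::int else 0 end) as discount_total,
       sum(case when el->>'type' = 'ITEM_TOTAL' then (el->'amount'->>'centAmount')::int else 0 end) as item_total
from my_table mt
cross join lateral jsonb_array_elements(mt.my_json->'data'->'order'->'totals') as a(el)
where mt.customer_order in ('1000001', '1000002')
group by mt.customer_order;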
The column contains the value given below:
[
{
"bActive": false,
"sSubLocation": "",
"aiSeries": [],
"iUser": "1"
},
{
"bActive": true,
"sSubLocation": "Mytestcase",
"aiSeries": [],
"iUser": "1"
}
]
I want to get the sSubLocation key for the elements that have bActive = true and sSubLocation = 'Mytestcase'.
SELECT test.id, jsontable.*
FROM test
CROSS JOIN JSON_TABLE(test.value,
'$[*]' COLUMNS (bActive BOOLEAN PATH '$.bActive',
sSubLocation VARCHAR(255) PATH '$.sSubLocation',
aiSeries JSON PATH '$.aiSeries',
iUser VARCHAR(255) PATH '$.iUser')) jsontable
WHERE bActive = true
  AND sSubLocation = 'Mytestcase'
https://dbfiddle.uk/?rdbms=mysql_8.0&fiddle=bcf7f238e23a2c282cdea76c183ae8fa
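For reference, a minimal setup the query above can run against (the table and column names test, id and value are assumptions matching the fiddle, not something given in the question):
-- hypothetical setup: a JSON column holding the array from the question
CREATE TABLE test (id INT PRIMARY KEY, value JSON);
INSERT INTO test (id, value) VALUES
(1, '[{"bActive": false, "sSubLocation": "", "aiSeries": [], "iUser": "1"},
     {"bActive": true, "sSubLocation": "Mytestcase", "aiSeries": [], "iUser": "1"}]');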
I have a JSONB field in a PostgreSQL (12.5) table Data_Source with data like this inside:
{
"C1": [
{
"id": 13371,
"class": "class_A1",
"inputs": {
"input_A1": 403096
},
"outputs": {
"output_A1": 403097
}
},
{
"id": 10200,
"class": "class_A2",
"inputs": {
"input_A2_1": 403096,
"input_A2_2": 403095
},
"outputs": {
"output_A2": [
[
403098,
{
"output_A2_1": 403101
},
{
"output_A2_2": [
403099,
403100
]
}
]
],
"output_A2_3": 403102,
"output_A2_4": 403103,
"output_A2_5": 403104
}
}
]
}
Could you please suggest an SQL query to extract the outputs from the JSONB field?
What I need to get as a result:
Output:
     name     | value
--------------+--------
 output_A1    | 403097
 output_A2    | 403098
 output_A2_1  | 403101
 output_A2_2  | 403099
 output_A2_2  | 403100
 output_A2_3  | 403102
 output_A2_4  | 403103
 output_A2_5  | 403104
Any ideas?
Whenever an array is encountered, JSONB_ARRAY_ELEMENTS() can be applied, and whenever an object is encountered, JSONB_EACH(), with the auxiliary JSONB_TYPEOF() function used to determine the respective type at each level. At the end, combine the results, whether of type array or object or not, through UNION ALL, such as:
WITH j AS
(
SELECT j2.*, JSONB_TYPEOF(j2.value) AS type
FROM t,
JSONB_EACH(jsdata) AS j0(k,v),
JSONB_ARRAY_ELEMENTS(v) AS j1,
JSONB_EACH(j1.value -> 'outputs') AS j2
), jj AS
(
SELECT key,j1.*,JSONB_TYPEOF(j1.value::JSONB) AS type
FROM j,
JSONB_ARRAY_ELEMENTS(value) AS j0(v),
JSONB_ARRAY_ELEMENTS(v) AS j1
WHERE type = 'array'
), jjj AS
(
SELECT key,j0.v,JSONB_TYPEOF(j0.v::JSONB) AS type,k
FROM jj,
JSONB_EACH(value) AS j0(k,v)
WHERE type IN ('array','object')
)
SELECT key,value
FROM
(
SELECT key,value,type
FROM j
UNION ALL
SELECT key,value,type
FROM jj
UNION ALL
SELECT k,v,type
FROM jjj
) jt
WHERE type NOT IN ('array','object')
UNION ALL
SELECT k,value
FROM jjj,JSONB_ARRAY_ELEMENTS(v) AS j0
WHERE type IN ('array','object')
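For reference, a minimal setup the query above can run against (the table and column names t and jsdata are assumptions; the document mirrors the structure from the question):
-- hypothetical table holding the question's document
CREATE TABLE t (jsdata jsonb);
INSERT INTO t (jsdata) VALUES ('{
  "C1": [
    {
      "id": 13371,
      "class": "class_A1",
      "inputs":  {"input_A1": 403096},
      "outputs": {"output_A1": 403097}
    },
    {
      "id": 10200,
      "class": "class_A2",
      "inputs":  {"input_A2_1": 403096, "input_A2_2": 403095},
      "outputs": {
        "output_A2": [[403098, {"output_A2_1": 403101}, {"output_A2_2": [403099, 403100]}]],
        "output_A2_3": 403102,
        "output_A2_4": 403103,
        "output_A2_5": 403104
      }
    }
  ]
}');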
I am using SQL Server 2016; I can't tell if this is something specific to this version or whether I am missing something. I am trying to update a common property, node_status, inside an array of objects. The UPDATE I am running to change node_status where is_node_complete is false only updates the first matching array index it finds, rather than all elements that fit the condition.
The JSON structure inside column json_doc in some_table
{
"personnel": [
{
"node_id": "FDA64E9F-3BAC-45FA-8819-8A086D96B359",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
},
{
"node_id": "AF829232-32F4-464B-8817-50ED24447AA4",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
},
{
"node_id": "E18F8197-B16D-4E0B-8EE9-DBF5B23A8EB5",
"node_data": {
"is_approved": true,
"is_node_complete": true,
"node_status": "complete"
}
},
{
"node_id": "286700AE-81C8-4F4F-955D-D8DCE44ED30C",
"node_data": {
"is_approved": false,
"is_node_complete": true,
"node_status": "complete"
}
},
{
"node_id": "BC7BD024-70F1-459B-BDBF-945A3EED666C",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
}
]
}
My query to update the column:
DECLARE @rec_id INT = 1;
WITH personnel_CTE
AS (
SELECT *
FROM some_table AS acm
CROSS APPLY openjson(json_doc) WITH (personnel_node NVARCHAR(MAX) '$.personnel' AS json)
CROSS APPLY openjson(personnel_node) pn
WHERE id = @rec_id
AND cast(json_value(pn.value, '$.node_data.is_node_complete') AS BIT) = 0
)
UPDATE personnel_CTE
SET json_doc = json_modify(json_doc, '$.personnel[' + personnel_CTE.[key] + '].node_data.node_status', 'reviewer_assigned')
The GUIDs are unique, as is the id for some_table. This is a truncated toy example, but these properties are the key items for the update.
I don't think you can update the JSON content with this statement (it updates one row with values from multiple rows), but you may try the following approach, which parses the JSON data as a table using OPENJSON(), updates that table's content and outputs it back as JSON using FOR JSON PATH:
JSON:
DECLARE @json nvarchar(max) = N'
{
"personnel": [
{
"node_id": "FDA64E9F-3BAC-45FA-8819-8A086D96B359",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
},
{
"node_id": "AF829232-32F4-464B-8817-50ED24447AA4",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
},
{
"node_id": "E18F8197-B16D-4E0B-8EE9-DBF5B23A8EB5",
"node_data": {
"is_approved": true,
"is_node_complete": true,
"node_status": "complete"
}
},
{
"node_id": "286700AE-81C8-4F4F-955D-D8DCE44ED30C",
"node_data": {
"is_approved": false,
"is_node_complete": true,
"node_status": "complete"
}
},
{
"node_id": "BC7BD024-70F1-459B-BDBF-945A3EED666C",
"node_data": {
"is_approved": null,
"is_node_complete": false,
"node_status": "requested"
}
}
]
}'
Table and statement:
CREATE TABLE some_table (id int, json_doc nvarchar(max))
INSERT INTO some_table (id, json_doc) VALUES (1, @json)
INSERT INTO some_table (id, json_doc) VALUES (2, @json)
DECLARE @rec_id INT = 1;
UPDATE some_table
SET json_doc = (
SELECT
node_id AS 'node_id',
is_approved AS 'node_data.is_approved',
is_node_complete AS 'node_data.is_node_complete',
CASE
WHEN CONVERT(bit, is_node_complete) = 0 THEN 'reviewer_assigned'
ELSE node_status
END AS 'node_data.node_status'
FROM OPENJSON (json_doc, '$.personnel') WITH (
node_id nvarchar(36) '$.node_id',
is_approved bit '$.node_data.is_approved',
is_node_complete bit '$.node_data.is_node_complete',
node_status nvarchar(50) '$.node_data.node_status'
)
FOR JSON PATH, ROOT ('personnel'), INCLUDE_NULL_VALUES
)
WHERE id = @rec_id
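To spot-check the result you can read the modified document back with OPENJSON; a quick sketch (run in the same batch so @rec_id is still in scope):
-- list each personnel entry with its updated node_status for the modified row
SELECT p.[key] AS array_index,
       JSON_VALUE(p.value, '$.node_id') AS node_id,
       JSON_VALUE(p.value, '$.node_data.node_status') AS node_status
FROM some_table
CROSS APPLY OPENJSON(json_doc, '$.personnel') AS p
WHERE id = @rec_id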
In Postgres, say I have a schema like this:
CREATE TABLE items (
    id          bigint,
    type        varchar(40),
    entity_id   bigint,
    entity_type varchar(40),
    user_id     bigint
);
And I want to query the table to get the info like this:
{
"typeA": {
"count": 3,
"me": true
},
"typeC": {
"count": 3,
"me": false
},
"typeE": {
"count": 3,
"me": false
},
"typeR": {
"count": 3,
"me": true
}
}
From a query where the main data is this:
SELECT ARRAY_AGG(x)
FROM
(
SELECT type,
count(*),
(CASE
WHEN (SELECT id
FROM items as i
WHERE i.entity_type = 'sometype'
AND i.entity_id = 234
AND i.user_id = 32
AND i.type = items.type) is not null
THEN true
ELSE false
END) AS me
FROM items
WHERE items.entity_type = 'sometype'
AND items.entity_id = 234
GROUP BY type
) as x
This returns an array with the info I need (type, count and me), but I need it formatted like the object above rather than like this:
[
{
"type": "typeA",
"count": 3,
"me": true
},
{
"type": "typeC",
"count": 3,
"me": false
},
{
"type": "typeE",
"count": 3,
"me": false
},
{
"type": "typeR",
"count": 3,
"me": true
}
]
This is the way it is currently formatted. I have been unable to find a way to build the JSON object I need. I was able to get three JSON objects like that, but I need them nested in one object.
Not exactly what you want, but going by PostgreSQL - Aggregate Functions, I would guess you can try json_object_agg(name, value), e.g.:
SELECT JSON_OBJECT_AGG(type, x)
FROM
(
SELECT type,
count(*),
(CASE
WHEN (SELECT id
FROM items as i
WHERE i.entity_type = 'sometype'
AND i.entity_id = 234
AND i.user_id = 32
AND i.type = items.type) is not null
THEN true
ELSE false
END) AS me
FROM items
WHERE items.entity_type = 'sometype'
AND items.entity_id = 234
GROUP BY type, me
) as x
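If you need the inner objects to contain only count and me (without the repeated type key), here is a sketch that combines json_object_agg with json_build_object over the same subquery; the me flag is written with EXISTS here instead of the scalar subquery above:
-- sketch: result keyed by type, with only count and me inside each object
SELECT JSON_OBJECT_AGG(x.type, JSON_BUILD_OBJECT('count', x.count, 'me', x.me)) AS result
FROM
(
  SELECT type,
         count(*) AS count,
         EXISTS (SELECT 1
                 FROM items AS i
                 WHERE i.entity_type = 'sometype'
                   AND i.entity_id = 234
                   AND i.user_id = 32
                   AND i.type = items.type) AS me
  FROM items
  WHERE items.entity_type = 'sometype'
    AND items.entity_id = 234
  GROUP BY type
) AS x;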