How to generate a JSON value array in Azure SQL

DECLARE @segArr NVARCHAR(max)
set @segArr = N'[1,2,3]'
DECLARE @segTb table (
k int,
v NVARCHAR(20)
);
insert into @segTb
VALUES
(0, 'a'),
(1, 'b'),
(2, 'c'),
(3, 'd'),
(4, 'e'),
(5, 'f');
select t.v from @segTb t
JOIN openjson(@segArr) a on a.[key] = t.k
for JSON auto;
I have a simple table with a key-value-like structure and a JSON array listing the keys of the values I want.
The SELECT statement produces the right data, but the JSON format is wrong: it outputs an array of objects.
[
{
"v": "a"
},
{
"v": "b"
},
{
"v": "c"
}
]
But what I needed is an array of direct values.
[ "a", "b", "c" ]

You can use more conventional string manipulation methods to create JSON arrays in Azure SQL DB, such as STRING_AGG, which aggregates strings with a given separator (e.g. a comma), and QUOTENAME, which surrounds a string with a given quote character. A simple example:
SELECT QUOTENAME( STRING_AGG( QUOTENAME(v, '"' ), ',' ), '[' ) AS yourArray
FROM
(
SELECT t.v FROM @segTb t
INNER JOIN OPENJSON(@segArr) a ON a.[key] = t.k
) x;
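With the sample data above, this should return the single string value:
["a","b","c"]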
For your particular example you could just create a user-defined table type with CREATE TYPE. This has the advantage that you can add a primary key to the type (guaranteeing duplicates cannot be added), give the optimizer a bit more information at run time, and use native relational abilities rather than bolted-on NoSQL ones. A simple example which should run end-to-end:
IF NOT EXISTS ( SELECT * FROM sys.types st INNER JOIN sys.schemas ss ON st.schema_id = ss.schema_id WHERE st.name = N'tvpItems' AND ss.name = N'dbo')
CREATE TYPE dbo.tvpItems AS TABLE
(
k INT PRIMARY KEY
);
GO
DECLARE @items AS dbo.tvpItems
INSERT INTO @items VALUES ( 1 ), ( 2 ), ( 3 )
DECLARE @segTb TABLE (
k INT,
v NVARCHAR(20)
);
INSERT INTO @segTb
VALUES
(0, 'a'),
(1, 'b'),
(2, 'c'),
(3, 'd'),
(4, 'e'),
(5, 'f');
SELECT t.v
FROM @segTb t
INNER JOIN @items i ON i.k = t.k
FOR JSON AUTO;
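If you also need the plain value array with the table-type approach (rather than the array of objects FOR JSON AUTO produces), the STRING_AGG/QUOTENAME aggregation from the first suggestion can be reused. A sketch, run in the same batch as the declarations above; with keys 1, 2 and 3 it should return ["b","c","d"]:
SELECT QUOTENAME( STRING_AGG( QUOTENAME(t.v, '"' ), ',' ), '[' ) AS yourArray
FROM @segTb t
INNER JOIN @items i ON i.k = t.k;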

Related

mysql recursive query not showing all possible results

I am trying to follow the Train Routes example from the link below
https://www.percona.com/blog/2020/02/13/introduction-to-mysql-8-0-recursive-common-table-expression-part-2/
My table is as below
Schema (MySQL v8.0)
CREATE TABLE `routesy` (
`id` int(1) DEFAULT NULL,
`stationA` varchar(6) DEFAULT NULL,
`stationB` varchar(6) DEFAULT NULL,
`dist` int(3) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO `routesy` (`id`, `stationA`, `stationB`, `dist`) VALUES
(1, 'DO0182', 'DO0064', 10),
(2, 'DO0064', 'DO0147', 70),
(3, 'DO0064', 'DO0049', 80),
(4, 'DO0064', 'DO0139', 90),
(5, 'DO0206', 'DO0147', 140),
(6, 'DO0072', 'DO0139', 150),
(7, 'DO0008', 'DO0049', 260),
(8, 'DO0208', 'DO0008', 280);
Query #1
WITH RECURSIVE paths (cur_path, cur_dest, tot_distance) AS (
SELECT CAST(stationA AS CHAR(100)), CAST(stationA AS CHAR(100)), 0
FROM routesy
WHERE stationA='DO0182'
UNION
SELECT CONCAT(paths.cur_path, ' -> ', routesy.stationB), routesy.stationB, paths.tot_distance+routesy.dist
FROM paths, routesy
WHERE paths.cur_dest = routesy.stationA
AND NOT FIND_IN_SET(routesy.stationB, REPLACE(paths.cur_path,' -> ',',') ) )
SELECT cur_path,cur_dest,tot_distance FROM paths;
cur_path                    | cur_dest | tot_distance
--------------------------- | -------- | ------------
DO0182                      | DO0182   | 0
DO0182 -> DO0064            | DO0064   | 10
DO0182 -> DO0064 -> DO0147  | DO0147   | 80
DO0182 -> DO0064 -> DO0049  | DO0049   | 90
DO0182 -> DO0064 -> DO0139  | DO0139   | 100
View on DB Fiddle
I was hoping to see the results below as well, since these are also valid paths. Why does the recursion stop at 3 levels?
DO0182 -> DO0064 -> DO0147 -> DO0206
DO0182 -> DO0064 -> DO0139 -> DO0072
DO0182 -> DO0064 -> DO0049 -> DO0008 -> DO0208
Further to @vtan707's answer, the way to make the routes bidirectional is to add another UNION, like:
WITH RECURSIVE paths (cur_path, cur_dest, tot_distance) AS (
SELECT CAST(stationA AS CHAR(100)), CAST(stationA AS CHAR(100)), 0
FROM routesy
WHERE stationA='DO0182'
UNION
SELECT CONCAT(paths.cur_path, ',', routesy.stationB), routesy.stationB, paths.tot_distance+routesy.dist
FROM paths JOIN routesy ON paths.cur_dest = routesy.stationA
AND NOT FIND_IN_SET(routesy.stationB, paths.cur_path)
UNION
SELECT CONCAT(paths.cur_path, ',', routesy.stationA), routesy.stationA, paths.tot_distance+routesy.dist
FROM paths JOIN routesy ON paths.cur_dest = routesy.stationB
AND NOT FIND_IN_SET(routesy.stationA, paths.cur_path)
)
SELECT REPLACE(cur_path,',',' -> '),cur_dest,tot_distance FROM paths;
So the second UNION is the same as your first, with stationA and stationB transposed.
Doing the ' -> ' replacement once on the result set is probably easier too.
ref: mysql-8.0 fiddle
MariaDB has CYCLE RESTRICT as of 10.5:
WITH RECURSIVE paths (start, cur_path, cur_dest, tot_distance) AS (
SELECT StationA, CAST(stationA AS CHAR(100)), CAST(stationA AS CHAR(100)), 0
FROM routesy
WHERE stationA='DO0182'
UNION
SELECT StationA, CONCAT(paths.cur_path, ' -> ', routesy.stationB), routesy.stationB, paths.tot_distance+routesy.dist
FROM paths JOIN routesy ON paths.cur_dest = routesy.stationA
UNION
SELECT StationB, CONCAT(paths.cur_path, ' -> ', routesy.stationA), routesy.stationA, paths.tot_distance+routesy.dist
FROM paths JOIN routesy ON paths.cur_dest = routesy.stationB
)
CYCLE start, cur_dest RESTRICT
SELECT start, cur_path,cur_dest, tot_distance FROM paths;
ref mariadb-10.5 fiddle
note: this has small loops at the end of the journey which I haven't worked out (hence 17 rows instead of 9 in the result - see fiddle).
With your given data in the table:
INSERT INTO `routesy` (`id`, `stationA`, `stationB`, `dist`) VALUES
(1, 'DO0182', 'DO0064', 10),
(2, 'DO0064', 'DO0147', 70),
(3, 'DO0064', 'DO0049', 80),
(4, 'DO0064', 'DO0139', 90),
(5, 'DO0206', 'DO0147', 140),
(6, 'DO0072', 'DO0139', 150),
(7, 'DO0008', 'DO0049', 260),
(8, 'DO0208', 'DO0008', 280);
and the starting point in the query ("stationA='DO0182'"), we are only able to trace 3 levels, as the query results have indicated.
The path is one-directional, i.e. Station A -> Station B is the only direction that is considered in the path (not Station B -> Station A).
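A quick way to see this with the sample data is to check whether any of the destinations reached at level 3 ever appears as a stationA (a small check query, not part of the original answer):
SELECT * FROM routesy WHERE stationA IN ('DO0147', 'DO0049', 'DO0139');
This returns no rows, so once the recursion reaches those stations there is no routesy row with a matching stationA to join to, and the paths cannot be extended further.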
Hope this is helpful.

How to fill the missing date in a sql table

I have a SQL table with discontinuous dates:
CREATE TABLE IF NOT EXISTS date_test1 ( items CHAR ( 8 ), trade_date date );
INSERT INTO `date_test1` VALUES ( 'a', '2020-03-20');
INSERT INTO `date_test1` VALUES ( 'b', '2020-03-20');
INSERT INTO `date_test1` VALUES ('a', '2020-03-21');
INSERT INTO `date_test1` VALUES ( 'c', '2020-03-22');
INSERT INTO `date_test1` VALUES ( 'd', '2020-03-22');
INSERT INTO `date_test1` VALUES ('a', '2020-03-25');
INSERT INTO `date_test1` VALUES ( 'e', '2020-03-26');
In this table, '2020-03-23' and '2020-03-24' are missing. I want to fill them with the previous day's data, in this case the '2020-03-22' data.
Expected result:
The number of consecutive missing dates and the number of records per day are both uncertain.
So how can I do this in MySQL?
This solution uses Python and assumes that there aren't so many rows that they cannot be read into memory. I do not warrant this code free from defects; use at your own risk. So I suggest you run this against a copy of your table or make a backup first.
This code uses the pymysql driver.
import pymysql
from datetime import date, timedelta
from itertools import groupby
import sys

conn = pymysql.connect(db='x', user='x', password='x', charset='utf8mb4', use_unicode=True)
cursor = conn.cursor()

# must be sorted by date:
cursor.execute('select items, trade_date from date_test1 order by trade_date, items')
rows = cursor.fetchall()  # tuples: (str, datetime.date)
if len(rows) == 0:
    sys.exit(0)

groups = []
for k, g in groupby(rows, key=lambda row: row[1]):
    groups.append(list(g))

one_day = timedelta(days=1)
previous_group = groups.pop(0)
next_date = previous_group[0][1]
for group in groups:
    next_date = next_date + one_day
    while group[0][1] != next_date:
        # missing date
        for tuple in previous_group:
            cursor.execute('insert into date_test1(items, trade_date) values(%s, %s)', (tuple[0], next_date))
            print('inserting', tuple[0], next_date)
        conn.commit()
        next_date = next_date + one_day
    previous_group = group
Prints:
inserting c 2020-03-23
inserting d 2020-03-23
inserting c 2020-03-24
inserting d 2020-03-24
Discussion
With your sample data, after the rows are fetched, rows is:
(('a', datetime.date(2020, 3, 20)), ('b', datetime.date(2020, 3, 20)), ('a', datetime.date(2020, 3, 21)), ('c', datetime.date(2020, 3, 22)), ('d', datetime.date(2020, 3, 22)), ('a', datetime.date(2020, 3, 25)), ('e', datetime.date(2020, 3, 26)))
After the following is run:
groups = []
for k, g in groupby(rows, key=lambda row: row[1]):
groups.append(list(g))
groups is:
[[('a', datetime.date(2020, 3, 20)), ('b', datetime.date(2020, 3, 20))], [('a', datetime.date(2020, 3, 21))], [('c', datetime.date(2020, 3, 22)), ('d', datetime.date(2020, 3, 22))], [('a', datetime.date(2020, 3, 25))], [('e', datetime.date(2020, 3, 26))]]
That is, all the tuples with the same date are grouped together in a list, so it becomes easier to detect missing dates.

Maximum length of Json value in postgresql 10

Suppose I have the following data
WITH test(id, data) AS (
VALUES
(1, '{"key1": "Some text"}'::jsonb),
(2, '{"other_key": "Some longer text"}'::jsonb),
(3, '{"key_3": "Short"}'::jsonb)
)
select ??? from test;
Note that the JSON data is simple key-value data. The key can be anything, the value is always a String.
I want to return the maximum number of characters of the value field. 16 in this case, select length('Some longer text');
You need to turn the values into a set, then you can operate on it:
WITH test(id, data) AS (
VALUES
(1, '{"key1": "Some text"}'::jsonb),
(2, '{"other_key": "Some longer text"}'::jsonb),
(3, '{"key_3": "Short"}'::jsonb)
)
select max(length(t.val))
from test, jsonb_each_text(data) as t(k,val);
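If you also want to know which row and key hold the longest value, the same set-returning approach can be extended slightly (a sketch, not part of the original answer):
WITH test(id, data) AS (
VALUES
(1, '{"key1": "Some text"}'::jsonb),
(2, '{"other_key": "Some longer text"}'::jsonb),
(3, '{"key_3": "Short"}'::jsonb)
)
select id, t.k, t.val, length(t.val) as len
from test, jsonb_each_text(data) as t(k, val)
order by len desc
limit 1;
With the sample data this returns id 2, key other_key and length 16.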

single table recurring relatives

Can't name it less confusing, sorry...
Imagine a DB table with 3 columns:
object_id - some entity,
relation_key - some property of the object,
bundle_id - we must group (generalize) different objects with this id.
The table has a unique key on [object_id, relation_key]:
a single object can't have duplicate relation_keys,
but different objects can have equal relation_keys.
A picture to make it easier to understand:
Many objects can have deep chains of relations via relation_key; all these objects should end up related by the same bundle_id.
How can I update the bundle_id column with correct values using just a single query?
I could write a procedure, but that approach is unsuitable for me.
I'm looking for a statement like:
"UPDATE example [join example ON ...] SET bundle_id = ... WHERE ..."
Here is the "before" schema for MySQL:
CREATE TABLE `example` (
`bundle_id` INT(11) DEFAULT NULL,
`object_id` INT(11) NOT NULL,
`relation_key` INT(11) NOT NULL,
PRIMARY KEY (`object_id`,`relation_key`)
);
INSERT INTO `example`(`object_id`, `relation_key`)
VALUES (1, 4), (1, 5), (1, 6), (2, 6), (2, 7), (2, 8), (3, 4),
(3, 9), (3, 10), (4, 11), (4, 12), (4, 13), (5, 14), (5, 15), (5, 16), (6, 17), (6, 11), (6, 18);
Here is the "before" example: fiddle example (sqlfiddle is stuck at the moment).
And the "after" state will look as if you ran these queries:
UPDATE `example` SET `bundle_id` = 1 WHERE `object_id` IN (1, 2, 3);
UPDATE `example` SET `bundle_id` = 2 WHERE `object_id` IN (4, 6);
UPDATE `example` SET `bundle_id` = 3 WHERE `object_id` IN (5);
object1 is related to object2 by key=6,
object3 is related to object1 by key=4,
so objects 1, 2 and 3 are related together.
This is the first group: bundle_id=1.
There are no other keys linking other objects to 1, 2, 3.
object_id=4 is related to object_id=6 by key=11,
so objects 4 and 6 are related together.
This is the second group: bundle_id=2.
There are no other keys linking other objects to 4, 6.
object_id=5 has no relations to other objects;
all of its keys belong to it alone.
This is the third group: bundle_id=3.
There are no other keys linking other objects to 5.
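To make the relations described above concrete, a query like the following (a sketch against the example table; it lists the direct relations, it does not solve the bundling itself) shows every pair of objects that share a relation_key:
SELECT a.object_id AS obj1, b.object_id AS obj2, a.relation_key
FROM example a
JOIN example b ON b.relation_key = a.relation_key AND b.object_id > a.object_id
ORDER BY a.relation_key;
With the data above it returns the pairs (1, 3) via key 4, (1, 2) via key 6 and (4, 6) via key 11; the bundles are the connected groups formed by these pairs.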

Generate nested json with counting in Postgresql

I created a simple database (in latest stable postgresql), like this:
create table table_a(id int primary key not null, name char(10));
create table table_b(id int primary key not null, name char(10), parent_a_id int);
create table table_c(id int primary key not null, name char(10), parent_a_id int, parent_b_id int, parent_c_id int, c_number int);
create table table_d(id int primary key not null, name char(10), parent_c_id int, d_number int);
with some example data like this:
insert into table_a(id, name) values(1, 'a');
insert into table_b(id, name, parent_a_id) values(1, 'b', 1);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(1, 'c1', 1, 1, null, 1);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(2, 'c1.1', 1, 1, 1, 5);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(3, 'c1.1.1', 1, 1, 2, 2);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(4, 'c1.2', 1, 1, 1, 8);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(5, 'c2', 1, 1, null, 4);
insert into table_d(id, name, parent_c_id, d_number) values(1, 'c1_d1', 1, 5);
insert into table_d(id, name, parent_c_id, d_number) values(2, 'c1.1_d1', 2, 6);
insert into table_d(id, name, parent_c_id, d_number) values(3, 'c1.1_d2', 2, 1);
insert into table_d(id, name, parent_c_id, d_number) values(4, 'c1.1.1_d1', 3, 2);
insert into table_d(id, name, parent_c_id, d_number) values(5, 'c2_d1', 5, 4);
insert into table_d(id, name, parent_c_id, d_number) values(6, 'c2_d2', 5, 3);
insert into table_d(id, name, parent_c_id, d_number) values(7, 'c2_d3', 5, 7);
Now I want to generate json like this: http://codebeautify.org/jsonviewer/cb9bc2a1
With relation rules:
table_a has many table_b
table_b has one table_a and has many table_c (select only where table_c_id is null)
table_c has one table_a and has one table_b and has many table_c (children) and has one table_c (parent)
and counting rules:
table_c has d_numbers_sum (sum of d_number in table_d and sum of d_numbers_sum in table_c relation )
table_b has d_numbers_sum (sum of d_numbers_sum in table_c relation )
table_a has d_numbers_sum (sum of d_numbers_sum in table_b relation )
table_c has real_c_number (if has children_c then sum of real_c_number in table_c relation else c_number)
table_b has real_c_number_sum (sum of real_c_number in table_c relation )
table_a has real_c_number_sum (sum of real_c_number_sum in table_b relation )
Is it possible to generate that JSON with those rules in pure PostgreSQL code?
Is it possible to generate a shortcut function for this, like:
select * from my_shourtcat where id = ?;
or without an id (generating a JSON array):
select * from my_shourtcat;
Can you show me an example with a description (how to generate the nested JSON and the counting), so I could apply it to similar, but more complex, relations in my app?
EDIT:
I wrote something interesting, but it's not a 100% nested hash - here every leaf has its own tree and the result is an array of these trees; I need to deep-merge that array to create an array of unique trees:
with recursive j as (
SELECT c.*, json '[]' children -- at max level, there are only leaves
FROM test.table_c c
WHERE (select count(1) from test.table_c where parent_c_id = c.id) = 0
UNION ALL
-- a little hack, because PostgreSQL doesn't like aggregated recursive terms
SELECT (c).*, array_to_json(array_agg(j)) children
FROM (
SELECT c, j
FROM j
JOIN test.table_c c ON j.parent_c_id = c.id
) v
GROUP BY v.c
)
SELECT json_agg(row_to_json(j)) json_tree FROM j WHERE parent_c_id is null;
The answer consists of two parts. First to rig up a basic json structure, and then to build up nested json objects from self-referencing column in table_c.
UPDATE: I rewrote example/part 2 as a pure sql solution, and added that code as example 3.
I also added a plpgsql function that encapsulates almost all of the code; it takes the name of a view as input to produce the nested json. See example 4.
All code requires Postgres 9.5.
The first code sets up a json object with most joins except for the nested children in table_c. The counting part is mostly left out.
In the second code example I wrote a "merge" function in pure plpgsql, which should solve the nested json problem. This solution requires only PG9.5 and no extensions, since plpgsql is built in.
As an alternative, I found one other solution that requires plv8 installed, which does a deep merge in JavaScript.
Creating nested json is not trivial to do in pure sql, where the challenge is to merge the separate json trees we can get from a recursive CTE.
Code example 1
Creating the query as a view makes it easy to reuse the query to either return a json array of all objects from table_a, or return only one object with a given id.
I made some small changes to the data model and data. The code for a self-contained example follows:
--TABLES
DROP SCHEMA IF EXISTS TEST CASCADE;
CREATE SCHEMA test;
-- Using text instead of char(10), to avoid padding. For most databases text is the best choice.
-- Postgresql uses the same implementation under the hood (char vs text)
-- Source: https://www.depesz.com/2010/03/02/charx-vs-varcharx-vs-varchar-vs-text/
create table test.table_a(id int primary key not null, name text);
create table test.table_b(id int primary key not null, name text, parent_a_id int);
create table test.table_c(id int primary key not null, name text, parent_a_id int, parent_b_id int, parent_c_id int, c_number int);
create table test.table_d(id int primary key not null, name text, parent_c_id int, d_number int);
--DATA
insert into test.table_a(id, name) values(1, 'a');
-- Changed: parent_a_id=1 (instead of null)
insert into test.table_b(id, name, parent_a_id) values(1, 'b', 1);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(1, 'c1', 1, 1, null, 1);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(2, 'c1.1', 1, 1, 1, 5);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(3, 'c1.1.1', 1, 1, 2, 2);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(4, 'c1.2', 1, 1, 1, 8);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(5, 'c2', 1, 1, null, 4);
insert into test.table_d(id, name, parent_c_id, d_number) values(1, 'c1_d1', 1, 5);
insert into test.table_d(id, name, parent_c_id, d_number) values(2, 'c1.1_d1', 2, 6);
insert into test.table_d(id, name, parent_c_id, d_number) values(3, 'c1.1_d2', 2, 1);
insert into test.table_d(id, name, parent_c_id, d_number) values(4, 'c1.1.1_d1', 3, 2);
insert into test.table_d(id, name, parent_c_id, d_number) values(5, 'c2_d1', 5, 4);
insert into test.table_d(id, name, parent_c_id, d_number) values(6,'c2_d2', 5, 3);
insert into test.table_d(id, name, parent_c_id, d_number) values(7, 'c2_d3', 5, 7);
CREATE OR REPLACE VIEW json_objects AS
--Root object
SELECT ta.id, json_build_object(
'id', ta.id,
'name', ta.name,
'd_numbers_sum', (SELECT sum(d_number) FROM test.table_d),
'real_c_number_sum', null,
'children_b', (
-- table_b
SELECT json_agg(json_build_object(
'id', tb.id,
'name', tb.name,
'd_numbers_sum', null,
'real_c_number_sum', null,
'children_c', (
-- table_c
SELECT json_agg(json_build_object(
'id', tc.id,
'name', tc.name,
'd_numbers_sum', null,
'real_c_number_sum', null,
'children_d', (
-- table_d
SELECT json_agg(json_build_object(
'id', td.id,
'name', td.name,
'd_numbers_sum', null,
'real_c_number_sum', null
))
FROM test.table_d td
WHERE td.parent_c_id = tc.id
)
))
FROM test.table_c tc
WHERE tc.parent_b_id = tb.id
)
))
FROM test.table_b tb
WHERE tb.parent_a_id = ta.id
)
) AS object
FROM test.table_a ta;
-- Return json array of all objects
SELECT json_agg(object) FROM json_objects;
-- Return only json object with given id
SELECT object FROM json_objects WHERE id = 1;
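The counting rules from the question are left out of the view above. As a separate sketch (my addition, not part of the original answer), the d_numbers_sum rule for table_c can be computed with a recursive CTE over the test.* tables just created: each c-node sums the d_number values of its own table_d rows plus those of everything below it.
WITH RECURSIVE c_tree(root_id, c_id) AS (
-- every c-node is an ancestor of itself ...
SELECT id, id FROM test.table_c
UNION ALL
-- ... and of all of its descendants
SELECT t.root_id, c.id
FROM c_tree t
JOIN test.table_c c ON c.parent_c_id = t.c_id
)
SELECT t.root_id AS c_id, COALESCE(SUM(d.d_number), 0) AS d_numbers_sum
FROM c_tree t
LEFT JOIN test.table_d d ON d.parent_c_id = t.c_id
GROUP BY t.root_id
ORDER BY t.root_id;
With the sample data this gives 14, 9, 2, 0 and 14 for c1, c1.1, c1.1.1, c1.2 and c2 respectively; the per-b and per-a sums from the question can be built on top of these values in the same way.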
Code example 2
Here we map the data from table_c so we can insert it directly into a recursive CTE from the documentation, for readability and educational purposes.
Then it prepares the data as input to the "merge" function. For simplicity I just aggregated the rows into a big json object. The performance should be ok.
We can choose to get the parent object, or only its children as an (json)array in the third function parameter.
Which node to get the children for is specified in the last query in the last lines of the example. This query can be used all places where we need the children for a table_c node.
I did test this on a more complex example and it looks like I sorted out most rough edges.
The three parts of the CTE (graph, search_graph and filtered_graph) can be refactored into one for performance, since CTE's are optimization fences for the database planner, but I kept this version for readability and debugging.
This example utilizes jsonb instead of json, see the documentation.
The reason for using jsonb here is not having to reparse the json each time we manipulate it in the function. When the function is done, the result is cast back to json so it can be inserted directly into the code in example 1.
--DROP VIEW test.tree_path_list_v CASCADE;
CREATE OR REPLACE VIEW test.tree_path_list_v AS
WITH RECURSIVE
-- Map the source data so we can use it directly in a recursive query from the documentation:
graph AS
(
SELECT id AS id, parent_c_id AS link, name, jsonb_build_object('id', id, 'name', name, 'parent_c_id', parent_c_id, 'parent_a_id', parent_a_id, 'parent_b_id', parent_b_id) AS data
FROM test.table_c
),
-- Recursive query from documentation.
-- http://www.postgresql.org/docs/current/static/queries-with.html
search_graph(id, link, data, depth, path, cycle) AS (
SELECT g.id, g.link, g.data, 1,
ARRAY[g.id],
false
FROM graph g
UNION ALL
SELECT g.id, g.link, g.data, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM graph g, search_graph sg
WHERE g.id = sg.link AND NOT cycle
),
-- Decorate/filter the result so it can be used as input to the "test.create_jsonb_tree" function
filtered_graph AS (
SELECT
sg.path[1] AS id,
sg.path[2] AS parent_id,
sg.depth AS level,
sg.id AS start_id,
d.name,
sg.path,
d.data::jsonb AS json
FROM search_graph sg
INNER JOIN graph d ON d.id = sg.path[1]
ORDER BY level DESC
)
-- "Main" query
SELECT * FROM filtered_graph
;
-- Returns a json object with all children merged into its parents.
-- Parameter 1 "_tree_path_list": A json document with rows from the view "test.tree_path_list_v" aggregated as one big json.
-- Parameter 2 "_children_keyname": Choose the name for the children
CREATE OR REPLACE FUNCTION test.create_jsonb_tree(_tree_path_list jsonb, _children_keyname text DEFAULT 'children', _get_only_children boolean DEFAULT false)
RETURNS jsonb AS
$$
DECLARE
node_map jsonb := jsonb_build_object();
node_result jsonb := jsonb_build_array();
parent_children jsonb := jsonb_build_array();
node jsonb;
relation jsonb;
BEGIN
FOR node IN SELECT * FROM jsonb_array_elements(_tree_path_list)
LOOP
RAISE NOTICE 'Input (per row): %', node;
node_map := jsonb_set(node_map, ARRAY[node->>'id'], node->'json');
END LOOP;
FOR relation IN SELECT * FROM jsonb_array_elements(_tree_path_list)
LOOP
IF ( (relation->>'level')::int > 1 ) THEN
parent_children := COALESCE(node_map->(relation->>'parent_id')->_children_keyname, jsonb_build_array()) || jsonb_build_array(node_map->(relation->>'id'));
node_map := jsonb_set(node_map, ARRAY[relation->>'parent_id', _children_keyname], parent_children);
node_map := node_map - (relation->>'id');
ELSE
IF _get_only_children THEN
node_result := node_map->(relation->>'id')->_children_keyname;
ELSE
node_result := node_map->(relation->>'id');
END IF;
END IF;
END LOOP;
RETURN node_result;
END;
$$ LANGUAGE plpgsql
;
-- Aggregate the rows from the view into a big json object, then pass it to the function.
SELECT test.create_jsonb_tree(
( SELECT jsonb_agg( (SELECT x FROM (SELECT id, parent_id, level, name, json) x) )
FROM test.tree_path_list_v
WHERE start_id = 1 --Which node to get children for
),
'children'::text,
true
)::json
;
Output for example 2
[
{
"id": 2,
"name": "c1.1",
"children": [
{
"id": 3,
"name": "c1.1.1",
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 2
}
],
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 1
},
{
"id": 4,
"name": "c1.2",
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 1
}
]
Code example 3: pure sql nested json solution
I rewrote the nested-json code in pure sql, and put it into an SQL function so we can reuse the code by parameterizing the start_ids (as an array).
I have not benchmarked the code yet, and it does not necessarily perform better than the sql+plpgsql solution. I had to (ab)use CTEs to loop through the result the same way I do in plpgsql to add nodes to their parents. The solution for "merging" is essentially procedural even though it is pure sql.
--DROP VIEW test.source_data_v CASCADE;
--Map your data (in this view) so it can be directly used in the recursive CTE.
CREATE OR REPLACE VIEW test.source_data_v AS
SELECT
id AS id,
parent_c_id AS parent_id,
name as name, -- Only for debugging: Give the node a name for easier debugging (a name is easier than an id)
--jsonb_build_object('id', tree_id, 'name', name, 'pid', parent_tree_id, 'children', jsonb_build_array()) AS data --Allow empty children arrays
jsonb_build_object('id', id, 'name', name, 'parent_id', parent_c_id) AS data -- Ignore empty children arrays
FROM test.table_c
;
SELECT * FROM test.source_data_v;
--DROP VIEW test.tree_path_list_v CASCADE;
CREATE OR REPLACE FUNCTION test.get_nested_object(bigint[])
RETURNS jsonb
AS $$
WITH RECURSIVE
search_graph(id, parent_id, data, depth, path, cycle) AS (
SELECT g.id, g.parent_id, g.data, 1,
ARRAY[g.id],
false
FROM test.source_data_v g
UNION ALL
SELECT g.id, g.parent_id, g.data, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM test.source_data_v g, search_graph sg
WHERE g.id = sg.parent_id AND NOT cycle
),
transformed_result_graph AS (
SELECT
sg.path[1] AS id,
d.parent_id,
sg.depth AS level,
sg.id AS start_id,
d.name,
sg.path,
(SELECT string_agg(t.name, ' ') FROM (SELECT unnest(sg.path::int[]) AS id) a INNER JOIN test.source_data_v t USING (id)) AS named_path,
d.data
FROM search_graph sg
INNER JOIN test.source_data_v d ON d.id = sg.path[1]
WHERE sg.id = ANY($1) --Parameterized input for start nodes
ORDER BY level DESC, start_id ASC
),
-- Sort path list and build a map/index of all individual nodes which we loop through in the next CTE:
sorted_paths AS (
SELECT null::int AS rownum, *
FROM transformed_result_graph WHERE false
UNION ALL
SELECT
0, null, null, null, null, null, null, null,
(SELECT jsonb_object_agg(id::text, data) FROM transformed_result_graph) -- Build a map/index of all individual nodes
UNION ALL
SELECT row_number() OVER () as rownum, *
FROM transformed_result_graph c
ORDER BY level DESC, start_id ASC
),
build_tree_loop (rownum, level, id, parent_id, data, named_path, result) AS (
SELECT
rownum, level, id, parent_id, data,
named_path,
data -- First row has the complete node map
FROM sorted_paths
WHERE rownum = 0
UNION ALL
SELECT
c.rownum, c.level, c.id, c.parent_id, c.data,
c.named_path,
CASE WHEN (c.parent_id IS NULL) OR (prev.result->(c.parent_id::text) IS NULL)
THEN prev.result
WHEN c.parent_id IS NOT NULL
THEN jsonb_set(
prev.result - (c.id::text), -- remove node and add it as child
ARRAY[c.parent_id::text, 'children'],
COALESCE(prev.result->(c.parent_id::text)->'children',jsonb_build_array())||COALESCE(prev.result->(c.id::text), jsonb_build_object('msg','ERROR')), -- add node as child (and create empty children array if not exist)
true --add key (children) if not exists
)
END AS result
FROM sorted_paths c -- Join each row in "sorted_paths" with the previous row from the CTE.
INNER JOIN build_tree_loop prev ON c.rownum = prev.rownum+1
), nested_start_nodes AS (
SELECT jsonb_agg(q.value) AS result
FROM jsonb_each((SELECT result FROM build_tree_loop ORDER BY rownum DESC LIMIT 1)) q
)
-- "Main" query
SELECT result FROM nested_start_nodes
$$ LANGUAGE sql STABLE;
-- END of sql function
SELECT test.get_nested_object(ARRAY[1]);
Output:
Unfortunately, jsonb does not preserve key order, so the "children" key comes first, making it harder to read the tree.
[
{
"children": [
{
"children": [
{
"id": 3,
"name": "c1.1.1",
"parent_id": 2
}
],
"id": 2,
"name": "c1.1",
"parent_id": 1
},
{
"id": 4,
"name": "c1.2",
"parent_id": 1
}
],
"id": 1,
"name": "c1",
"parent_id": null
}
]
Code example 4
Another variant: I put everything into a plpgsql function. The dynamic query inside the function takes the name of any view/table as a parameter, which must contain the columns id + parent_id + data + name. It also takes an array of ids specifying where to start. When using the function in a query you can aggregate a set of ids into an array as input (array_agg etc.).
The function is not "transparent", so it is harder to optimize indexes and such. With the "_debug" parameter set to true the function will output the raw generated sql as a notice, so you can EXPLAIN ANALYZE the query.
/*
Parameters:
_ids Array of ids. Specify where to start recursion down the tree.
_view Name of a view/table with the source data. The view must contain the following colums:
id(int/bigint)
parent_id(int/bigint)
data(jsonb) The data for each node, without the children key, which is added in this func.
name(text) Name is optional, only used for debugging purposes, can be empty string.
_children_keyname What key to use for children arrays
_no_root Exclude the root node, only returning the children array. Makes less sense when returning multiple root nodes (we don't know which children belong to which roots)
*/
--DROP FUNCTION test.get_nested_jsonb(bigint[], regclass, text, boolean, boolean) CASCADE;
CREATE OR REPLACE FUNCTION test.get_nested_jsonb(_ids bigint[], _view regclass, _children_keyname text DEFAULT 'children', _no_root boolean DEFAULT false, _debug boolean DEFAULT false)
RETURNS jsonb AS $$
DECLARE
dynamic_sql text := '';
tree_path_list jsonb;
node_map jsonb := jsonb_build_object();
node_result jsonb := jsonb_build_array();
parent_children jsonb := jsonb_build_array();
node jsonb;
relation jsonb;
BEGIN
dynamic_sql := format(
'
WITH RECURSIVE
search_graph(id, parent_id, depth, path, cycle) AS (
SELECT g.id, g.parent_id, 1,
ARRAY[g.id],
false
FROM '|| _view ||' g
UNION ALL
SELECT g.id, g.parent_id, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM '|| _view ||' g, search_graph sg
WHERE g.id = sg.parent_id AND NOT cycle
),
graph_by_id AS (
SELECT
sg.path[1] AS id, d.parent_id, sg.depth, sg.id AS start_id, d.name, sg.path,
--(SELECT string_agg(t.name, '' '') FROM (SELECT unnest(sg.path::int[]) AS id) a INNER JOIN '|| _view ||' t USING (id)) AS named_path, -- For debugging, show the path as list of names instead of ids
d.data
FROM search_graph sg
INNER JOIN '|| _view ||' d ON d.id = sg.path[1] -- Join in data for the current node
WHERE sg.id = ANY($1) --Parameterized input for start nodes: To debug raw sql: replace variable $1 with array of ids: ARRAY[1]
ORDER BY depth DESC, start_id ASC
)
SELECT jsonb_agg( (SELECT x FROM (SELECT id, parent_id, depth, name, data) x) )
FROM graph_by_id
');
IF _debug THEN
RAISE NOTICE 'Dump of raw dynamic SQL. Remember to replace $1 with ARRAY[id1,id2]: %', dynamic_sql;
END IF;
EXECUTE dynamic_sql USING _ids INTO tree_path_list;
-- Create a node map (id as key)
FOR node IN SELECT * FROM jsonb_array_elements(tree_path_list)
LOOP
node := jsonb_set(node, ARRAY['data', _children_keyname], jsonb_build_array()); --add children key to all nodes
node_map := jsonb_set(node_map, ARRAY[node->>'id'], node->'data');
END LOOP;
RAISE NOTICE 'dump: %', node_map;
-- Loop sorted list, add nodes to node map from leaves and up
FOR relation IN SELECT * FROM jsonb_array_elements(tree_path_list)
LOOP
IF ( (relation->>'depth')::int > 1 ) THEN
parent_children := COALESCE(node_map->(relation->>'parent_id')->_children_keyname, jsonb_build_array()) || jsonb_build_array(node_map->(relation->>'id'));
node_map := jsonb_set(node_map, ARRAY[relation->>'parent_id', _children_keyname], parent_children);
node_map := node_map - (relation->>'id');
ELSE
IF _no_root THEN
node_result := node_map->(relation->>'id')->_children_keyname;
ELSE
node_result := node_map->(relation->>'id');
END IF;
END IF;
END LOOP;
RETURN node_result;
END;
$$ LANGUAGE plpgsql STABLE;
-- Test the function on a view 'test.source_data_v', starting from id=1
SELECT test.get_nested_jsonb(ARRAY[1], 'test.source_data_v', 'children', false, true);
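As mentioned in the introduction to this example, the id array can also be built with array_agg in the calling query. A small usage sketch (my addition), selecting the start node by name instead of hard-coding its id:
SELECT test.get_nested_jsonb(
(SELECT array_agg(id)::bigint[] FROM test.source_data_v WHERE name = 'c1'),
'test.source_data_v'
);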