Display Data on SSRS transpose - sql-server-2008

I want to display some data I have transposed, so that it is pivoted on a date across the top in SSRS, like in the following picture:
I have already tried to do this via a dynamic SQL query, but that just creates more problems, in that the headers change names every day.
I expected it to be very easy to just spin the data around in SSRS, but I cannot seem to work out how to do it.
This is SSRS 2008 / MSSQL 2012.
EDIT - When I try to group on the "DATE" column, it comes out like this in SSRS, which is not what I want (see the linked screenshot).
EDIT
I have tried what was suggested below, but I'm not sure how to do what you described - do you mean in SQL or in SSRS?
This was my query:
IF OBJECT_ID('tempdb..#Cass_SSRS_DailyMiTable') IS NOT NULL
DROP TABLE #Cass_SSRS_DailyMiTable
CREATE TABLE #Cass_SSRS_DailyMiTable (
[date] DATE ,[Total Orders] INT ,[Orders Done] INT ,[Pieces picked] INT ,[Items Picked] INT ,[Average Items on Order] INT ,[Picked Today] INT)
INSERT INTO #Cass_SSRS_DailyMiTable (
date,
[Total Orders],
[Pieces picked],
[Items Picked],
[Average Items on Order],
[Picked Today]) VALUES
('2017-03-24', 53, 352, 33, 22, 0),
('2017-03-25', 351, 23, 235, 52, 0),
('2017-03-26', 35, 55, 25, 95, 0)
DECLARE @cols AS NVARCHAR(MAX),
        @query AS NVARCHAR(MAX);
SET @cols = STUFF((SELECT ',' + QUOTENAME(CONVERT(VARCHAR(100), c.date, 120))
                   FROM #Cass_SSRS_DailyMiTable c
                   ORDER BY c.date ASC
                   FOR XML PATH(''), TYPE
                  ).value('.', 'NVARCHAR(MAX)')
                  ,1,1,'')
SET @query =
';WITH PreUnpivot AS
(
    SELECT
        C.date,
        C.[Total Orders],
        C.[Pieces picked],
        C.[Items Picked],
        C.[Average Items on Order],
        C.[Picked Today]
    FROM
        #Cass_SSRS_DailyMiTable AS C
)
SELECT
    P.Concept,
    ' + @cols + '
FROM
    PreUnpivot AS C
UNPIVOT (
    PivotedValues FOR Concept IN ([Total Orders], [Pieces picked], [Items Picked], [Average Items on Order], [Picked Today])
) AS T
PIVOT (
    MAX(T.PivotedValues) FOR T.Date IN (' + @cols + ')
) AS P'
EXEC (@query)
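For reference (not from the original thread): the UNPIVOT step on its own already produces rows that an SSRS matrix can pivot at render time, which sidesteps the ever-changing column headers. A minimal sketch against the temp table above:
-- One row per (date, measure). An SSRS matrix with a column group on [date]
-- and a row group on [Concept] pivots these at render time, so the dataset's
-- column names never change.
SELECT U.[date], U.Concept, U.PivotedValue
FROM #Cass_SSRS_DailyMiTable
UNPIVOT (
    PivotedValue FOR Concept IN ([Total Orders], [Pieces picked], [Items Picked], [Average Items on Order], [Picked Today])
) AS U;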

Firstly, you need a generic common identifier within your dataset, so that all rows have the same identifier which can be grouped upon. I inserted and used a field called "id" within the dataset.
Secondly, insert a matrix with one column group (the date field) and four row groups (the category groups).
Each row group must be grouped on the generic common identifier (Group Properties > Group on...).
Example below:

I have used your original SQL and amended the script accordingly:
IF OBJECT_ID('tempdb..#Cass_SSRS_DailyMiTable') IS NOT NULL
DROP TABLE #Cass_SSRS_DailyMiTable
CREATE TABLE #Cass_SSRS_DailyMiTable (
[date] DATE ,[Total Orders] INT ,[Orders Done] INT ,[Pieces picked] INT ,[Items Picked] INT ,[Average Items on Order] INT ,[Picked Today] INT, [id] INT)
INSERT INTO #Cass_SSRS_DailyMiTable (
date,
[Total Orders],
[Pieces picked],
[Items Picked],
[Average Items on Order],
[Picked Today],
[id] )VALUES
('2017-03-24', 53, 352, 33, 22, 0 , 1),
('2017-03-25', 351, 23, 235, 52, 0, 1),
('2017-03-26', 35, 55, 25, 95, 0, 1)
select *
from #Cass_SSRS_DailyMiTable
The format of the report definition in my previous answer still applies.
If you require anything further, please message me with your email address.

Related

Import JSON files using script to SQL Table

I'm trying to import multiple JSON files using a script and insert them into a SQL table. The format of the JSON is as in the following example:
[
  {
    "eventSubmissionTime": 1595235794804,
    "correlationId": "gems;shipmentRef=204055527;gemsId=6422676195;GATE-OUTEXPY;integrationId=941343283",
    "eventName": "Actual gate out",
    "senderOrgName": "Maersk",
    "senderOrgTypes": [
      "ocean carrier"
    ],
    "originatorId": "MAEU",
    "eventOccurrenceTime": 1595234100000,
    "eventOccurrenceTime8601": "2020-07-20T11:35:00+03:00",
    "originatorName": "Maersk Line",
    "vehicleId": "JOMERC",
    "transportationPhase": "Export",
    "eventTransactionId": "4b73ccd8-b202-4827-bcd7-4d7074efcb0e",
    "carrierBookingNumber": "204055527",
    "terminal": "JOZRC01",
    "transportEquipmentId": "75e47f1f-1164-4f48-9097-3525c64924e6",
    "equipmentNumber": "MRSU3607627",
    "subscriptionId": "53e1e15d-e9cc-4a81-9814-57fb7643fb6c",
    "fullStatus": "Empty",
    "eventType": "actualGateOut",
    "associatedConsignmentIds": [],
    "associatedCarrierBookingNumbers": [],
    "location": {
      "type": "UN/Locode",
      "value": "JOAMM"
    },
    "messages": [
      "common.event.status.success"
    ],
    "sentFromInternal": false,
    "eventPriority": 5
  }
]
and I'm using the following SQL code to import the files:
DECLARE
    @SQL Varchar(200),
    @file Varchar(100),
    @Subdirectory Varchar(50)
DECLARE
    @files TABLE (Subdirectory Varchar(50), depth int, [file] int)
Insert into @files execute master.dbo.xp_DirTree 'D:\TradeLens',1,1
--Insert into @files execute master.dbo.xp_DirTree '\\127.0.0.1\d$\TradeLens',1,1
--select * from @files
DECLARE Cur_FileName CURSOR FOR Select Subdirectory from @files
Open Cur_FileName
FETCH NEXT FROM Cur_FileName INTO @Subdirectory
WHILE @@FETCH_STATUS = 0
BEGIN
    SET @file = '''D:\TradeLens\' + @Subdirectory + '''';
    --Select @file
    --SET @SQL = 'INSERT INTO Jtable1 SELECT * FROM OPENROWSET(BULK ' + @file + ', SINGLE_CLOB ) AS e'
    SET @SQL = 'INSERT INTO JTable SELECT * FROM OPENROWSET(BULK ' + @file + ', SINGLE_CLOB ) AS e'
    PRINT @SQL
    EXEC (@SQL)
    FETCH NEXT FROM Cur_FileName INTO @Subdirectory
END
Close Cur_FileName
DEALLOCATE Cur_FileName
I'm now facing the following error, once per file:
INSERT INTO Aseel2 SELECT * FROM OPENROWSET(BULK 'D:\TradeLens\20200720020538.json', SINGLE_CLOB ) AS e
Msg 213, Level 16, State 1, Line 1
Column name or number of supplied values does not match table definition.
(The same Msg 213 error is raised for each of the remaining files, 20200720020716.json through 20200720020733.json.)
Completion time: 2020-07-23T09:41:45.0250136+03:00
My problem is that I'm unable to create the correct SQL table structure, even after using online JSON-to-SQL conversion tools. So the question is: how can I insert the data into the SQL table while avoiding the structure mismatch?
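One way around Msg 213 (a sketch, assuming SQL Server 2016+ for OPENJSON; the staging table name and the column list are illustrative, not from the question): OPENROWSET(BULK ..., SINGLE_CLOB) returns a single column named BulkColumn, so load each file into a one-column staging table first and shred it with OPENJSON, instead of inserting SELECT * into a multi-column table.
-- Single-column staging table matching the single BulkColumn that
-- OPENROWSET(BULK ..., SINGLE_CLOB) returns.
IF OBJECT_ID('dbo.JsonStaging') IS NOT NULL
DROP TABLE dbo.JsonStaging
CREATE TABLE dbo.JsonStaging (JsonText NVARCHAR(MAX))

INSERT INTO dbo.JsonStaging (JsonText)
SELECT BulkColumn
FROM OPENROWSET(BULK 'D:\TradeLens\20200720020538.json', SINGLE_CLOB) AS e

-- Shred the JSON array into relational columns; extend the WITH list as needed.
SELECT j.*
FROM dbo.JsonStaging s
CROSS APPLY OPENJSON(s.JsonText)
WITH (
    eventSubmissionTime BIGINT        '$.eventSubmissionTime',
    eventName           NVARCHAR(200) '$.eventName',
    equipmentNumber     NVARCHAR(50)  '$.equipmentNumber',
    locationValue       NVARCHAR(20)  '$.location.value'
) AS j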

Mysql update count of records

I have a table 'product_records' as follows:
id - int (6),
product_group - int (7),
product_subgroup - int (7),
type - int(3),
count_of_reports - int (6)
At the moment the values in column count_of_reports for all records are 0.
What is the most efficient way of adding count_of_reports for every row for matching product_group, product_subgroup and type?
Example:
1, 23, 1, 1, count here (i.e. 2);
2, 23, 2, 1, count here (i.e. 1);
3, 23, 1, 1, count here (i.e. 2);
4, 24, 1, 3, count here (i.e. 1);
Thank you in advance.
You can use the update statement below; it counts the rows for each (product_group, product_subgroup, type) combination in a derived table, then joins that back to the base table:
UPDATE product_records PR
JOIN (SELECT product_group, product_subgroup, type, COUNT(*) AS num
      FROM product_records
      GROUP BY product_group, product_subgroup, type) PR2
  ON PR.product_group = PR2.product_group
 AND PR.product_subgroup = PR2.product_subgroup
 AND PR.type = PR2.type
SET PR.count_of_reports = PR2.num;
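A quick sanity check against the example rows above (hypothetical, assuming just those four rows):
SELECT id, product_group, product_subgroup, type, count_of_reports
FROM product_records
ORDER BY id;
-- Expected: rows 1 and 3 get count_of_reports = 2; rows 2 and 4 get 1.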

Optimize - Function that SELECTs from TEMP TABLE within loop to get averages of JSON values

I have a Mysql Function that runs as part of a larger query reading a few million records. In order to detect anomalies, I'm figuring out the average change over time. The data in the table is stored as JSON objects with UNIX timestamps as the key for up to 30 days.
As an example, the input (input_array) would look something like:
[{"1532944806": 16}, {"1533031206": 14}, {"1533117605": 13}, {"1533204305": 12}, {"1533290708": 10}, {"1533463506": 9}, {"1533549907": 9}, {"1533636306": 9}, {"1533722707": 9}, {"1533809108": 9}, {"1533895506": 9}, {"1533981906": 8}, {"1534068306": 7}, {"1534154706": 7}, {"1534241108": 7}, {"1534590304": 7}, {"1534673106": 12}, {"1534759508": 6}, {"1534845905": 7}, {"1534932306": 7}, {"1535018707": 5}, {"1535105106": 3}, {"1535191505": 7}, {"1535277907": 6}, {"1535364305": 7}, {"1535450706": 2}, {"1535537107": 1}]
I'm only looking to average decreasing changes - not any change that increases over a day.
I'm checking that a value for the previous day exists, and if so, I'm calculating the change and adding it to a temporary table that is then queried to select the average.
So far I have:
CREATE FUNCTION `daily_averages`(input_array JSON) RETURNS int(4)
READS SQL DATA
DETERMINISTIC
BEGIN
DECLARE array_length INTEGER(2);
DECLARE prev_value INTEGER(4);
DECLARE idx INTEGER(4);
DROP TEMPORARY TABLE IF EXISTS collection;
CREATE TEMPORARY TABLE collection (`change` INTEGER(4) SIGNED DEFAULT 0);
SELECT JSON_LENGTH(input_array) INTO array_length;
SET idx = 0;
WHILE idx < array_length DO
SELECT
IF(idx-1 > -1,
CONVERT(
JSON_EXTRACT(
JSON_EXTRACT(
JSON_EXTRACT( input_array, CONCAT( '$[', idx-1, ']' ) )
, '$.*'
)
, '$[0]'
), SIGNED INTEGER
)
, -1
)
INTO prev_value;
INSERT INTO collection
SELECT (prev_value -
(
CONVERT(
JSON_EXTRACT(
JSON_EXTRACT(
JSON_EXTRACT( input_array, CONCAT( '$[', idx, ']' ) )
, '$.*'
)
, '$[0]'
), SIGNED INTEGER
)
)
)
FROM DUAL
WHERE prev_value > 0;
SET idx = idx + 1;
END WHILE;
RETURN (SELECT AVG(`change`) FROM collection WHERE `change` > -1);
END
With about 2.7 million records, it takes about 20 minutes to run currently. I'm looking to optimize this or re-write it by avoiding the DROP/CREATE overhead.
It seems unnecessary to create a table just to calculate an average; it's simple to do in the loop. Instead of inserting each value into a table, add it to a running total variable. At the end, return total/count.
Since you're totalling the differences between consecutive values, you only need to keep the previous value in a variable rather than re-extracting it on every iteration.
You can also use SET statements to assign variables, rather than SELECT ... INTO variable.
-- Full function shown for clarity; the body replaces the original temp-table logic.
CREATE FUNCTION `daily_averages`(input_array JSON) RETURNS int(4)
READS SQL DATA
DETERMINISTIC
BEGIN
    DECLARE array_length INTEGER(2);
    DECLARE prev_value INTEGER(4);
    DECLARE idx INTEGER(4);
    DECLARE total INTEGER(4);
    DECLARE counter INTEGER(4);
    DECLARE cur_value INTEGER(4);
    SET array_length = JSON_LENGTH(input_array);
    SET total = 0;
    SET counter = 0;
    -- Initialize prev_value to the first element
    SET prev_value = CONVERT(
        JSON_EXTRACT(
            JSON_EXTRACT(
                JSON_EXTRACT( input_array, '$[0]' )
            , '$.*'
            )
        , '$[0]'
        ), SIGNED INTEGER
    );
    SET idx = 1;
    WHILE idx < array_length DO
        SET cur_value = CONVERT(
            JSON_EXTRACT(
                JSON_EXTRACT(
                    JSON_EXTRACT( input_array, CONCAT( '$[', idx, ']' ) )
                , '$.*'
                )
            , '$[0]'
            ), SIGNED INTEGER
        );
        -- Only count decreasing changes
        IF cur_value < prev_value THEN
            SET total = total + (prev_value - cur_value);
            SET counter = counter + 1;
        END IF;
        SET prev_value = cur_value;
        SET idx = idx + 1;
    END WHILE;
    -- Division by zero yields NULL under the default sql_mode
    RETURN total / counter;
END
Digging inside a million JSON strings. I'm amazed it took only 20 minutes.
As you insert the rows, do some calculations and store the results somewhere. Then use that for doing the monitoring.
Even if you can't do it as you insert the rows, do it only to the 'new' rows. Again save the previous info somewhere.
As for DROP/CREATE... That can be sped up by having a permanent table, then use only TRUNCATE TABLE at the start of each proc call.
The (4) in INTEGER(4) does not mean anything. You will always get a 32-bit integer. (This note probably has no impact on the proc.)
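A sketch of the permanent-table variant (the table name is illustrative). Note that TRUNCATE performs an implicit commit, which MySQL does not allow inside a stored function, so within the function body a DELETE is the closest substitute; TRUNCATE itself works if the logic lives in a stored procedure:
-- One-time setup, outside the function:
CREATE TABLE IF NOT EXISTS daily_changes (`change` INT SIGNED DEFAULT 0);
-- Inside the function, instead of DROP/CREATE TEMPORARY TABLE:
DELETE FROM daily_changes;
-- Caveat: unlike a TEMPORARY table, a permanent table is shared across
-- concurrent sessions, so this assumes single-session use.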

Generate nested json with counting in Postgresql

I created a simple database (in the latest stable PostgreSQL), like this:
create table table_a(id int primary key not null, name char(10));
create table table_b(id int primary key not null, name char(10), parent_a_id int);
create table table_c(id int primary key not null, name char(10), parent_a_id int, parent_b_id int, parent_c_id int, c_number int);
create table table_d(id int primary key not null, name char(10), parent_c_id int, d_number int);
with some example data like this:
insert into table_a(id, name) values(1, 'a');
insert into table_b(id, name, parent_a_id) values(1, 'b', 1);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(1, 'c1', 1, 1, null, 1);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(2, 'c1.1', 1, 1, 1, 5);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(3, 'c1.1.1', 1, 1, 2, 2);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(4, 'c1.2', 1, 1, 1, 8);
insert into table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(5, 'c2', 1, 1, null, 4);
insert into table_d(id, name, parent_c_id, d_number) values(1, 'c1_d1', 1, 5);
insert into table_d(id, name, parent_c_id, d_number) values(2, 'c1.1_d1', 2, 6);
insert into table_d(id, name, parent_c_id, d_number) values(3, 'c1.1_d2', 2, 1);
insert into table_d(id, name, parent_c_id, d_number) values(4, 'c1.1.1_d1', 3, 2);
insert into table_d(id, name, parent_c_id, d_number) values(5, 'c2_d1', 5, 4);
insert into table_d(id, name, parent_c_id, d_number) values(6, 'c2_d2', 5, 3);
insert into table_d(id, name, parent_c_id, d_number) values(7, 'c2_d3', 5, 7);
Now I want to generate json like this: http://codebeautify.org/jsonviewer/cb9bc2a1
With relation rules:
table_a has many table_b
table_b has one table_a and has many table_c (select only rows where parent_c_id is null)
table_c has one table_a, has one table_b, has many table_c (children) and has one table_c (parent)
and counting rules:
table_c has d_numbers_sum (sum of d_number in table_d plus sum of d_numbers_sum over its table_c children)
table_b has d_numbers_sum (sum of d_numbers_sum over its table_c relation)
table_a has d_numbers_sum (sum of d_numbers_sum over its table_b relation)
table_c has real_c_number (if it has table_c children, the sum of real_c_number over those children, else c_number)
table_b has real_c_number_sum (sum of real_c_number over its table_c relation)
table_a has real_c_number_sum (sum of real_c_number_sum over its table_b relation)
Is it possible to generate that JSON with that rules in pure postgresql code?
Is it possible to generate a shortcut function for this, like:
select * from my_shortcut where id = ?;
or without an id (generating a json array):
select * from my_shortcut;
Can you show me an example with a description (how to generate the nested json and the counting), so I could use it for relations similar to, but more complex than, these in my app?
EDIT:
I wrote something interesting, but it's not a 100% nested hash - here every leaf has its own tree, and the result is an array of these trees. I need to deep-merge that array to create an array of unique trees:
with recursive j as (
SELECT c.*, json '[]' children -- at max level, there are only leaves
FROM test.table_c c
WHERE (select count(1) from test.table_c where parent_c_id = c.id) = 0
UNION ALL
-- a little hack, because PostgreSQL doesn't like aggregated recursive terms
SELECT (c).*, array_to_json(array_agg(j)) children
FROM (
SELECT c, j
FROM j
JOIN test.table_c c ON j.parent_c_id = c.id
) v
GROUP BY v.c
)
SELECT json_agg(row_to_json(j)) json_tree FROM j WHERE parent_c_id is null;
The answer consists of two parts: first rigging up a basic json structure, then building up nested json objects from the self-referencing column in table_c.
UPDATE: I rewrote example/part 2 as a pure sql solution, and added that code as example 3.
I also added a plpgsql function that encapsulates almost all of the code, and takes the name of a view as input to produce the nested json. See example 4.
All code requires Postgres 9.5.
The first code example sets up a json object with most of the joins, except for the nested children in table_c. The counting part is mostly left out.
In the second code example I wrote a "merge" function in pure plpgsql, which should solve the nested json problem. This solution requires only PG 9.5 and no extensions, since plpgsql is built in.
As an alternative, I found one other solution that does a deep merge in javascript, but it requires the plv8 extension to be installed.
Creating nested json is not trivial to do in pure sql; the challenge is to merge the separate json trees we can get from a recursive CTE.
Code example 1
Creating the query as a view makes it easy to reuse the query to either return a json array of all objects from table_a, or return only one object with a given id.
I made some small changes to the data model and data. The code for a self-contained example follows:
--TABLES
DROP SCHEMA IF EXISTS TEST CASCADE;
CREATE SCHEMA test;
-- Using text instead of char(10), to avoid padding. For most databases text is the best choice.
-- Postgresql uses the same implementation under the hood (char vs text).
-- Source: https://www.depesz.com/2010/03/02/charx-vs-varcharx-vs-varchar-vs-text/
create table test.table_a(id int primary key not null, name text);
create table test.table_b(id int primary key not null, name text, parent_a_id int);
create table test.table_c(id int primary key not null, name text, parent_a_id int, parent_b_id int, parent_c_id int, c_number int);
create table test.table_d(id int primary key not null, name text, parent_c_id int, d_number int);
--DATA
insert into test.table_a(id, name) values(1, 'a');
-- Changed: parent_a_id=1 (instead of null)
insert into test.table_b(id, name, parent_a_id) values(1, 'b', 1);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(1, 'c1', 1, 1, null, 1);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(2, 'c1.1', 1, 1, 1, 5);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(3, 'c1.1.1', 1, 1, 2, 2);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(4, 'c1.2', 1, 1, 1, 8);
insert into test.table_c(id, name, parent_a_id, parent_b_id, parent_c_id, c_number) values(5, 'c2', 1, 1, null, 4);
insert into test.table_d(id, name, parent_c_id, d_number) values(1, 'c1_d1', 1, 5);
insert into test.table_d(id, name, parent_c_id, d_number) values(2, 'c1.1_d1', 2, 6);
insert into test.table_d(id, name, parent_c_id, d_number) values(3, 'c1.1_d2', 2, 1);
insert into test.table_d(id, name, parent_c_id, d_number) values(4, 'c1.1.1_d1', 3, 2);
insert into test.table_d(id, name, parent_c_id, d_number) values(5, 'c2_d1', 5, 4);
insert into test.table_d(id, name, parent_c_id, d_number) values(6,'c2_d2', 5, 3);
insert into test.table_d(id, name, parent_c_id, d_number) values(7, 'c2_d3', 5, 7);
CREATE OR REPLACE VIEW json_objects AS
--Root object
SELECT ta.id, json_build_object(
'id', ta.id,
'name', ta.name,
'd_numbers_sum', (SELECT sum(d_number) FROM test.table_d),
'real_c_number_sum', null,
'children_b', (
-- table_b
SELECT json_agg(json_build_object(
'id', tb.id,
'name', tb.name,
'd_numbers_sum', null,
'real_c_number_sum', null,
'children_c', (
-- table_c
SELECT json_agg(json_build_object(
'id', tc.id,
'name', tc.name,
'd_numbers_sum', null,
'real_c_number_sum', null,
'children_d', (
-- table_d
SELECT json_agg(json_build_object(
'id', td.id,
'name', td.name,
'd_numbers_sum', null,
'real_c_number_sum', null
))
FROM test.table_d td
WHERE td.parent_c_id = tc.id
)
))
FROM test.table_c tc
WHERE tc.parent_b_id = tb.id
)
))
FROM test.table_b tb
WHERE tb.parent_a_id = ta.id
)
) AS object
FROM test.table_a ta;
-- Return a json array of all objects
SELECT json_agg(object) FROM json_objects;
-- Return only the json object with a given id
SELECT object FROM json_objects WHERE id = 1;
Code example 2
Here we map the data from table_c so we can insert it directly into a recursive CTE from the documentation, for readability and educational purposes.
It then prepares the data as input to the "merge" function. For simplicity I just aggregated the rows into one big json object. The performance should be ok.
The third function parameter chooses whether to get the parent object, or only its children as a (json) array.
Which node to get the children for is specified in the query in the last lines of the example. That query can be used anywhere we need the children of a table_c node.
I did test this on a more complex example, and it looks like I have sorted out most of the rough edges.
The three parts of the CTE (graph, search_graph and filtered_graph) could be refactored into one for performance, since CTEs are optimization fences for the database planner, but I kept this version for readability and debugging.
This example utilizes jsonb instead of json; see the documentation.
The reason for using jsonb here is to avoid reparsing the json each time it is manipulated in the function. When the function is done, the result is cast back to json so it can be inserted directly into the code in example 1.
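For reference, a toy illustration of the jsonb primitives the merge function relies on (all available since 9.5; not part of the original answer):
SELECT jsonb_set('{"id": 1}'::jsonb, '{children}', '[]'::jsonb); -- add or replace a key
SELECT '{"a": 1, "b": 2}'::jsonb - 'b';                          -- delete a key
SELECT '[1]'::jsonb || '[2]'::jsonb;                             -- concatenate arrays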
--DROP VIEW test.tree_path_list_v CASCADE;
CREATE OR REPLACE VIEW test.tree_path_list_v AS
WITH RECURSIVE
-- Map the source data so we can use it directly in a recursive query from the documentation:
graph AS
(
SELECT id AS id, parent_c_id AS link, name, jsonb_build_object('id', id, 'name', name, 'parent_c_id', parent_c_id, 'parent_a_id', parent_a_id, 'parent_b_id', parent_b_id) AS data
FROM test.table_c
),
-- Recursive query from documentation.
-- http://www.postgresql.org/docs/current/static/queries-with.html
search_graph(id, link, data, depth, path, cycle) AS (
SELECT g.id, g.link, g.data, 1,
ARRAY[g.id],
false
FROM graph g
UNION ALL
SELECT g.id, g.link, g.data, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM graph g, search_graph sg
WHERE g.id = sg.link AND NOT cycle
),
-- Decorate/filter the result so it can be used as input to the "test.create_jsonb_tree" function
filtered_graph AS (
SELECT
sg.path[1] AS id,
sg.path[2] AS parent_id,
sg.depth AS level,
sg.id AS start_id,
d.name,
sg.path,
d.data::jsonb AS json
FROM search_graph sg
INNER JOIN graph d ON d.id = sg.path[1]
ORDER BY level DESC
)
-- "Main" query
SELECT * FROM filtered_graph
;
-- Returns a json object with all children merged into its parents.
-- Parameter 1 "_tree_path_list": A json document with the rows from the view "test.tree_path_list_v" aggregated into one big json.
-- Parameter 2 "_children_keyname": Choose the name for the children
CREATE OR REPLACE FUNCTION test.create_jsonb_tree(_tree_path_list jsonb, _children_keyname text DEFAULT 'children', _get_only_children boolean DEFAULT false)
RETURNS jsonb AS
$$
DECLARE
node_map jsonb := jsonb_build_object();
node_result jsonb := jsonb_build_array();
parent_children jsonb := jsonb_build_array();
node jsonb;
relation jsonb;
BEGIN
FOR node IN SELECT * FROM jsonb_array_elements(_tree_path_list)
LOOP
RAISE NOTICE 'Input (per row): %', node;
node_map := jsonb_set(node_map, ARRAY[node->>'id'], node->'json');
END LOOP;
FOR relation IN SELECT * FROM jsonb_array_elements(_tree_path_list)
LOOP
IF ( (relation->>'level')::int > 1 ) THEN
parent_children := COALESCE(node_map->(relation->>'parent_id')->_children_keyname, jsonb_build_array()) || jsonb_build_array(node_map->(relation->>'id'));
node_map := jsonb_set(node_map, ARRAY[relation->>'parent_id', _children_keyname], parent_children);
node_map := node_map - (relation->>'id');
ELSE
IF _get_only_children THEN
node_result := node_map->(relation->>'id')->_children_keyname;
ELSE
node_result := node_map->(relation->>'id');
END IF;
END IF;
END LOOP;
RETURN node_result;
END;
$$ LANGUAGE plpgsql
;
-- Aggregate the rows from the view into one big json object and pass it to the function:
SELECT test.create_jsonb_tree(
( SELECT jsonb_agg( (SELECT x FROM (SELECT id, parent_id, level, name, json) x) )
FROM test.tree_path_list_v
WHERE start_id = 1 --Which node to get children for
),
'children'::text,
true
)::json
;
Output for example 2
[
{
"id": 2,
"name": "c1.1",
"children": [
{
"id": 3,
"name": "c1.1.1",
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 2
}
],
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 1
},
{
"id": 4,
"name": "c1.2",
"parent_a_id": 1,
"parent_b_id": 1,
"parent_c_id": 1
}
]
Code example 3: pure sql nested json solution
I rewrote the nested-json code in pure sql and put it into an SQL function, so we can reuse the code by parameterizing the start_ids (as an array).
I have not benchmarked the code yet, and it does not necessarily perform better than the sql+plpgsql solution. I had to (ab)use CTEs to loop through the result the same way I do in plpgsql to add nodes to their parents. The solution for "merging" is essentially procedural, even though it is pure sql.
--DROP VIEW test.source_data_v CASCADE;
--Map your data (in this view) so it can be directly used in the recursive CTE.
CREATE OR REPLACE VIEW test.source_data_v AS
SELECT
id AS id,
parent_c_id AS parent_id,
name as name, -- Only for debugging: Give the node a name for easier debugging (a name is easier than an id)
--jsonb_build_object('id', tree_id, 'name', name, 'pid', parent_tree_id, 'children', jsonb_build_array()) AS data --Allow empty children arrays
jsonb_build_object('id', id, 'name', name, 'parent_id', parent_c_id) AS data -- Ignore empty children arrays
FROM test.table_c
;
SELECT * FROM test.source_data_v;
--DROP VIEW test.tree_path_list_v CASCADE;
CREATE OR REPLACE FUNCTION test.get_nested_object(bigint[])
RETURNS jsonb
AS $$
WITH RECURSIVE
search_graph(id, parent_id, data, depth, path, cycle) AS (
SELECT g.id, g.parent_id, g.data, 1,
ARRAY[g.id],
false
FROM test.source_data_v g
UNION ALL
SELECT g.id, g.parent_id, g.data, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM test.source_data_v g, search_graph sg
WHERE g.id = sg.parent_id AND NOT cycle
),
transformed_result_graph AS (
SELECT
sg.path[1] AS id,
d.parent_id,
sg.depth AS level,
sg.id AS start_id,
d.name,
sg.path,
(SELECT string_agg(t.name, ' ') FROM (SELECT unnest(sg.path::int[]) AS id) a INNER JOIN test.source_data_v t USING (id)) AS named_path,
d.data
FROM search_graph sg
INNER JOIN test.source_data_v d ON d.id = sg.path[1]
WHERE sg.id = ANY($1) --Parameterized input for start nodes
ORDER BY level DESC, start_id ASC
),
-- Sort path list and build a map/index of all individual nodes which we loop through in the next CTE:
sorted_paths AS (
SELECT null::int AS rownum, *
FROM transformed_result_graph WHERE false
UNION ALL
SELECT
0, null, null, null, null, null, null, null,
(SELECT jsonb_object_agg(id::text, data) FROM transformed_result_graph) -- Build a map/index of all individual nodes
UNION ALL
SELECT row_number() OVER () as rownum, *
FROM transformed_result_graph c
ORDER BY level DESC, start_id ASC
),
build_tree_loop (rownum, level, id, parent_id, data, named_path, result) AS (
SELECT
rownum, level, id, parent_id, data,
named_path,
data -- First row has the complete node map
FROM sorted_paths
WHERE rownum = 0
UNION ALL
SELECT
c.rownum, c.level, c.id, c.parent_id, c.data,
c.named_path,
CASE WHEN (c.parent_id IS NULL) OR (prev.result->(c.parent_id::text) IS NULL)
THEN prev.result
WHEN c.parent_id IS NOT NULL
THEN jsonb_set(
prev.result - (c.id::text), -- remove node and add it as child
ARRAY[c.parent_id::text, 'children'],
COALESCE(prev.result->(c.parent_id::text)->'children',jsonb_build_array())||COALESCE(prev.result->(c.id::text), jsonb_build_object('msg','ERROR')), -- add node as child (and create empty children array if not exist)
true --add key (children) if not exists
)
END AS result
FROM sorted_paths c -- Join each row in "sorted_paths" with the previous row from the CTE.
INNER JOIN build_tree_loop prev ON c.rownum = prev.rownum+1
), nested_start_nodes AS (
SELECT jsonb_agg(q.value) AS result
FROM jsonb_each((SELECT result FROM build_tree_loop ORDER BY rownum DESC LIMIT 1)) q
)
-- "Main" query
SELECT result FROM nested_start_nodes
$$ LANGUAGE sql STABLE;
-- END of sql function
SELECT test.get_nested_object(ARRAY[1]);
Output:
Unfortunately, jsonb does not preserve key order, so the "children" key comes first, making the tree harder to read.
[
{
"children": [
{
"children": [
{
"id": 3,
"name": "c1.1.1",
"parent_id": 2
}
],
"id": 2,
"name": "c1.1",
"parent_id": 1
},
{
"id": 4,
"name": "c1.2",
"parent_id": 1
}
],
"id": 1,
"name": "c1",
"parent_id": null
}
]
Code example 4
Another variant: I put everything into a plpgsql function. The dynamic query inside the function takes the name of any view/table as a parameter, which must contain the columns id + parent_id + data + name. It also takes an array of ids specifying where to start. When using the function in a query you can aggregate a set of ids into an array as input (array_agg etc.).
The function is not "transparent", so it is harder to optimize indexes and such. With the "_debug" parameter set to true the function will output the raw generated sql as a notice, so you can EXPLAIN ANALYZE the query.
/*
Parameters:
_ids Array of ids. Specify where to start recursion down the tree.
_view Name of a view/table with the source data. The view must contain the following columns:
id(int/bigint)
parent_id(int/bigint)
data(jsonb) The data for each node, without the children key, which is added in this func.
name(text) Name is optional, only used for debugging purposes; can be an empty string.
_children_keyname What key to use for the children arrays
_no_root Exclude the root node, returning only the children array. Makes less sense when returning multiple root nodes (we don't know which children belong to which roots)
*/
--DROP FUNCTION test.get_nested_jsonb(bigint[], regclass, text, boolean, boolean) CASCADE;
CREATE OR REPLACE FUNCTION test.get_nested_jsonb(_ids bigint[], _view regclass, _children_keyname text DEFAULT 'children', _no_root boolean DEFAULT false, _debug boolean DEFAULT false)
RETURNS jsonb AS $$
DECLARE
dynamic_sql text := '';
tree_path_list jsonb;
node_map jsonb := jsonb_build_object();
node_result jsonb := jsonb_build_array();
parent_children jsonb := jsonb_build_array();
node jsonb;
relation jsonb;
BEGIN
dynamic_sql := format(
'
WITH RECURSIVE
search_graph(id, parent_id, depth, path, cycle) AS (
SELECT g.id, g.parent_id, 1,
ARRAY[g.id],
false
FROM '|| _view ||' g
UNION ALL
SELECT g.id, g.parent_id, sg.depth + 1,
path || g.id,
g.id = ANY(path)
FROM '|| _view ||' g, search_graph sg
WHERE g.id = sg.parent_id AND NOT cycle
),
graph_by_id AS (
SELECT
sg.path[1] AS id, d.parent_id, sg.depth, sg.id AS start_id, d.name, sg.path,
--(SELECT string_agg(t.name, '' '') FROM (SELECT unnest(sg.path::int[]) AS id) a INNER JOIN '|| _view ||' t USING (id)) AS named_path, -- For debugging, show the path as list of names instead of ids
d.data
FROM search_graph sg
INNER JOIN '|| _view ||' d ON d.id = sg.path[1] -- Join in data for the current node
WHERE sg.id = ANY($1) --Parameterized input for start nodes: To debug raw sql: replace variable $1 with array of ids: ARRAY[1]
ORDER BY depth DESC, start_id ASC
)
SELECT jsonb_agg( (SELECT x FROM (SELECT id, parent_id, depth, name, data) x) )
FROM graph_by_id
');
IF _debug THEN
RAISE NOTICE 'Dump of raw dynamic SQL. Remember to replace $1 with ARRAY[id1,id2]: %', dynamic_sql;
END IF;
EXECUTE dynamic_sql USING _ids INTO tree_path_list;
-- Create a node map (id as key)
FOR node IN SELECT * FROM jsonb_array_elements(tree_path_list)
LOOP
node := jsonb_set(node, ARRAY['data', _children_keyname], jsonb_build_array()); --add children key to all nodes
node_map := jsonb_set(node_map, ARRAY[node->>'id'], node->'data');
END LOOP;
RAISE NOTICE 'dump: %', node_map;
-- Loop sorted list, add nodes to node map from leaves and up
FOR relation IN SELECT * FROM jsonb_array_elements(tree_path_list)
LOOP
IF ( (relation->>'depth')::int > 1 ) THEN
parent_children := COALESCE(node_map->(relation->>'parent_id')->_children_keyname, jsonb_build_array()) || jsonb_build_array(node_map->(relation->>'id'));
node_map := jsonb_set(node_map, ARRAY[relation->>'parent_id', _children_keyname], parent_children);
node_map := node_map - (relation->>'id');
ELSE
IF _no_root THEN
node_result := node_map->(relation->>'id')->_children_keyname;
ELSE
node_result := node_map->(relation->>'id');
END IF;
END IF;
END LOOP;
RETURN node_result;
END;
$$ LANGUAGE plpgsql STABLE;
-- Test the function on a view 'test.source_data_v', starting from id=1
SELECT test.get_nested_jsonb(ARRAY[1], 'test.source_data_v', 'children', false, true);

SQL Server Tree Query

I need some help with an MS SQL Server query. I'm not much of a DBA. I have an application with an Organizations table which is made up of a parent-child relationship:
CREATE TABLE [dbo].[Organizations](
    [OrgPK] [int] IDENTITY(1,1) NOT NULL,
    [OrgParentFK] [int] NULL,
    [OrgName] [varchar](200) NOT NULL,
    CONSTRAINT [PK__Organizations] PRIMARY KEY CLUSTERED ([OrgPK] ASC)
)
Sample data looks like this:
OrgPK, OrgParentFK, OrgName
1, 0, Corporate
2, 1, Department A
3, 1, Department B
4, 2, Division 1
5, 2, Division 2
6, 3, Division 1
7, 6, Section 1
8, 6, Section 2
I'm trying to generate a query that returns an org path based on a given OrgPK. For example, given OrgPK = 7 the query would return 'Corporate/Department B/Division 1/Section 1'.
If given OrgPK = 5, the return string would be 'Corporate/Department A/Division 2'.
Thank you for your assistance.
WITH OrganizationsH (OrgParentFK, OrgPK, OrgName, level, Label) AS
(
    SELECT OrgParentFK, OrgPK, OrgName, 0, CAST(OrgName AS VARCHAR(MAX)) AS Label
    FROM Organizations
    WHERE OrgParentFK IS NULL OR OrgParentFK = 0 -- the sample root row has OrgParentFK = 0
    UNION ALL
    SELECT o.OrgParentFK, o.OrgPK, o.OrgName, level + 1, CAST(h.Label + '/' + o.OrgName AS VARCHAR(MAX)) AS Label
    FROM Organizations o JOIN OrganizationsH h ON o.OrgParentFK = h.OrgPK
)
SELECT OrgParentFK, OrgPK, OrgName, level, Label
FROM OrganizationsH
WHERE OrgPK = 5
h/t to marc_s
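With the sample data, this returns a single row for OrgPK = 5 whose Label is 'Corporate/Department A/Division 2'.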
It can also be solved by creating a scalar valued function:
-- SELECT [dbo].[ListTree](5)
CREATE FUNCTION [dbo].[ListTree](@OrgPK int)
RETURNS varchar(max)
AS
BEGIN
    declare @Tree varchar(MAX)
    set @Tree = ''
    while (exists (select * from dbo.Organizations where OrgPK = @OrgPK))
    begin
        select @Tree = OrgName + '/' + @Tree,
               @OrgPK = OrgParentFK
        from dbo.Organizations
        where OrgPK = @OrgPK
    end
    return left(@Tree, len(@Tree) - 1)
END
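Usage with the sample data:
SELECT dbo.ListTree(7) -- returns 'Corporate/Department B/Division 1/Section 1'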