Convert result into doubly nested JSON format - json

I am trying to convert SQL Server results into a doubly nested JSON format.
Source SQL Server table:
ID
Name
Program
Type
Section
Director
Project
Sr Manager
PCM
Contractor
Cost Client
123
abc
qew
tyu
dd
ghghjg
hkhjk
fghfgf
gnhghj
gghgh
gghhg
456
yui
gdffgf
ghgf
jkjlkll
uiop
rtyuui
rfv
ujmk
rfvtg
efgg
Convert into doubly JSON as shown here:
[
[
{"key":"ID","value":"123"},
{"key":"Name","value":"abc"},
{"key":"Program","value":"qew"},
{"key":"Type","value":"tyu"},
{"key":"Section","value":"dd"},
{"key":"Director","value":"ghghjg"},
{"key":"Project","value":"hkhjk"},
{"key":"Sr Manager","value":"fghfgf"},
{"key":"PCM","value":"gnhghj"},
{"key":"Contractor","value":"gghgh"},
{"key":"Cost Client","value":"gghhg"}
],
[
{"key":"ID","value":"456"},
{"key":"Name","value":"yui"},
{"key":"Program","value":"gdffgf"},
{"key":"Type","value":"ghgfjhjhj"},
{"key":"Section","value":"jkjlkll"},
{"key":"Director","value":"uiop"},
{"key":"Project","value":"rtyuui"},
{"key":"Sr Manager","value":"rfv"},
{"key":"PCM","value":"ujmk"},
{"key":"Contractor","value":"rfvtg"},
{"key":"Cost Client","value":"efgg"}
]
]
Any help would be greatly appreciated.
Edit:
I started with this by rewriting the "FOR JSON AUTO" so that I can add "Key" "Value" text somehow.
But because my table has space in the column name, FOR XML PATH('') giving invalid XML identifier as required by FOR XML error.
that is when I thought of taking community help.
-- NOTE(review): the '@' sigils of variables/parameters were mangled to '#'
-- in this snippet; restored below. ##T (a global temp table) is genuine.
-- Builds a JSON string for an arbitrary table by serializing rows with
-- FOR XML PATH('') and patching the tags into JSON with REPLACE.
-- Known broken (per the question): FOR XML fails with "invalid XML
-- identifier" when a column name contains a space.
Create PROCEDURE [dbo].[GetSQLtoJSON] @TableName VARCHAR(255)
AS
BEGIN
-- Return an empty result when the table does not exist.
IF OBJECT_ID(@TableName) IS NULL
BEGIN
SELECT Json = '';
RETURN
END;
-- Copy the source rows into a global temp table with a fixed name.
DECLARE @SQL NVARCHAR(MAX) = N'SELECT * INTO ##T ' +
'FROM ' + @TableName;
EXECUTE SP_EXECUTESQL @SQL;
-- Serialize all rows as one XML fragment, then rewrite the tags to JSON
-- using the column metadata from sys.columns.
DECLARE @X NVARCHAR(MAX) = '[' + (SELECT * FROM ##T FOR XML PATH('')) + ']';
SELECT @X = REPLACE(@X, '<' + Name + '>',
CASE WHEN ROW_NUMBER() OVER(ORDER BY Column_ID) = 1 THEN '{'
ELSE '' END + Name + ':'),
@X = REPLACE(@X, '</' + Name + '>', ','),
@X = REPLACE(@X, ',{', '}, {'),
@X = REPLACE(@X, ',]', '}]')
FROM sys.columns
WHERE [Object_ID] = OBJECT_ID(@TableName)
ORDER BY Column_ID;
DROP TABLE ##T;
SELECT Json = @X;
END
Sample data:
-- Sample table: [Col 2] deliberately contains a space, which is what
-- makes FOR XML PATH('') fail in the procedure above.
CREATE TABLE [dbo].[Test1](
[ID] [int] IDENTITY(1,1) NOT NULL,
[Col1] [int] NOT NULL,
[Col 2] varchar(50)
) ON [PRIMARY]
GO
-- Allow explicit values for the IDENTITY column while seeding.
SET IDENTITY_INSERT [dbo].[Test1] ON
GO
INSERT [dbo].[Test1] ([ID], [Col1], [Col 2]) VALUES (1, 0,'ABCD')
GO
INSERT [dbo].[Test1] ([ID], [Col1] ,[Col 2]) VALUES (2, 1, 'POIU')
GO
SET IDENTITY_INSERT [dbo].[Test1] OFF
GO

You can use the following code:
Inside an APPLY, unpivot the columns as key/value pairs...
... and aggregate using FOR JSON PATH
Use STRING_AGG to do another aggregation.
-- For each row of T, the CROSS APPLY unpivots the columns into
-- ("key", value) pairs and FOR JSON PATH turns them into one JSON array
-- of {"key":...,"value":...} objects; STRING_AGG then joins the per-row
-- arrays with commas to form the outer array.
SELECT '[' + STRING_AGG(CAST(kv.json AS nvarchar(max)), ',') + ']'
FROM T
CROSS APPLY (
    SELECT pairs.[key], pairs.value
    FROM (VALUES
        ('ID', CAST(ID AS nvarchar(100))),
        ('Name', Name),
        ('Program', Program),
        ('Type', [Type]),
        ('Section', Section),
        ('Director', Director),
        ('Project', Project),
        ('Sr Manager', [Sr Manager]),
        ('PCM', PCM),
        ('Contractor', Contractor),
        ('Cost Client', [Cost Client])
    ) pairs([key], value)
    FOR JSON PATH
) kv(json)
db<>fiddle
You cannot use FOR JSON again, because then you will get ["json": [{"key" : ...

first of all check this link you can find what you want
format-query-results-as-json-with-for-json-sql-server
but in your case you can try this
-- Column names that contain spaces must be quoted with [brackets];
-- the original answer accidentally split them into separate,
-- nonexistent columns (Sr, Manager, Cost, Client).
SELECT
ID, Name, Program, [Type], Section,
Director, Project, [Sr Manager], PCM, Contractor, [Cost Client]
FROM [table] -- placeholder name; 'table' is reserved, so bracket it
FOR JSON AUTO;
check the link there is more sample so it can help you

Related

How can I use FOR JSON to build JSON in this format?

I'd like to use FOR JSON to build a data payload for an HTTP Post call. My Source table can be recreated with this snippet:
-- Clean up any leftovers from a previous run.
drop table if exists #jsonData;
drop table if exists #jsonColumns;
-- Column-name list (one row per column) used for the "columns" array.
select
'carat' [column]
into #jsonColumns
union
select 'cut' union
select 'color' union
select 'clarity' union
select 'depth' union
select 'table' union
select 'x' union
select 'y' union
select 'z'
-- Sample data rows used for the "data" array.
select
0.23 carat
,'Ideal' cut
,'E' color
,'SI2' clarity
,61.5 depth
,55.0 [table]
,3.95 x
,3.98 y
,2.43 z
into #jsonData
union
select 0.21,'Premium','E','SI1',59.8,61.0,3.89,3.84,2.31 union
select 0.29,'Premium','I','VS2',62.4,58.0,4.2,4.23,2.63 union
select 0.31,'Good','J','SI2',63.3,58.0,4.34,4.35,2.75
;
The data needs to be formatted as follows:
{
"columns":["carat","cut","color","clarity","depth","table","x","y","z"],
"data":[
[0.23,"Ideal","E","SI2",61.5,55.0,3.95,3.98,2.43],
[0.21,"Premium","E","SI1",59.8,61.0,3.89,3.84,2.31],
[0.23,"Good","E","VS1",56.9,65.0,4.05,4.07,2.31],
[0.29,"Premium","I","VS2",62.4,58.0,4.2,4.23,2.63],
[0.31,"Good","J","SI2",63.3,58.0,4.34,4.35,2.75]
]
}
My attempts thus far is as follows:
-- Attempt described in the question: each subquery uses FOR JSON PATH,
-- so every row becomes an object like {"column":"carat"} instead of a
-- bare value -- hence the unwanted arrays of objects.
select
(select * from #jsonColumns for json path) as [columns],
(select * from #jsonData for json path) as [data]
for json path, without_array_wrapper
However this returns arrays of objects rather than values, like so:
{
"columns":[
{"column":"carat"},
{"column":"clarity"},
{"column":"color"},
{"column":"cut"},
{"column":"depth"},
{"column":"table"},
{"column":"x"},
{"column":"y"},
{"column":"z"}
]...
}
How can I limit the arrays to only showing the values?
Honestly, this seems like it's going to be easier with string aggregation rather than using the JSON functionality.
Because you're using SQL Server 2016, you don't have access to STRING_AGG or CONCAT_WS, so the code is a lot longer. You have to make use of FOR XML PATH and STUFF instead and insert all the separators manually (which is why there are so many ',' in the CONCAT expression). This results in the below:
-- NOTE(review): the variable sigil was mangled from '@' to '#'; restored.
-- #jsonData is a genuine temp table and keeps its '#'.
-- Builds the {"columns":[...],"data":[...]} document by hand with
-- FOR XML PATH + STUFF string aggregation (pre-STRING_AGG SQL Server).
DECLARE @CRLF nchar(2) = NCHAR(13) + NCHAR(10);
SELECT N'{' + @CRLF +
-- "columns": read the column names of #jsonData from tempdb metadata.
N' "columns":[' + STUFF((SELECT ',' + QUOTENAME(c.[name],'"')
FROM tempdb.sys.columns c
JOIN tempdb.sys.tables t ON c.object_id = t.object_id
WHERE t.[name] LIKE N'#jsonData%' --Like isn't needed if not a temporary table. Use the literal name.
ORDER BY c.column_id ASC
FOR XML PATH(N''),TYPE).value('.','nvarchar(MAX)'),1,1,N'') + N'],' + @CRLF +
-- "data": one bracketed value list per row, strings quoted with ".
N' "data":[' + @CRLF +
STUFF((SELECT N',' + @CRLF +
N' ' + CONCAT('[',JD.carat,',',QUOTENAME(JD.cut,'"'),',',QUOTENAME(JD.color,'"'),',',QUOTENAME(JD.clarity,'"'),',',JD.depth,',',JD.[table],',',JD.x,',',JD.y,',',JD.z,']')
FROM #jsonData JD
ORDER BY JD.carat ASC
FOR XML PATH(N''),TYPE).value('.','nvarchar(MAX)'),1,3,N'') + @CRLF +
N' ]' + @CRLF +
N'}';
DB<>Fiddle

How can I use json values as columns in a query SQL?

I just want to receive a json data and use it's fields as a table column for another queries.
I'm trying to make the value in the key "nameProperty" into a column in a table, and have the values of the "newValue" keys fill the rows of that column.
For example:
i get a json file like this
{
"operation":{
"ID":"ABC",
"KinshipDescription":"--"
},
"fields":[
{
"property":{
"nameProperty":"ID",
"oldValue":"",
"newValue":"123456",
"confirmed":"false",
"labelProperty":"ID",
"oldValueDescription":"",
"newValueDescription":"123456"
}
},
{
"property":{
"nameProperty":"Name",
"oldValue":"",
"newValue":"John",
"confirmed":"false",
"labelProperty":"Name",
"oldValueDescription":"",
"newValueDescription":"John"
}
}
]
}
I want to extract the objects in the list "fields", but I can only produce one row per key and another row for the values, as the script below does.
-- NOTE(review): the variable sigil was mangled from '@' to '#'; restored.
DECLARE @jsonObj NVARCHAR(MAX)
--Set a result in
SET @jsonObj = (select JSON_Query(data, '$.fields') from [table] where id = 'ABC') -- 'table' is a placeholder name; bracketed because it is a reserved word
-- Shred the "fields" array: one output row per array element.
select * from openjson(@jsonObj)
with (Property nvarchar(255) '$.property.nameProperty',
newValue nvarchar(50) '$.property.newValue')
and I have no idea how I can do this
the results of this script is something like this
ID 123456
Name John
and the results that i want to see is
ID Name --column name, not a row
123456 John
The quickest (thought-wise, not necessarily performance) way I can come up with on this is using dynamic SQL. In fact, I'm pretty certain you'll have to use it.
Here's an example that can get you moving. You can run this in SSMS.
-- NOTE(review): variable sigils were mangled from '@' to '#'; restored.
-- Turns the "fields" array into a single-row result whose column names
-- come from nameProperty and whose values come from newValue, by
-- building and executing a dynamic SELECT.
DECLARE @json NVARCHAR(MAX) =
'{
"operation":{
"ID":"ABC",
"KinshipDescription":"--"
},
"fields":[
{
"property":{
"nameProperty":"ID",
"oldValue":"",
"newValue":"123456",
"confirmed":"false",
"labelProperty":"ID",
"oldValueDescription":"",
"newValueDescription":"123456"
}
},
{
"property":{
"nameProperty":"Name",
"oldValue":"",
"newValue":"John",
"confirmed":"false",
"labelProperty":"Name",
"oldValueDescription":"",
"newValueDescription":"John"
}
}
]
}';
-- Variable to hold the column/values.
DECLARE @cols VARCHAR(MAX) = '';
-- Generate the column/value pairs.
SELECT
@cols = @cols
+ CASE WHEN ( LEN( @cols ) > 0 ) THEN ', ' ELSE '' END -- add comma if needed.
+ '''' + Properties.newValue + ''' AS [' + Properties.nameProperty + '] '
FROM OPENJSON( @json, '$.fields' ) WITH (
property NVARCHAR(MAX) '$.property' AS JSON
)
CROSS APPLY (
SELECT * FROM OPENJSON( property ) WITH (
nameProperty VARCHAR(50) '$.nameProperty',
oldValue VARCHAR(50) '$.oldValue',
newValue VARCHAR(50) '$.newValue',
confirmed VARCHAR(50) '$.confirmed',
labelProperty VARCHAR(50) '$.labelProperty',
oldValueDescription VARCHAR(50) '$.oldValueDescription',
newValueDescription VARCHAR(50) '$.newValueDescription'
)
) AS Properties;
-- Execute column/value pairs as dynamic SQL.
EXEC ( 'SELECT ' + @cols );
Which returns:
+--------+------+
| ID | Name |
+--------+------+
| 123456 | John |
+--------+------+
If you were to PRINT #cols you would see
'123456' AS [ID] , 'John' AS [Name]
A few quick notes:
Performance may vary.
Values are quoted but can be CAST if needed.
Included all 'property' fields in CROSS APPLY for example. Only specify what is needed.
Note the use of NVARCHAR when using AS JSON
May want to consider OUTER APPLY if there's potential for no 'property' present.

What is the easiest way to output nested json in SQL Server? [duplicate]

I need to create a JSON output from a query that uses inner join between two tables with a one to many relationship.
I would like the values of the secondary table to be nested as array properties of the primary table.
Consider the following example:
-- NOTE(review): table-variable sigils were mangled from '@' to '#';
-- restored (T-SQL table variables must be named @Name).
DECLARE @Persons AS TABLE
(
person_id int primary key,
person_name varchar(20)
)
DECLARE @Pets AS TABLE
(
pet_owner int, -- in real tables, this would be a foreign key
pet_id int primary key,
pet_name varchar(10)
)
INSERT INTO @Persons (person_id, person_name) VALUES
(2, 'Jack'),
(3, 'Jill')
INSERT INTO @Pets (pet_owner, pet_id, pet_name) VALUES
(2, 4, 'Bug'),
(2, 5, 'Feature'),
(3, 6, 'Fiend')
And query:
-- NOTE(review): variable sigils were mangled from '@' to '#'; restored.
-- Joining one-to-many flattens each (person, pet) pair into its own object.
DECLARE @Result as varchar(max)
SET @Result =
(
SELECT person_id as [person.id],
person_name as [person.name],
pet_id as [person.pet.id],
pet_name as [person.pet.name]
FROM @Persons
JOIN @Pets ON person_id = pet_owner
FOR JSON PATH, ROOT('pet owners')
)
PRINT @Result
This will print the following JSON:
{
"pet owners":
[
{"person":{"id":2,"name":"Jack","pet":{"id":4,"name":"Bug"}}},
{"person":{"id":2,"name":"Jack","pet":{"id":5,"name":"Feature"}}},
{"person":{"id":3,"name":"Jill","pet":{"id":6,"name":"Fiend"}}}
]
}
However, I would like to have the pets data as arrays inside the owners data:
{
"pet owners":
[
{
"person":
{
"id":2,"name":"Jack","pet":
[
{"id":4,"name":"Bug"},
{"id":5,"name":"Feature"}
]
}
},
{
"person":
{
"id":3,"name":"Jill","pet":
{"id":6,"name":"Fiend"}
}
}
]
}
How can I do this?
You can use the following query:
-- NOTE(review): table-variable sigils were mangled from '@' to '#'; restored.
-- The correlated subquery with its own FOR JSON PATH builds the nested
-- "person.pet" array for each owner.
SELECT pr.person_id AS [person.id], pr.person_name AS [person.name],
(
SELECT pt.pet_id AS id, pt.pet_name AS name
FROM @Pets pt WHERE pt.pet_owner=pr.person_id
FOR JSON PATH
) AS [person.pet]
FROM @Persons pr
FOR JSON PATH, ROOT('pet owners')
For more information, see https://blogs.msdn.microsoft.com/sqlserverstorageengine/2015/10/09/returning-child-rows-formatted-as-json-in-sql-server-queries/
With deeply nested arrays the subqueries get unmanageable quickly:
select id,foo, (select id, bar, (select ... for json path) things,
(select...) more_things) yet_more, select(...) blarg
I create a relational (non-json) view that joins all my tables and has the json structure embedded in the column aliases, just like for json path does. But I also have [] to indicate that the json node is an array. Like this:
select p.id [id], p.foo [foo], c.name [children[].name], c.id [children[].id],
gp.name [grandparent.name], gc.name [children[].grandchildren[].name]
from parent p
join children c on c.parent_id = p.id .....
I wrote a stored procedure that creates a json view into the non-json view that parses the column names of the relational view and makes the json pretty. See below. Call it with the name of your relational view and it creates a view. It's not thoroughly tested but it works for me. Only caveat is that tables need to have id columns called id. It uses string_agg() and json_array(), so the version of SQL needs to be pretty new. It's also set up to return an array in the root. It will need tweaking to return an object.
-- NOTE(review): parameter/variable sigils were mangled from '@' to '#';
-- restored below. #doc_schema is a genuine temp table and keeps its '#'.
-- Generates a <view>_json view that renders a relational view (whose
-- column aliases encode a JSON path, with [] marking arrays) as nested
-- FOR JSON PATH subqueries.
create procedure create_json_from_view
@view_name varchar(max)
as
create table #doc_schema (
node_level int, -- nesting level starting with 0
node_name varchar(max), -- alias used for this nodes query
node_path varchar(max), -- full path to this node
parent_path varchar(max), -- full path to it's parents
is_array bit, -- is node marked as array by ending with []
select_columns varchar(max),-- comma separated path/alias pairs for selected columns on node
group_by_columns varchar(max), -- comma separated paths for selected columns on node. group by is necessary to prevent duplicates
node_parent_id varchar(max), -- the id column path to join subquery to parent. NOTE: ID COLUMN MUST BE CALLED ID
from_clause varchar(max), -- from clause built from above fields
node_query varchar(max) -- complete query built from above fields
)
/* get each node path from view schema
*/
INSERT INTO #doc_schema (node_path)
select distinct LEFT(COLUMN_NAME,CHARINDEX('.'+ VALUE + '.',COLUMN_NAME) + LEN(VALUE)) node_path
FROM INFORMATION_SCHEMA.COLUMNS
CROSS APPLY STRING_SPLIT(COLUMN_NAME, '.')
WHERE CHARINDEX('.',COLUMN_NAME) > 0
AND RIGHT(COLUMN_NAME,LEN(VALUE)) <> VALUE
and table_name = @view_name
/* node_name past rightmost period or the same as node_path if there is no period
also remove [] from arrays
*/
update #doc_schema set node_name =
case when charindex('.',node_path) = 0 then replace(node_path,'[]','')
else REPLACE(right(node_path,charindex('.',reverse(node_path)) - 1),'[]','') end
/* if path ends with [] node is array
escapes are necessary because [] have meaning for like
*/
update #doc_schema set is_array =
case when node_path like '%\[\]' escape '\' then 1 else 0 end --\
/* parent path is everything before last . in node path
except when the parent is the root, in which case parent is empty string
*/
update #doc_schema set parent_path =
case when charindex('.',node_path) = 0 then ''
else left(node_path,len(node_path) - charindex('.',reverse(node_path))) end
/* level is how many . in path. an ugly way to count.
*/
update #doc_schema set node_level = len(node_path) - len(replace(node_path,'.','')) + 1
/* set up root node
*/
insert into #doc_schema (node_path,node_name,parent_path,node_level,is_array)
select '','',null,0,1
/* I'm sorry this is so ugly. I just gave up on explaining
all paths need to be wrapped in [] and internal ] need to be escaped as ]]
*/
update #doc_schema set select_columns = sub2.select_columns, group_by_columns = sub2.group_by_columns
from (
select node_path,string_agg(column_path + ' ' + column_name,',') select_columns,
string_agg(column_path,',') group_by_columns
from (
select ds.node_path,'['+replace(c.COLUMN_NAME,']',']]')+']' column_path,replace(c.column_name,ds.node_path + '.','') column_name
from INFORMATION_SCHEMA.COLUMNS c
join #doc_schema ds
on (charindex(ds.node_path + '.', c.COLUMN_NAME) = 1
and charindex('.',replace(c.COLUMN_NAME,ds.node_path + '.','')) = 0)
or (ds.node_level = 0 and charindex('.',c.COLUMN_NAME) = 0)
where table_name = @view_name
) sub
group by node_path
) sub2
where #doc_schema.node_path = sub2.node_path
/* id paths for joining subqueries to parents
Again, they need to be wrapped in [] and internal ] need to be escaped as ]]
*/
update #doc_schema set node_parent_id =
case when parent_path = '' then '[id]'
else '[' + replace(parent_path,']',']]')+'.id]'
end
/* table aliases for joining subqueries to parents need to be unique
just use L0 L1 etc based on nesting level
*/
update #doc_schema set from_clause =
case when node_level = 0 then ' from ' + @view_name + ' L'+cast(node_level as varchar(4)) + ' '
else ' from ' + @view_name + ' L'+cast(node_level as varchar(4))+' where L'+cast(node_level - 1 as varchar(4))+'.'+ node_parent_id +
' = L'+cast(node_level as varchar(4))+'.'+ node_parent_id
end
/* Assemble node query from all parts
###subqueries### is a place to put subqueries for node
*/
update #doc_schema set node_query =
' (select ' + select_columns + ', ###subqueries###' + from_clause
+ ' group by '+ group_by_columns
+' for json path) '
/* json path will treat all objects as arrays so select first explicitly
to prevent [] in json
*/
update #doc_schema set node_query =
case when is_array = 0
then '(select JSON_query(' + node_query + ',''$[0]'')) ' + node_name
else node_query + node_name end -- NOTE(review): stray duplicated '+' removed here
/* starting with highest nesting level substitute child subqueries into
the subquery holders in their parents
*/
declare @counter int = (select max(node_level) from #doc_schema)
while(@counter >= 0)
begin
update #doc_schema set node_query = replace(node_query,'###subqueries###', subs.subqueries)
from
(select parent_path, string_agg(node_query,',') subqueries, node_level from #doc_schema
group by parent_path, node_level ) subs
where subs.node_level = @counter and
#doc_schema.node_path = subs.parent_path
set @counter -= 1
end
/* objects and arrays with no subobjects or subarrays still have subquery holder so remove them
*/
update #doc_schema set node_query = replace(node_query,', ###subqueries###', '') where node_level = 0
declare @query nvarchar(max) = (select node_query from #doc_schema where node_level = 0)
/* add wrapper to query to specify column name otherwise create view will fail
*/
set @query =
case when OBJECT_ID(@view_name + '_JSON', 'V') is NULL then 'create' else 'alter' end +
' view ' + @view_name + '_json as select' + @query + ' json'
exec sp_executesql @query
I have made the below JSON format by following @Razvan Socol's answer.
JSON
[
  {
    "domain_nm": "transactions",
    "tables": [
      {
        "tableName": "transactions_details",
        "cols": [
          {
            "col_nm": "audit_transactions_details_guid",
            "col_data_typ": "string"
          }
        ]
      }
    ]
  }
]
SQL
-- Outer level: one JSON object per DOMAIN_NM, with a nested "tables" array.
select outer1.DOMAIN_NM as domain_nm,
-- Middle level: one object per table name within the domain.
(select inner2.TBL_NM as tableName,
-- Inner level: the columns of each table, aggregated with FOR JSON PATH.
(select inner1.COL_NM as col_nm, inner1.COL_DATA_TYP as col_data_typ
from ONBD_MTDT.CDM_TBL inner1
where inner1.TBL_NM=inner2.TBL_NM
FOR JSON PATH ) as cols
from ONBD_MTDT.CDM_TBL inner2
where inner2.DOMAIN_NM=outer1.DOMAIN_NM
group by inner2.DOMAIN_NM,inner2.TBL_NM
FOR JSON PATH ) as tables
from ONBD_MTDT.CDM_TBL outer1
group by outer1.DOMAIN_NM
FOR JSON PATH
It can be implemented like this
-- NOTE(review): the parameter sigil was mangled from '@' to '#'; restored.
-- Dotted aliases ('MailingAddress.X') make FOR JSON PATH nest those
-- columns into a "MailingAddress" sub-object.
select OwnerFirstName, OwnerMiddleName , OwnerLastName, OwnerNumber,
ContactOwnerMailAddressUnit 'MailingAddress.UnitNumber',
ContactOwnerMailAddressUnitPrefix 'MailingAddress.UnitType',
-- Build the street address, skipping empty house-number/suffix parts.
case when ContactOwnerMailAddressHouseNumber='' then '' else ContactOwnerMailAddressHouseNumber + ' ' end+
ContactOwnerMailAddressStreetName +
case when ContactOwnerMailAddressStreetSuffix='' then '' else ' ' + ContactOwnerMailAddressStreetSuffix end 'MailingAddress.StreetAddress',
ContactOwnerMailAddressCity 'MailingAddress.City',
ContactOwnerMailAddressState 'MailingAddress.State',
ContactOwnerMailAddressZIP 'MailingAddress.ZipCode'
from T_Owners
join T_OwnersPropertiesMapping
on T_OwnersPropertiesMapping.OwnerID = T_Owners.OwnerID
where T_OwnersPropertiesMapping.PropertyID=@PropertyID
for json path
And here is result
[
{
"OwnerFirstName": "Bon 7360318",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 3,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
},
{
"OwnerFirstName": "Bon 6717425",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 1,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
}
]
Now, you’re flying completely blind. If the person who designed the API is sane, it is probably safe to assume that it will return an array of some kind of user objects — but what data each of the user objects actually carries can in no way be derived just from looking at this endpoint.

Generate object with an array of multiple objects and adding total amount JSON_QUERY [duplicate]

I need to create a JSON output from a query that uses inner join between two tables with a one to many relationship.
I would like the values of the secondary table to be nested as array properties of the primary table.
Consider the following example:
-- NOTE(review): table-variable sigils were mangled from '@' to '#';
-- restored (T-SQL table variables must be named @Name).
DECLARE @Persons AS TABLE
(
person_id int primary key,
person_name varchar(20)
)
DECLARE @Pets AS TABLE
(
pet_owner int, -- in real tables, this would be a foreign key
pet_id int primary key,
pet_name varchar(10)
)
INSERT INTO @Persons (person_id, person_name) VALUES
(2, 'Jack'),
(3, 'Jill')
INSERT INTO @Pets (pet_owner, pet_id, pet_name) VALUES
(2, 4, 'Bug'),
(2, 5, 'Feature'),
(3, 6, 'Fiend')
And query:
-- NOTE(review): variable sigils were mangled from '@' to '#'; restored.
-- Joining one-to-many flattens each (person, pet) pair into its own object.
DECLARE @Result as varchar(max)
SET @Result =
(
SELECT person_id as [person.id],
person_name as [person.name],
pet_id as [person.pet.id],
pet_name as [person.pet.name]
FROM @Persons
JOIN @Pets ON person_id = pet_owner
FOR JSON PATH, ROOT('pet owners')
)
PRINT @Result
This will print the following JSON:
{
"pet owners":
[
{"person":{"id":2,"name":"Jack","pet":{"id":4,"name":"Bug"}}},
{"person":{"id":2,"name":"Jack","pet":{"id":5,"name":"Feature"}}},
{"person":{"id":3,"name":"Jill","pet":{"id":6,"name":"Fiend"}}}
]
}
However, I would like to have the pets data as arrays inside the owners data:
{
"pet owners":
[
{
"person":
{
"id":2,"name":"Jack","pet":
[
{"id":4,"name":"Bug"},
{"id":5,"name":"Feature"}
]
}
},
{
"person":
{
"id":3,"name":"Jill","pet":
{"id":6,"name":"Fiend"}
}
}
]
}
How can I do this?
You can use the following query:
-- NOTE(review): table-variable sigils were mangled from '@' to '#'; restored.
-- The correlated subquery with its own FOR JSON PATH builds the nested
-- "person.pet" array for each owner.
SELECT pr.person_id AS [person.id], pr.person_name AS [person.name],
(
SELECT pt.pet_id AS id, pt.pet_name AS name
FROM @Pets pt WHERE pt.pet_owner=pr.person_id
FOR JSON PATH
) AS [person.pet]
FROM @Persons pr
FOR JSON PATH, ROOT('pet owners')
For more information, see https://blogs.msdn.microsoft.com/sqlserverstorageengine/2015/10/09/returning-child-rows-formatted-as-json-in-sql-server-queries/
With deeply nested arrays the subqueries get unmanageable quickly:
select id,foo, (select id, bar, (select ... for json path) things,
(select...) more_things) yet_more, select(...) blarg
I create a relational (non-json) view that joins all my tables and has the json structure embedded in the column aliases, just like for json path does. But I also have [] to indicate that the json node is an array. Like this:
select p.id [id], p.foo [foo], c.name [children[].name], c.id [children[].id],
gp.name [grandparent.name], gc.name [children[].grandchildren[].name]
from parent p
join children c on c.parent_id = p.id .....
I wrote a stored procedure that creates a json view into the non-json view that parses the column names of the relational view and makes the json pretty. See below. Call it with the name of your relational view and it creates a view. It's not thoroughly tested but it works for me. Only caveat is that tables need to have id columns called id. It uses string_agg() and json_array(), so the version of SQL needs to be pretty new. It's also set up to return an array in the root. It will need tweaking to return an object.
-- NOTE(review): parameter/variable sigils were mangled from '@' to '#';
-- restored below. #doc_schema is a genuine temp table and keeps its '#'.
-- Generates a <view>_json view that renders a relational view (whose
-- column aliases encode a JSON path, with [] marking arrays) as nested
-- FOR JSON PATH subqueries.
create procedure create_json_from_view
@view_name varchar(max)
as
create table #doc_schema (
node_level int, -- nesting level starting with 0
node_name varchar(max), -- alias used for this nodes query
node_path varchar(max), -- full path to this node
parent_path varchar(max), -- full path to it's parents
is_array bit, -- is node marked as array by ending with []
select_columns varchar(max),-- comma separated path/alias pairs for selected columns on node
group_by_columns varchar(max), -- comma separated paths for selected columns on node. group by is necessary to prevent duplicates
node_parent_id varchar(max), -- the id column path to join subquery to parent. NOTE: ID COLUMN MUST BE CALLED ID
from_clause varchar(max), -- from clause built from above fields
node_query varchar(max) -- complete query built from above fields
)
/* get each node path from view schema
*/
INSERT INTO #doc_schema (node_path)
select distinct LEFT(COLUMN_NAME,CHARINDEX('.'+ VALUE + '.',COLUMN_NAME) + LEN(VALUE)) node_path
FROM INFORMATION_SCHEMA.COLUMNS
CROSS APPLY STRING_SPLIT(COLUMN_NAME, '.')
WHERE CHARINDEX('.',COLUMN_NAME) > 0
AND RIGHT(COLUMN_NAME,LEN(VALUE)) <> VALUE
and table_name = @view_name
/* node_name past rightmost period or the same as node_path if there is no period
also remove [] from arrays
*/
update #doc_schema set node_name =
case when charindex('.',node_path) = 0 then replace(node_path,'[]','')
else REPLACE(right(node_path,charindex('.',reverse(node_path)) - 1),'[]','') end
/* if path ends with [] node is array
escapes are necessary because [] have meaning for like
*/
update #doc_schema set is_array =
case when node_path like '%\[\]' escape '\' then 1 else 0 end --\
/* parent path is everything before last . in node path
except when the parent is the root, in which case parent is empty string
*/
update #doc_schema set parent_path =
case when charindex('.',node_path) = 0 then ''
else left(node_path,len(node_path) - charindex('.',reverse(node_path))) end
/* level is how many . in path. an ugly way to count.
*/
update #doc_schema set node_level = len(node_path) - len(replace(node_path,'.','')) + 1
/* set up root node
*/
insert into #doc_schema (node_path,node_name,parent_path,node_level,is_array)
select '','',null,0,1
/* I'm sorry this is so ugly. I just gave up on explaining
all paths need to be wrapped in [] and internal ] need to be escaped as ]]
*/
update #doc_schema set select_columns = sub2.select_columns, group_by_columns = sub2.group_by_columns
from (
select node_path,string_agg(column_path + ' ' + column_name,',') select_columns,
string_agg(column_path,',') group_by_columns
from (
select ds.node_path,'['+replace(c.COLUMN_NAME,']',']]')+']' column_path,replace(c.column_name,ds.node_path + '.','') column_name
from INFORMATION_SCHEMA.COLUMNS c
join #doc_schema ds
on (charindex(ds.node_path + '.', c.COLUMN_NAME) = 1
and charindex('.',replace(c.COLUMN_NAME,ds.node_path + '.','')) = 0)
or (ds.node_level = 0 and charindex('.',c.COLUMN_NAME) = 0)
where table_name = @view_name
) sub
group by node_path
) sub2
where #doc_schema.node_path = sub2.node_path
/* id paths for joining subqueries to parents
Again, they need to be wrapped in [] and internal ] need to be escaped as ]]
*/
update #doc_schema set node_parent_id =
case when parent_path = '' then '[id]'
else '[' + replace(parent_path,']',']]')+'.id]'
end
/* table aliases for joining subqueries to parents need to be unique
just use L0 L1 etc based on nesting level
*/
update #doc_schema set from_clause =
case when node_level = 0 then ' from ' + @view_name + ' L'+cast(node_level as varchar(4)) + ' '
else ' from ' + @view_name + ' L'+cast(node_level as varchar(4))+' where L'+cast(node_level - 1 as varchar(4))+'.'+ node_parent_id +
' = L'+cast(node_level as varchar(4))+'.'+ node_parent_id
end
/* Assemble node query from all parts
###subqueries### is a place to put subqueries for node
*/
update #doc_schema set node_query =
' (select ' + select_columns + ', ###subqueries###' + from_clause
+ ' group by '+ group_by_columns
+' for json path) '
/* json path will treat all objects as arrays so select first explicitly
to prevent [] in json
*/
update #doc_schema set node_query =
case when is_array = 0
then '(select JSON_query(' + node_query + ',''$[0]'')) ' + node_name
else node_query + node_name end -- NOTE(review): stray duplicated '+' removed here
/* starting with highest nesting level substitute child subqueries into
the subquery holders in their parents
*/
declare @counter int = (select max(node_level) from #doc_schema)
while(@counter >= 0)
begin
update #doc_schema set node_query = replace(node_query,'###subqueries###', subs.subqueries)
from
(select parent_path, string_agg(node_query,',') subqueries, node_level from #doc_schema
group by parent_path, node_level ) subs
where subs.node_level = @counter and
#doc_schema.node_path = subs.parent_path
set @counter -= 1
end
/* objects and arrays with no subobjects or subarrays still have subquery holder so remove them
*/
update #doc_schema set node_query = replace(node_query,', ###subqueries###', '') where node_level = 0
declare @query nvarchar(max) = (select node_query from #doc_schema where node_level = 0)
/* add wrapper to query to specify column name otherwise create view will fail
*/
set @query =
case when OBJECT_ID(@view_name + '_JSON', 'V') is NULL then 'create' else 'alter' end +
' view ' + @view_name + '_json as select' + @query + ' json'
exec sp_executesql @query
I have made the below JSON format by following @Razvan Socol's answer.
JSON
[
  {
    "domain_nm": "transactions",
    "tables": [
      {
        "tableName": "transactions_details",
        "cols": [
          {
            "col_nm": "audit_transactions_details_guid",
            "col_data_typ": "string"
          }
        ]
      }
    ]
  }
]
SQL
-- Outer level: one JSON object per DOMAIN_NM, with a nested "tables" array.
select outer1.DOMAIN_NM as domain_nm,
-- Middle level: one object per table name within the domain.
(select inner2.TBL_NM as tableName,
-- Inner level: the columns of each table, aggregated with FOR JSON PATH.
(select inner1.COL_NM as col_nm, inner1.COL_DATA_TYP as col_data_typ
from ONBD_MTDT.CDM_TBL inner1
where inner1.TBL_NM=inner2.TBL_NM
FOR JSON PATH ) as cols
from ONBD_MTDT.CDM_TBL inner2
where inner2.DOMAIN_NM=outer1.DOMAIN_NM
group by inner2.DOMAIN_NM,inner2.TBL_NM
FOR JSON PATH ) as tables
from ONBD_MTDT.CDM_TBL outer1
group by outer1.DOMAIN_NM
FOR JSON PATH
It can be implemented like this
-- NOTE(review): the parameter sigil was mangled from '@' to '#'; restored.
-- Dotted aliases ('MailingAddress.X') make FOR JSON PATH nest those
-- columns into a "MailingAddress" sub-object.
select OwnerFirstName, OwnerMiddleName , OwnerLastName, OwnerNumber,
ContactOwnerMailAddressUnit 'MailingAddress.UnitNumber',
ContactOwnerMailAddressUnitPrefix 'MailingAddress.UnitType',
-- Build the street address, skipping empty house-number/suffix parts.
case when ContactOwnerMailAddressHouseNumber='' then '' else ContactOwnerMailAddressHouseNumber + ' ' end+
ContactOwnerMailAddressStreetName +
case when ContactOwnerMailAddressStreetSuffix='' then '' else ' ' + ContactOwnerMailAddressStreetSuffix end 'MailingAddress.StreetAddress',
ContactOwnerMailAddressCity 'MailingAddress.City',
ContactOwnerMailAddressState 'MailingAddress.State',
ContactOwnerMailAddressZIP 'MailingAddress.ZipCode'
from T_Owners
join T_OwnersPropertiesMapping
on T_OwnersPropertiesMapping.OwnerID = T_Owners.OwnerID
where T_OwnersPropertiesMapping.PropertyID=@PropertyID
for json path
And here is result
[
{
"OwnerFirstName": "Bon 7360318",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 3,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
},
{
"OwnerFirstName": "Bon 6717425",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 1,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
}
]
Now, you’re flying completely blind. If the person who designed the API is sane, it is probably safe to assume that it will return an array of some kind of user objects — but what data each of the user objects actually carries can in no way be derived just from looking at this endpoint.

Create nested JSON arrays using FOR JSON PATH

I need to create a JSON output from a query that uses inner join between two tables with a one to many relationship.
I would like the values of the secondary table to be nested as array properties of the primary table.
Consider the following example:
-- Sample data: a one-to-many relationship between persons and their pets.
-- Fix vs. original transcript: table variables must be declared with the '@'
-- sigil ('#' would denote a temp table, and DECLARE #x AS TABLE is invalid T-SQL).
DECLARE @Persons AS TABLE
(
    person_id int primary key,
    person_name varchar(20)
)
DECLARE @Pets AS TABLE
(
    pet_owner int, -- in real tables, this would be a foreign key
    pet_id int primary key,
    pet_name varchar(10)
)
INSERT INTO @Persons (person_id, person_name) VALUES
(2, 'Jack'),
(3, 'Jill')
INSERT INTO @Pets (pet_owner, pet_id, pet_name) VALUES
(2, 4, 'Bug'),
(2, 5, 'Feature'),
(3, 6, 'Fiend')
And query:
-- Flatten persons joined to their pets into JSON: each (person, pet) pair
-- becomes its own array element under the 'pet owners' root (pets are NOT
-- nested as arrays here -- that is the question being asked).
-- Fix vs. original transcript: '@' sigils restored on the variable and the
-- table-variable references ('#' is invalid for T-SQL variables).
DECLARE @Result as varchar(max)
SET @Result =
(
    SELECT person_id as [person.id],
           person_name as [person.name],
           pet_id as [person.pet.id],
           pet_name as [person.pet.name]
    FROM @Persons
    JOIN @Pets ON person_id = pet_owner
    FOR JSON PATH, ROOT('pet owners')
)
PRINT @Result
This will print the following JSON:
{
"pet owners":
[
{"person":{"id":2,"name":"Jack","pet":{"id":4,"name":"Bug"}}},
{"person":{"id":2,"name":"Jack","pet":{"id":5,"name":"Feature"}}},
{"person":{"id":3,"name":"Jill","pet":{"id":6,"name":"Fiend"}}}
]
}
However, I would like to have the pets data as arrays inside the owners data:
{
"pet owners":
[
{
"person":
{
"id":2,"name":"Jack","pet":
[
{"id":4,"name":"Bug"},
{"id":5,"name":"Feature"}
]
}
},
{
"person":
{
"id":3,"name":"Jill","pet":
{"id":6,"name":"Fiend"}
}
}
]
}
How can I do this?
You can use the following query:
-- Nest each person's pets as a JSON array by running a correlated
-- FOR JSON PATH subquery per person: one outer row -> one "person" object
-- whose "pet" property is the array produced by the subquery.
-- Fix vs. original transcript: table-variable references restored to '@' sigils.
SELECT pr.person_id AS [person.id],
       pr.person_name AS [person.name],
       (
           SELECT pt.pet_id AS id, pt.pet_name AS name
           FROM @Pets pt
           WHERE pt.pet_owner = pr.person_id
           FOR JSON PATH
       ) AS [person.pet]
FROM @Persons pr
FOR JSON PATH, ROOT('pet owners')
For more information, see https://blogs.msdn.microsoft.com/sqlserverstorageengine/2015/10/09/returning-child-rows-formatted-as-json-in-sql-server-queries/
With deeply nested arrays the subqueries get unmanageable quickly:
select id,foo, (select id, bar, (select ... for json path) things,
(select...) more_things) yet_more, select(...) blarg
I create a relational (non-json) view that joins all my tables and has the json structure embedded in the column aliases, just like for json path does. But I also have [] to indicate that the json node is an array. Like this:
select p.id [id], p.foo [foo], c.name [children[].name], c.id [children[].id],
gp.name [grandparent.name], gc.name [children[].grandchildren[].name]
from parent p
join children c on c.parent_id = p.id .....
I wrote a stored procedure that creates a json view into the non-json view that parses the column names of the relational view and makes the json pretty. See below. Call it with the name of your relational view and it creates a view. It's not thoroughly tested but it works for me. Only caveat is that tables need to have id columns called id. It uses string_agg() and json_array() to the version of sql needs to be pretty new. It's also set up to return an array in the root. It will need tweaking to return an object.
/* Generate a view named <view_name>_json that renders the rows of the given
   relational view as nested JSON. The relational view encodes the JSON shape
   in its column aliases: dots separate nesting levels and a [] suffix marks
   an array node (e.g. [children[].name]). Requires string_agg()/json_query()
   (SQL Server 2017+); every table must expose an id column literally named id;
   the generated view returns an array at the root.
   Fixes vs. the original transcript: scalar variable/parameter sigils restored
   to '@' ('#' is invalid for T-SQL variables; the #doc_schema temp table is
   legitimately '#') and a stray doubled '+' removed in the array branch. */
create procedure create_json_from_view
@view_name varchar(max)
as
create table #doc_schema (
node_level int, -- nesting level starting with 0
node_name varchar(max), -- alias used for this node's query
node_path varchar(max), -- full path to this node
parent_path varchar(max), -- full path to its parent
is_array bit, -- is node marked as array by ending with []
select_columns varchar(max),-- comma separated path/alias pairs for selected columns on node
group_by_columns varchar(max), -- comma separated paths for selected columns on node. group by is necessary to prevent duplicates
node_parent_id varchar(max), -- the id column path to join subquery to parent. NOTE: ID COLUMN MUST BE CALLED ID
from_clause varchar(max), -- from clause built from above fields
node_query varchar(max) -- complete query built from above fields
)
/* get each node path from view schema
*/
INSERT INTO #doc_schema (node_path)
select distinct LEFT(COLUMN_NAME,CHARINDEX('.'+ VALUE + '.',COLUMN_NAME) + LEN(VALUE)) node_path
FROM INFORMATION_SCHEMA.COLUMNS
CROSS APPLY STRING_SPLIT(COLUMN_NAME, '.')
WHERE CHARINDEX('.',COLUMN_NAME) > 0
AND RIGHT(COLUMN_NAME,LEN(VALUE)) <> VALUE
and table_name = @view_name
/* node_name is everything past the rightmost period, or the same as node_path
   if there is no period; also remove [] from arrays
*/
update #doc_schema set node_name =
case when charindex('.',node_path) = 0 then replace(node_path,'[]','')
else REPLACE(right(node_path,charindex('.',reverse(node_path)) - 1),'[]','') end
/* if path ends with [] the node is an array;
   escapes are necessary because [ and ] are wildcards for LIKE
*/
update #doc_schema set is_array =
case when node_path like '%\[\]' escape '\' then 1 else 0 end
/* parent path is everything before the last . in node path,
   except when the parent is the root, in which case parent is the empty string
*/
update #doc_schema set parent_path =
case when charindex('.',node_path) = 0 then ''
else left(node_path,len(node_path) - charindex('.',reverse(node_path))) end
/* level is how many . in path. an ugly way to count.
*/
update #doc_schema set node_level = len(node_path) - len(replace(node_path,'.','')) + 1
/* set up root node
*/
insert into #doc_schema (node_path,node_name,parent_path,node_level,is_array)
select '','',null,0,1
/* build the per-node select/group-by lists:
   all paths need to be wrapped in [] and internal ] need to be escaped as ]]
*/
update #doc_schema set select_columns = sub2.select_columns, group_by_columns = sub2.group_by_columns
from (
select node_path,string_agg(column_path + ' ' + column_name,',') select_columns,
string_agg(column_path,',') group_by_columns
from (
select ds.node_path,'['+replace(c.COLUMN_NAME,']',']]')+']' column_path,replace(c.column_name,ds.node_path + '.','') column_name
from INFORMATION_SCHEMA.COLUMNS c
join #doc_schema ds
on (charindex(ds.node_path + '.', c.COLUMN_NAME) = 1
and charindex('.',replace(c.COLUMN_NAME,ds.node_path + '.','')) = 0)
or (ds.node_level = 0 and charindex('.',c.COLUMN_NAME) = 0)
where table_name = @view_name
) sub
group by node_path
) sub2
where #doc_schema.node_path = sub2.node_path
/* id paths for joining subqueries to parents.
   Again, they need to be wrapped in [] and internal ] escaped as ]]
*/
update #doc_schema set node_parent_id =
case when parent_path = '' then '[id]'
else '[' + replace(parent_path,']',']]')+'.id]'
end
/* table aliases for joining subqueries to parents need to be unique;
   just use L0 L1 etc based on nesting level
*/
update #doc_schema set from_clause =
case when node_level = 0 then ' from ' + @view_name + ' L'+cast(node_level as varchar(4)) + ' '
else ' from ' + @view_name + ' L'+cast(node_level as varchar(4))+' where L'+cast(node_level - 1 as varchar(4))+'.'+ node_parent_id +
' = L'+cast(node_level as varchar(4))+'.'+ node_parent_id
end
/* Assemble node query from all parts.
   ###subqueries### is a placeholder where this node's subqueries are spliced in
*/
update #doc_schema set node_query =
' (select ' + select_columns + ', ###subqueries###' + from_clause
+ ' group by '+ group_by_columns
+' for json path) '
/* json path will treat all objects as arrays so select the first element
   explicitly to prevent [] in the json for non-array nodes
*/
update #doc_schema set node_query =
case when is_array = 0
then '(select JSON_query(' + node_query + ',''$[0]'')) ' + node_name
else node_query + node_name end -- was 'node_query + + node_name': unary '+' is invalid on strings
/* starting with the highest nesting level, substitute child subqueries into
   the subquery placeholder of their parents
*/
declare @counter int = (select max(node_level) from #doc_schema)
while(@counter >= 0)
begin
update #doc_schema set node_query = replace(node_query,'###subqueries###', subs.subqueries)
from
(select parent_path, string_agg(node_query,',') subqueries, node_level from #doc_schema
group by parent_path, node_level ) subs
where subs.node_level = @counter and
#doc_schema.node_path = subs.parent_path
set @counter -= 1
end
/* objects and arrays with no subobjects or subarrays still have the subquery
   placeholder, so remove it
*/
update #doc_schema set node_query = replace(node_query,', ###subqueries###', '') where node_level = 0
declare @query nvarchar(max) = (select node_query from #doc_schema where node_level = 0)
/* add a wrapper to the query to specify the column name, otherwise create view will fail
*/
set @query =
case when OBJECT_ID(@view_name + '_JSON', 'V') is NULL then 'create' else 'alter' end +
' view ' + @view_name + '_json as select' + @query + ' json'
exec sp_executesql @query
I have produced the JSON format below by following @Razvan Socol's answer.
JSON
[
  {
    "domain_nm": "transactions",
    "tables": [
      {
        "tableName": "transactions_details",
        "cols": [
          {
            "col_nm": "audit_transactions_details_guid",
            "col_data_typ": "string"
          }
        ]
      }
    ]
  }
]
SQL
/* Build doubly nested JSON (domain -> tables -> cols) from the flat
   metadata table by correlating one FOR JSON PATH subquery per level.
   GROUP BY collapses the duplicate rows produced by the denormalized source. */
SELECT d.DOMAIN_NM AS domain_nm,
       (SELECT t.TBL_NM AS tableName,
               (SELECT c.COL_NM AS col_nm,
                       c.COL_DATA_TYP AS col_data_typ
                FROM ONBD_MTDT.CDM_TBL c
                WHERE c.TBL_NM = t.TBL_NM
                FOR JSON PATH) AS cols
        FROM ONBD_MTDT.CDM_TBL t
        WHERE t.DOMAIN_NM = d.DOMAIN_NM
        GROUP BY t.DOMAIN_NM, t.TBL_NM
        FOR JSON PATH) AS tables
FROM ONBD_MTDT.CDM_TBL d
GROUP BY d.DOMAIN_NM
FOR JSON PATH
It can be implemented like this
-- Owner details with the mailing address emitted as a nested JSON object
-- (the dotted aliases create the "MailingAddress" sub-object under FOR JSON PATH).
-- Fixes vs. original: variable sigil restored to @PropertyID (T-SQL variables
-- use '@', not '#'); deprecated 'string literal' column aliases replaced by AS [...].
select OwnerFirstName, OwnerMiddleName, OwnerLastName, OwnerNumber,
       ContactOwnerMailAddressUnit AS [MailingAddress.UnitNumber],
       ContactOwnerMailAddressUnitPrefix AS [MailingAddress.UnitType],
       -- Concatenate house number / street / suffix, inserting a space only
       -- next to non-empty parts so blank components leave no stray spaces.
       case when ContactOwnerMailAddressHouseNumber = '' then ''
            else ContactOwnerMailAddressHouseNumber + ' ' end +
       ContactOwnerMailAddressStreetName +
       case when ContactOwnerMailAddressStreetSuffix = '' then ''
            else ' ' + ContactOwnerMailAddressStreetSuffix end
           AS [MailingAddress.StreetAddress],
       ContactOwnerMailAddressCity AS [MailingAddress.City],
       ContactOwnerMailAddressState AS [MailingAddress.State],
       ContactOwnerMailAddressZIP AS [MailingAddress.ZipCode]
from T_Owners
inner join T_OwnersPropertiesMapping
    on T_OwnersPropertiesMapping.OwnerID = T_Owners.OwnerID
where T_OwnersPropertiesMapping.PropertyID = @PropertyID
for json path
And here is result
[
{
"OwnerFirstName": "Bon 7360318",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 3,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
},
{
"OwnerFirstName": "Bon 6717425",
"OwnerMiddleName": "Mr",
"OwnerLastName": "Jovi",
"OwnerNumber": 1,
"MailingAddress": {
"UnitNumber": "",
"UnitType": "",
"StreetAddress": "PO BOX 1736",
"City": "BOULDER CREEK",
"State": "CA",
"ZipCode": "95006"
}
}
]
Now, you’re flying completely blind. If the person who designed the API is sane, it is probably safe to assume that it will return an array of some kind of user objects — but what data each of the user objects actually carries can in no way be derived just from looking at this endpoint.