I have a N1QL query:
SELECT p.`ID`, p.`Name` FROM `Preferences` p WHERE `type` = "myType"
The result is a list of objects: [{"ID": "123", "Name": "John"}, ...]
I want to get a result JSON such as:
{
"count": 5,
"result": [{"ID": "123", "Name": "John"}, ...]
}
How could I do this using N1QL?
SELECT
COUNT(t.ID) AS count,
ARRAY_AGG(t) AS result
FROM
(
SELECT
p.`ID`, p.`Name`
FROM
`Preferences` p
WHERE `type` = "myType"
) AS t
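The same shape can also be built from subqueries in the projection (the pattern one of the Couchbase answers further down uses for items/totalItemsCount); a minimal sketch, assuming a Couchbase version that accepts non-correlated subqueries in the SELECT list:
SELECT (SELECT RAW COUNT(1)
        FROM `Preferences` p
        WHERE p.type = "myType")[0] AS count,
       (SELECT p.ID, p.Name
        FROM `Preferences` p
        WHERE p.type = "myType") AS result;
The [0] unwraps the single-element array returned by the COUNT subquery, so count comes back as a plain number.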
Currently I have this piece of code
DECLARE @json NVARCHAR(MAX)
SET @json =
N'[
{
"objOrg": {
"EmpIds": [
{
"Id": 101
},
{
"Id": 102
},
{
"Id": 103
}
]
}
}
]'
How can I return the EmpId values pivoted, such as:
Id1   Id2   Id3
101   102   103
OPENJSON without a schema returns the array index in its [key] column. Pass each inner object to OPENJSON again to parse out the Id, then pivot the final result using PIVOT or a conditional MAX(CASE ...) aggregate:
DECLARE @json nvarchar(max) =
N'[
{
"objOrg": {
"EmpIds": [
{
"Id": 101
},
{
"Id": 102
},
{
"Id": 103
}
]
}
}
]';
SELECT MAX(CASE WHEN arr.[key] = 0 THEN Id END) AS Id1,
       MAX(CASE WHEN arr.[key] = 1 THEN Id END) AS Id2,
       MAX(CASE WHEN arr.[key] = 2 THEN Id END) AS Id3
FROM OPENJSON(@json, '$[0].objOrg.EmpIds') arr
CROSS APPLY OPENJSON (arr.value)
WITH (
    Id int
) AS j;
-- alternatively
SELECT p.*
FROM (
SELECT arr.[key] + 1 AS [key], j.Id
FROM OPENJSON(@json, '$[0].objOrg.EmpIds') arr
CROSS APPLY OPENJSON (arr.value)
WITH (
Id int
) AS j
) j
PIVOT (
MAX(j.Id) FOR j.[key] IN
([1], [2], [3])
) p;
db<>fiddle
You can use OPENJSON() together with the ROW_NUMBER() window function, such as:
DECLARE
@json AS NVARCHAR(MAX),
@query AS NVARCHAR(MAX);
SET @json =
N'[
{
"objOrg": {
"EmpIds": [
{
"Id": 101
},
{
"Id": 102
},
{
"Id": 103
}
]
}
}
]';
SELECT j.*, ROW_NUMBER() OVER (ORDER BY j.Id) AS rn
INTO t_json
FROM OPENJSON(@json)
WITH (
JS NVARCHAR(MAX) '$.objOrg.EmpIds' AS JSON
) AS j0
CROSS APPLY OPENJSON (j0.JS)
WITH (
Id INT '$.Id'
) AS j;
SET @query = CONCAT('SELECT',
STUFF(
(SELECT CONCAT(', MAX(CASE WHEN rn=' , CAST(rn AS VARCHAR) , ' THEN Id END) AS Id', CAST(rn AS VARCHAR))
FROM t_json
ORDER BY rn
FOR XML PATH(''), type).value('.', 'NVARCHAR(MAX)'),
1,1,''
),' FROM t_json');
EXECUTE(@query);
Demo
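For the sample JSON above, the dynamically built @query resolves to the statement below, which is what EXECUTE ultimately runs (shown here only to make the dynamic pivot easier to follow):
SELECT MAX(CASE WHEN rn=1 THEN Id END) AS Id1,
       MAX(CASE WHEN rn=2 THEN Id END) AS Id2,
       MAX(CASE WHEN rn=3 THEN Id END) AS Id3
FROM t_json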
I'm learning PostgreSQL and JSON.
I have, for example, a database like this:
-- departments must be created first, since employees references it
CREATE TABLE departments (
department_id bigint primary key,
name text
);
CREATE TABLE employees (
employee_id serial primary key,
department_id integer references departments(department_id),
name text,
start_date date,
fingers integer,
geom geometry(point, 4326)
);
INSERT INTO departments
(department_id, name)
VALUES
(1, 'spatial'),
(2, 'cloud');
INSERT INTO employees
(department_id, name, start_date, fingers, geom)
VALUES
(1, 'Paul', '2018/09/02', 10, 'POINT(-123.32977 48.40732)'),
(1, 'Martin', '2019/09/02', 9, 'POINT(-123.32977 48.40732)'),
(2, 'Craig', '2019/11/01', 10, 'POINT(-122.33207 47.60621)'),
(2, 'Dan', '2020/10/01', 8, 'POINT(-122.33207 47.60621)');
How could I get the data like this:
[
{
"department_name": "cloud",
"employees": [
{
"name": "Craig",
"start_date": "2019-11-01"
},
{
"name": "Dan",
"start_date": "2020-10-01"
}
]
},
{
"department_name": "spatial",
"employees": [
{
"name": "Paul",
"start_date": "2018-09-02"
},
{
"name": "Martin",
"start_date": "2019-09-02"
}
]
}
]
Follow this link: https://dba.stackexchange.com/questions/69655/select-columns-inside-json-agg/200240#200240
CREATE TEMP TABLE x (
name text,
start_date date
);
WITH cte AS (
SELECT
d.name AS department_name,
json_agg((e.name, e.start_date)::x) AS employees
FROM
departments d
JOIN employees e ON d.department_id = e.department_id
GROUP BY
1
)
SELECT
json_agg((row_to_json(cte.*)))
FROM
cte;
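The composite-type trick from that link works, but on PostgreSQL 9.4+ you can skip the temp table and name the keys directly with json_build_object; a minimal sketch against the tables above (per_department is just an arbitrary subquery alias):
SELECT json_agg(json_build_object('department_name', department_name,
                                  'employees', employees)
                ORDER BY department_name) AS result
FROM (
    SELECT d.name AS department_name,
           json_agg(json_build_object('name', e.name,
                                      'start_date', e.start_date)
                    ORDER BY e.start_date) AS employees
    FROM departments d
    JOIN employees e ON e.department_id = d.department_id
    GROUP BY d.name
) per_department;
Dates are rendered in ISO format (e.g. "2019-11-01") with the default DateStyle, which matches the sample output.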
I have one order number which contains 4 SKUs, and each SKU is linked to 3 categories.
I need to write a SQL statement to convert this into JSON, which involves a subquery and multiple elements in an array. Basically, I need it to look like this:
{
"order_number": "WEB000000000",
"order_items": [
{
"sku": 111111111,
"categories": ["Checked Shirts", "Mens", "Shirts"]
},
{
"sku": 333333333,
"categories": ["Accessories & Shoes", "Mens Accessories", "Socks"]
},
{
"sku": 666666666,
"categories": ["Checked Shirts", "Mens", "Shirts"]
},
{
"sku": 999999999,
"categories": ["Nightwear", "Slippers", "Womens"]
}
]
}
Here's what I have so far but I just can't get it quite right:
DROP TABLE IF EXISTS ##Data;
CREATE TABLE ##Data
(
order_number varchar(100),
sku bigint,
categories varchar(100)
);
INSERT INTO ##Data
select 'WEB000000000', 111111111, 'Mens'
union all select 'WEB000000000', 111111111, 'Shirts'
union all select 'WEB000000000', 111111111, 'Checked Shirts'
union all select 'WEB000000000', 333333333, 'Accessories & Shoes'
union all select 'WEB000000000', 333333333, 'Mens Accessories'
union all select 'WEB000000000', 333333333, 'Socks'
union all select 'WEB000000000', 666666666, 'Mens'
union all select 'WEB000000000', 666666666, 'Shirts'
union all select 'WEB000000000', 666666666, 'Checked Shirts'
union all select 'WEB000000000', 999999999, 'Womens'
union all select 'WEB000000000', 999999999, 'Nightwear'
union all select 'WEB000000000', 999999999, 'Slippers'
SELECT * FROM ##Data;
select OUTER1.[order_number] as [order_number],
(select OSL.[order_number],
(select [sku],
(select [categories]
from ##Data skus
where order_item.[order_number] = skus.[order_number]
and order_item.[sku] = skus.[sku]
GROUP BY [categories]
FOR JSON PATH) as categories
from ##Data order_item
where order_item.[order_number] = OSL.[order_number]
GROUP BY [order_number], [sku]
FOR JSON PATH) as order_items
from ##Data OSL
where OSL.[order_number]=OUTER1.[order_number]
group by OSL.[order_number]
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER) AS JSON
from ##Data OUTER1
group by OUTER1.[order_number]
drop table ##Data
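FOR JSON PATH can't emit an array of plain strings on its own, but the categories array can be built with STRING_AGG and passed through JSON_QUERY so it isn't escaped. A minimal sketch against the ##Data table above (run it before the final DROP TABLE), assuming SQL Server 2017+ for STRING_AGG:
SELECT o.order_number,
       (
           SELECT d.sku,
                  JSON_QUERY('[' + STRING_AGG('"' + STRING_ESCAPE(d.categories, 'json') + '"', ',')
                                     WITHIN GROUP (ORDER BY d.categories) + ']') AS categories
           FROM ##Data d
           WHERE d.order_number = o.order_number
           GROUP BY d.sku
           FOR JSON PATH
       ) AS order_items
FROM (SELECT DISTINCT order_number FROM ##Data) o
FOR JSON PATH, WITHOUT_ARRAY_WRAPPER;
JSON_QUERY here is the usual trick to stop FOR JSON from escaping the hand-built array string.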
Trying to figure out how to make my Couchbase query return an object like so:
{
items: [],
totalItemsCount: T<number>,
}
My select is formatted like so:
SELECT a.*, ( SELECT COUNT(*) FROM table b WHERE b.environment = "test" AND b.DocType = "GM360.User") as Count
FROM table a WHERE a.environment = "test"
AND a.DocType = "Moderator.User"
limit 5 offset (5 * (1 -1) )
And the result looks like:
[
{ Accounts: [], UserId: 1, Count: 199 },
{ Accounts: [], UserId: 2, Count: 199 },
]
The following query gives the result object you are expecting. If it does not, please explain the problem more clearly.
SELECT (SELECT RAW a
FROM table AS a
WHERE a.environment = "test" AND a.DocType = "Moderator.User") AS items,
(SELECT RAW COUNT(1)
FROM table b
WHERE b.environment = "test" AND b.DocType = "GM360.User")[0] AS totalItemsCount;
OR
SELECT SUM(CASE WHEN a.DocType = "GM360.User" THEN 1 ELSE 0 END) AS totalItemsCount,
ARRAY_AGG(CASE WHEN a.DocType = "Moderator.User" THEN a ELSE MISSING END) AS items
FROM table AS a
WHERE a.environment = "test" AND a.DocType IN ["Moderator.User", "GM360.User"];
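If you still need the LIMIT/OFFSET paging from your original query, it can be applied inside the items subquery while the count stays unpaginated; a sketch based on the first variant above (the ORDER BY on UserId is only an assumption to make the paging deterministic):
SELECT (SELECT RAW a
        FROM table AS a
        WHERE a.environment = "test" AND a.DocType = "Moderator.User"
        ORDER BY a.UserId
        LIMIT 5 OFFSET 0) AS items,
       (SELECT RAW COUNT(1)
        FROM table b
        WHERE b.environment = "test" AND b.DocType = "GM360.User")[0] AS totalItemsCount;
OFFSET 0 corresponds to page 1 in the original 5 * (1 - 1) expression.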
I'd like to limit my results to one row per day, that is, the newest one for each day, when I do:
SELECT * FROM reports WHERE item = :item_id ORDER BY date DESC
Only one record per day, and the record selected for each day needs to be the latest one for that day.
I really have no idea what I should try; my searches gave me no direction.
I am looking for a complete solution.
Here is example data from my table, in JSON, selected for just a single item:
[{
"id": "62",
"user": "7",
"item": "19333",
"instant_buy": "798000",
"instant_sell": "675000",
"upvotes": "0",
"downvotes": "0",
"created": "2017-06-18 14:01:32"
},
{
"id": "61",
"user": "7",
"item": "19333",
"instant_buy": "899999",
"instant_sell": "735647",
"upvotes": "0",
"downvotes": "0",
"created": "2017-06-18 11:48:25"
},
{
"id": "55",
"user": "4",
"item": "19333",
"instant_buy": "1387166",
"instant_sell": "1050000",
"upvotes": "0",
"downvotes": "0",
"created": "2017-06-17 12:11:30"
},
{
"id": "38",
"user": "4",
"item": "19333",
"instant_buy": "1850000",
"instant_sell": "900000",
"upvotes": "0",
"downvotes": "0",
"created": "2017-06-16 15:48:02"
},
{
"id": "36",
"user": "1",
"item": "19333",
"instant_buy": "1529350",
"instant_sell": "900000",
"upvotes": "1",
"downvotes": "0",
"created": "2017-06-16 14:26:41"
}]
You could use a join against a subquery that takes max(created) grouped by user and date(created):
SELECT *
FROM reports r
INNER JOIN (
select user, max(created) max_created
from reports
group by user, date(created)
) t on t.user = r.user and t.max_created = r.created
You can use GROUP BY on the date column (note this relies on MySQL's non-strict GROUP BY handling, and the non-aggregated columns are not guaranteed to come from the latest row). Something similar to:
SELECT * FROM reports
WHERE item = :item_id
GROUP BY DATE_FORMAT(date,'%m-%d-%Y')
ORDER BY date DESC
Try something like this:
select reports.*
from reports
inner join (
    select distinct date(r1.Date),
           (select ID
            from reports
            where date(Date) = date(r1.Date) and item = :item_id
            order by Date desc
            limit 1) AS ID
    from reports r1
    where item = :item_id
) s1 on reports.id = s1.id
Depending on whether you want the first or the last entry of the day, change the ordering in the s1 subquery.
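On MySQL 8.0+ the same "latest row per day" pick can also be made with a window function instead of a self-join; a minimal sketch, assuming the timestamp column is created as in the sample data:
SELECT *
FROM (
    SELECT r.*,
           ROW_NUMBER() OVER (PARTITION BY DATE(r.created)
                              ORDER BY r.created DESC) AS rn
    FROM reports r
    WHERE r.item = :item_id
) ranked
WHERE rn = 1
ORDER BY created DESC;
rn = 1 keeps only the newest row within each calendar day; switch the ORDER BY inside OVER() to ASC if you want the first entry of the day instead.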