Select from MySQL JSON field where array starts with subarray

I'd like to select rows from a MySQL table, filtering on the start of an array contained in a JSON field in the row.
CREATE TABLE test (id int, c1 json);
INSERT INTO test VALUES
(1, '{ "path": ["a", "b", "c"] }'),
(2, '{ "path": ["a", "b"] }'),
(3, '{ "path": ["a", "b", "d"] }'),
(4, '{ "path": ["a"] }'),
(5, '{ "path": ["e", "a", "b"] }')
(6, '{ "path": ["a", "e", "b"] }');
So with the setup above, I would like to search for paths starting with ["a", "b"] and get 1, 2, and 3.
4, 5 and 6 would not be returned as they do not start with the path ["a", "b"].
select c1 from test where json_contains(c1, '["a", "b"]', '$.path'); is the closest I've found, but it fails because json_contains does not require the items to match contiguously or from the start of the array.

select id from test where json_extract(c1,"$.path[0]") ='a' and json_extract(c1,"$.path[1]") ='b';
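The query above checks the first two path elements individually. On MySQL 8.0.2 or later, the same prefix test can be written with a range in the JSON path; this is a sketch assuming the 'to' range syntax is available, and a longer prefix would just extend the range:
-- extract the first two elements of $.path (autowrapped as an array)
-- and compare them with the wanted prefix
select id
from test
where json_extract(c1, '$.path[0 to 1]') = json_array('a', 'b');
-- returns ids 1, 2 and 3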

Related

How to query nested array of jsonb

I am working on a PostgreSQL 11 table with a column of nested, multiple jsonb objects. To simulate the issue:
CREATE TABLE public.test
(
id integer NOT NULL DEFAULT nextval('test_id_seq'::regclass),
testcol jsonb
);
insert into test (testcol) values
('[{"type": {"value": 1, "displayName": "flag1"}, "value": "10"},
{"type": {"value": 2, "displayName": "flag2"}, "value": "20"},
{"type": {"value": 3, "displayName": "flag3"}, "value": "30"},
{"type": {"value": 4, "displayName": "flag4"}},
{"type": {"value": 4, "displayName": "flag4"}},
{"type": {"value": 6, "displayName": "flag6"}, "value": "40"}]');
I am trying to:
1. Get the outer "value" when the type matches a specific value, e.g. get the value 30 when displayName is flag3.
2. Count the occurrences of flag4 in the inner JSON.
You could use jsonb_to_recordset to parse it:
WITH cte AS (
   SELECT test.id,
          sub."type"->'value' AS t_value,
          sub."type"->'displayName' AS t_name,
          value
   FROM test,
        LATERAL jsonb_to_recordset(testcol) sub("type" jsonb, "value" int)
)
SELECT *
FROM cte
-- WHERE ...
-- GROUP BY ...;
db<>fiddle demo
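For the two concrete questions, the commented-out WHERE could be filled in roughly like this (a sketch built on the CTE above; t_name is jsonb, so it is compared against a JSON string literal):
WITH cte AS (
   SELECT test.id,
          sub."type"->'value' AS t_value,
          sub."type"->'displayName' AS t_name,
          value
   FROM test,
        LATERAL jsonb_to_recordset(testcol) sub("type" jsonb, "value" int)
)
SELECT
   (SELECT value    FROM cte WHERE t_name = '"flag3"') AS flag3_value,  -- 30
   (SELECT count(*) FROM cte WHERE t_name = '"flag4"') AS flag4_count;  -- 2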

Query jsonb column to match an array of keys

I have a table items which has a jsonb column data.
The data column is something like this {"name": "aaa", "age": 23, "job": "dev"}.
How do I select the rows where data has only the keys name and age?
You can use the ? and the ?& operators.
For your use case, it will be:
SELECT * FROM items WHERE (NOT data ? 'job') AND (data ?& array['name', 'age'])
Use the delete operator -, for example:
with items (data) as (
values
('{"name": "aaa", "age": 23}'::jsonb),
('{"name": "aaa", "age": 23, "job": "dev"}'),
('{"name": "aaa", "age": 23, "gender": "f"}')
)
select *
from items
where data - 'name' - 'age' = '{}'
data
----------------------------
{"age": 23, "name": "aaa"}
(1 row)
In Postgres 10+ you can use a text array:
select *
from items
where data - array['name', 'age'] = '{}'
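If the unwanted keys are not known in advance (the first answer has to name job explicitly), you can also check that no other keys exist at all. A sketch using jsonb_object_keys:
select *
from items
where data ?& array['name', 'age']                          -- has both required keys
  and (select count(*) from jsonb_object_keys(data)) = 2;   -- and no other keys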

How to make pgsql return the json array

Everyone, I'm facing an issue converting data into JSON objects. There is a table called milestone with the following data:
 id | name  | parentId
----+-------+----------
 a  | test1 | A
 b  | test2 | B
 c  | test3 | C
I want to convert the result into a json type in Postgres:
[{"id": "a", "name": "test1", "parentId": "A"}]
[{"id": "b", "name": "test2", "parentId": "B"}]
[{"id": "c", "name": "test3", "parentId": "C"}]
If anyone knows how to handle this, please let me know. Thanks, all.
You can get each row of the table as a simple json object with to_jsonb():
select to_jsonb(m)
from milestone m
to_jsonb
-----------------------------------------------
{"id": "a", "name": "test1", "parentid": "A"}
{"id": "b", "name": "test2", "parentid": "B"}
{"id": "c", "name": "test3", "parentid": "C"}
(3 rows)
If you want to get a single element array for each row, use jsonb_build_array():
select jsonb_build_array(to_jsonb(m))
from milestone m
jsonb_build_array
-------------------------------------------------
[{"id": "a", "name": "test1", "parentid": "A"}]
[{"id": "b", "name": "test2", "parentid": "B"}]
[{"id": "c", "name": "test3", "parentid": "C"}]
(3 rows)
You can also get all rows as a json array with jsonb_agg():
select jsonb_agg(to_jsonb(m))
from milestone m
jsonb_agg
-----------------------------------------------------------------------------------------------------------------------------------------------
[{"id": "a", "name": "test1", "parentid": "A"}, {"id": "b", "name": "test2", "parentid": "B"}, {"id": "c", "name": "test3", "parentid": "C"}]
(1 row)
Read about JSON Functions and Operators in the documentation.
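If the camelCase key parentId from the desired output matters (unquoted identifiers fold to lowercase, which is why the results above show parentid), you can spell the keys out yourself. A sketch, assuming the column was created unquoted as parentid:
select jsonb_build_array(
         jsonb_build_object('id', id, 'name', name, 'parentId', parentid)
       )
from milestone;
-- [{"id": "a", "name": "test1", "parentId": "A"}]
-- [{"id": "b", "name": "test2", "parentId": "B"}]
-- [{"id": "c", "name": "test3", "parentId": "C"}]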
You can use ROW_TO_JSON.
From the documentation:
Returns the row as a JSON object. Line feeds will be added between
level-1 elements if pretty_bool is true.
For the query:
select row_to_json(tbl)
from (select * from tbl) as tbl;
You can check here in DEMO
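Applied to the milestone table from the question, that generic snippet would look roughly like this (a sketch; row_to_json likewise folds the unquoted column name to parentid):
select row_to_json(m)
from (select * from milestone) as m;
-- {"id":"a","name":"test1","parentid":"A"}
-- {"id":"b","name":"test2","parentid":"B"}
-- {"id":"c","name":"test3","parentid":"C"}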

Use JSON_EXTRACT, JSON_SET, JSON_REPLACE, JSON_INSERT on a JSON array

I have a JSON-type column in MySQL named names, and it's a simple JSON array (not key/value pairs). I couldn't find any examples of using JSON_EXTRACT, JSON_SET, JSON_REPLACE, or JSON_INSERT on a simple JSON array field.
I know there are other ways to manipulate a JSON array in a JSON column, but is it possible to use these functions for a simple JSON array?
For example, the names field contains ["A","B","C"]; how can I use these functions to perform an update, insert, and delete on this JSON?
Update:
The query must be executed from a PHP script.
The functions you refer to all work exactly as expected and described in the manual; that is to say JSON_SET will insert or replace if a value already exists, JSON_INSERT will insert if a value doesn't already exist, and JSON_REPLACE will replace a pre-existing value. You can use JSON_ARRAY_INSERT and JSON_ARRAY_APPEND to more easily add values to a JSON array.
-- extract second element
select json_extract('["A", "B", "C"]', '$[1]')
-- "B"
-- replace second element
select json_set('["A", "B", "C"]', '$[1]', 'D')
-- ["A", "D", "C"]
-- insert fourth element
select json_set('["A", "B", "C"]', '$[3]', 'E')
-- ["A", "B", "C", "E"]
-- attempt to insert second element fails as it already exists
select json_insert('["A", "B", "C"]', '$[1]', 'F')
-- ["A", "B", "C"]
-- use json_array_insert to insert a new second element and move the other elements right
select json_array_insert('["A", "B", "C"]', '$[1]', 'F')
-- ["A", "F", "B", "C"]
-- insert fourth element
select json_insert('["A", "B", "C"]', '$[3]', 'F')
-- ["A", "B", "C", "F"]
-- or use json_array_append to add an element at the end
select json_array_append('["A", "B", "C"]', '$', 'F')
-- ["A", "B", "C", "F"]
-- replace second element
select json_replace('["A", "B", "C"]', '$[1]', 'G')
-- ["A", "G", "C"]
-- attempt to replace non-existing element fails
select json_replace('["A", "B", "C"]', '$[3]', 'G')
-- ["A", "B", "C"]
Demo on dbfiddle
To use these functions on a column in a table, simply replace the ["A", "B", "C"] in the above calls with the column name, for example:
create table test (j json);
insert into test values ('["A", "B", "C"]');
select json_array_insert(j, '$[1]', 'F')
from test
-- ["A", "F", "B", "C"]
Demo on dbfiddle
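To actually change the stored value rather than just select a transformed copy, wrap the same calls in an UPDATE. A sketch against the test table above, also using JSON_REMOVE for the delete case:
-- append a new element and persist it
update test set j = json_array_append(j, '$', 'F');
-- j is now ["A", "B", "C", "F"]

-- delete the second element by index
update test set j = json_remove(j, '$[1]');
-- j is now ["A", "C", "F"]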
I think I found the solution.
For a JSON array, it's not possible to use JSON_EXTRACT, JSON_SET, JSON_REPLACE, or JSON_INSERT by array value; you have to know the position of each value in the JSON array (in my opinion this is a weakness).
For example, to select the 2nd value you can use $[1], and to insert values you can use JSON_ARRAY_APPEND and JSON_ARRAY_INSERT.
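If you know the value but not its position, JSON_SEARCH can look up the path first; a sketch, assuming the array elements are strings (JSON_SEARCH only matches string values):
-- json_search returns the path of 'B' as a JSON string ("$[1]"),
-- json_unquote turns it into a plain path for json_remove
select json_remove('["A", "B", "C"]',
                   json_unquote(json_search('["A", "B", "C"]', 'one', 'B')));
-- ["A", "C"]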

How can I merge records inside two JSON arrays?

I have two Postgres SQL queries returning JSON arrays:
q1:
[
{"id": 1, "a": "text1a", "b": "text1b"},
{"id": 2, "a": "text2a", "b": "text2b"},
{"id": 2, "a": "text3a", "b": "text3b"},
...
]
q2:
[
{"id": 1, "percent": 12.50},
{"id": 2, "percent": 75.00},
{"id": 3, "percent": 12.50}
...
]
I want the result to be a union of both arrays' elements, merged by id:
[
{"id": 1, "a": "text1a", "b": "text1b", "percent": 12.50},
{"id": 2, "a": "text2a", "b": "text2b", "percent": 75.00},
{"id": 3, "a": "text3a", "b": "text3b", "percent": 12.50},
...
]
How can this be done with SQL in Postgres 9.4?
Assuming data type jsonb and that you want to merge records of each JSON array that share the same 'id' value.
Postgres 9.5
makes it simpler with the new concatenation operator || for jsonb values:
SELECT json_agg(elem1 || elem2) AS result
FROM  (
   SELECT elem1->>'id' AS id, elem1
   FROM  (
      SELECT '[
         {"id":1, "percent":12.50},
         {"id":2, "percent":75.00},
         {"id":3, "percent":12.50}
         ]'::jsonb AS js
      ) t, jsonb_array_elements(t.js) elem1
   ) t1
FULL JOIN (
   SELECT elem2->>'id' AS id, elem2
   FROM  (
      SELECT '[
         {"id": 1, "a": "text1a", "b": "text1b", "percent":12.50},
         {"id": 2, "a": "text2a", "b": "text2b", "percent":75.00},
         {"id": 3, "a": "text3a", "b": "text3b", "percent":12.50}]'::jsonb AS js
      ) t, jsonb_array_elements(t.js) elem2
   ) t2 USING (id);
The FULL [OUTER] JOIN makes sure you don't lose records without match in the other array.
The type jsonb has the convenient property to only keep the latest value for each key in the record. Hence, the duplicate 'id' key in the result is merged automatically.
The Postgres 9.5 manual also advises:
Note: The || operator concatenates the elements at the top level of
each of its operands. It does not operate recursively. For example, if
both operands are objects with a common key field name, the value of
the field in the result will just be the value from the right hand operand.
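A minimal illustration of that behaviour:
select '{"a": 1, "b": 1}'::jsonb || '{"a": 2}'::jsonb;
-- {"a": 2, "b": 1}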
Postgres 9.4
Is a bit less convenient. My idea would be to extract the array elements, then extract all key/value pairs, UNION both results, aggregate into a single new jsonb value per id, and finally aggregate into a single array.
SELECT json_agg(j)  -- ::jsonb
FROM  (
   SELECT json_object_agg(key, value)::jsonb AS j
   FROM  (
      SELECT elem->>'id' AS id, x.*
      FROM  (
         SELECT '[
            {"id":1, "percent":12.50},
            {"id":2, "percent":75.00},
            {"id":3, "percent":12.50}]'::jsonb AS js
         ) t, jsonb_array_elements(t.js) elem, jsonb_each(elem) x
      UNION ALL  -- or UNION, see below
      SELECT elem->>'id' AS id, x.*
      FROM  (
         SELECT '[
            {"id": 1, "a": "text1a", "b": "text1b", "percent":12.50},
            {"id": 2, "a": "text2a", "b": "text2b", "percent":75.00},
            {"id": 3, "a": "text3a", "b": "text3b", "percent":12.50}]'::jsonb AS js
         ) t, jsonb_array_elements(t.js) elem, jsonb_each(elem) x
      ) t
   GROUP BY id
   ) t;
The cast to jsonb removes duplicate keys. Alternatively you could use UNION to fold duplicates (for instance if you want json as result). Test which is faster for your case.
Related:
How to turn json array into postgres array?
Merging Concatenating JSON(B) columns in query
For any single jsonb element, this use of the concatenation operator || works well for me, combined with jsonb_strip_nulls and another trick to get the result back as plain jsonb (not an array).
select jsonb_array_elements(jsonb_strip_nulls(jsonb_agg(
'{
"a" : "unchanged value",
"b" : "old value",
"d" : "delete me"
}'::jsonb
|| -- The concat operator works as merge on jsonb, the right operand takes precedence
-- NOTE: it only works one JSON level deep
'{
"b" : "NEW value",
"c" : "NEW field",
"d" : null
}'::jsonb
)));
This gives the result
{"a": "unchanged value", "b": "NEW value", "c": "NEW field"}
which is properly typed jsonb.