How can I parse a JSON string in PL/SQL?

I want to parse a JSON string stored in a CLOB column of table Tests_1 and insert the values into another table (Test_2).
How can I do this in PL/SQL without using any JSON library?
create table Tests_1
(
value CLOB
);
create table Test_2 (a date, b date, c number, d number, e number);
INSERT INTO Tests_1
(value)
VALUES
('{
"a":"01/01/2015",
"b":"31/12/2015",
"c":"11111111111",
"d":"1111111111",
"e":"1234567890"
}');

Depending on the version of RDBMS you are using, you have at least two choices (apart from writing a parser yourself).
First one: for Oracle 11.1.0.7 and up, install APEX 5 and use the apex_json package:
-- here I have 12.1.0.1 version with version 5 of apex installed
column ora_version format a21;
column apex_version format a21;
select (select version from v$instance) as ora_version
, (select version_no from apex_release) as apex_version
from dual;
--drop table test_2;
/* our test table */
create table test_2(
c_a date,
c_b date,
c_c number,
c_d number,
c_e number
);
select * from test_2;
declare
l_json_doc clob;
begin
dbms_output.put_line('Parsing json...');
l_json_doc := '{"a":"01/01/2015","b":"31/12/2015",
"c":"11111111111","d":"1111111111",
"e":"1234567890"}';
apex_json.parse(l_json_doc);
insert into test_2(c_a, c_b, c_c, c_d, c_e)
values(apex_json.get_date(p_path=>'a', p_format=>'dd/mm/yyyy'),
apex_json.get_date(p_path=>'b', p_format=>'dd/mm/yyyy'),
to_number(apex_json.get_varchar2(p_path=>'c')),
to_number(apex_json.get_varchar2(p_path=>'d')),
to_number(apex_json.get_varchar2(p_path=>'e')));
commit;
dbms_output.put_line('Done!');
end;
/
column c_c format 99999999999;
select to_char(c_a, 'dd/mm/yyyy') as c_a
, to_char(c_b, 'dd/mm/yyyy') as c_b
, c_c
, c_d
, c_e
from test_2;
Result:
ORA_VERSION APEX_VERSION
--------------------- ---------------------
12.1.0.1.0 5.0.2.00.07
1 row selected.
Table created.
no rows selected.
Parsing json...
Done!
PL/SQL procedure successfully completed.
C_A C_B C_C C_D C_E
---------- ---------- ------------ ---------- ----------
01/01/2015 31/12/2015 11111111111 1111111111 1234567890
1 row selected.
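The snippet above parses a literal for brevity; to pull the document from the original Tests_1 table instead, a minimal sketch (assuming the table holds a single row) could look like this:
declare
l_json_doc clob;
begin
-- fetch the JSON document from the CLOB column (assumes exactly one row)
select value into l_json_doc from tests_1 where rownum = 1;
apex_json.parse(l_json_doc);
insert into test_2(c_a, c_b, c_c, c_d, c_e)
values(apex_json.get_date(p_path=>'a', p_format=>'dd/mm/yyyy'),
apex_json.get_date(p_path=>'b', p_format=>'dd/mm/yyyy'),
to_number(apex_json.get_varchar2(p_path=>'c')),
to_number(apex_json.get_varchar2(p_path=>'d')),
to_number(apex_json.get_varchar2(p_path=>'e')));
commit;
end;
/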
Second one: use the open-source PL/JSON library. I had never used it before, so I'm taking this opportunity to try it out. It's quite similar to apex_json.
declare
l_json json; --json object
l_json_doc clob;
begin
dbms_output.put_line('Parsing json...');
-- parsing is done upon object instantiation
l_json_doc := '{"a":"01/01/2015","b":"31/12/2015",
"c":"11111111111","d":"1111111111",
"e":"1234567890"}';
l_json := json(l_json_doc);
insert into test_2(c_a, c_b, c_c, c_d, c_e)
values(to_date(l_json.get('a').get_string, 'dd/mm/yyyy'),
to_date(l_json.get('b').get_string, 'dd/mm/yyyy'),
to_number(l_json.get('c').get_string),
to_number(l_json.get('d').get_string),
to_number(l_json.get('e').get_string));
commit;
dbms_output.put_line('Done!');
end;
/
column c_c format 99999999999;
select to_char(c_a, 'dd/mm/yyyy') as c_a
, to_char(c_b, 'dd/mm/yyyy') as c_b
, c_c
, c_d
, c_e
from test_2;
Result:
C_A C_B C_C C_D C_E
---------- ---------- ------------ ---------- ----------
01/01/2015 31/12/2015 11111111111 1111111111 1234567890
01/01/2015 31/12/2015 11111111111 1111111111 1234567890
2 rows selected.
The introduction of json_table() in the 12.1.0.2 release makes JSON parsing a bit simpler (shown here just for the sake of demonstration):
insert into test_2
select to_date(c_a, 'dd/mm/yyyy')
, to_date(c_b, 'dd/mm/yyyy')
, c_c
, c_d
, c_e
from json_table('{"a":"01/01/2015",
"b":"31/12/2015",
"c":"11111111111",
"d":"1111111111",
"e":"1234567890"}'
, '$'
columns (
c_a varchar2(21) path '$.a',
c_b varchar2(21) path '$.b',
c_c varchar2(21) path '$.c',
c_d varchar2(21) path '$.d',
c_e varchar2(21) path '$.e'
));
Result:
select *
from test_2;
C_A C_B C_C C_D C_E
----------- ----------- ---------- ---------- ----------
1/1/2015 12/31/2015 1111111111 1111111111 1234567890
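json_table() can of course read straight from the CLOB column as well; a sketch against the original Tests_1 table (assuming the column contains valid JSON) might be:
insert into test_2
select to_date(j.c_a, 'dd/mm/yyyy')
, to_date(j.c_b, 'dd/mm/yyyy')
, to_number(j.c_c)
, to_number(j.c_d)
, to_number(j.c_e)
from tests_1 t
, json_table(t.value, '$'
columns (
c_a varchar2(21) path '$.a',
c_b varchar2(21) path '$.b',
c_c varchar2(21) path '$.c',
c_d varchar2(21) path '$.d',
c_e varchar2(21) path '$.e'
)) j;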

Oracle 12c supports JSON natively.
If you have an existing table, simply do:
ALTER TABLE table1 ADD CONSTRAINT constraint_name CHECK (your_column IS JSON);
SELECT t.your_column.id FROM table1 t;
Note that the table alias (t here) is not optional: the JSON dot notation only works with a table alias.
Or a complete example:
CREATE TABLE json_documents (
id RAW(16) NOT NULL,
data CLOB,
CONSTRAINT json_documents_pk PRIMARY KEY (id),
CONSTRAINT json_documents_json_chk CHECK (data IS JSON)
);
INSERT INTO json_documents (id, data)
VALUES (SYS_GUID(),
'{
"FirstName" : "John",
"LastName" : "Doe",
"Job" : "Clerk",
"Address" : {
"Street" : "99 My Street",
"City" : "My City",
"Country" : "UK",
"Postcode" : "A12 34B"
},
"ContactDetails" : {
"Email" : "john.doe#example.com",
"Phone" : "44 123 123456",
"Twitter" : "#johndoe"
},
"DateOfBirth" : "01-JAN-1980",
"Active" : true
}');
SELECT a.data.FirstName,
a.data.LastName,
a.data.Address.Postcode AS Postcode,
a.data.ContactDetails.Email AS Email
FROM json_documents a;
FIRSTNAME LASTNAME POSTCODE EMAIL
--------------- --------------- ---------- -------------------------
John Doe A12 34B john.doe@example.com
1 row selected.
More info
https://oracle-base.com/articles/12c/json-support-in-oracle-database-12cr1
https://docs.oracle.com/database/122/ADJSN/using-PLSQL-object-types-for-JSON.htm#ADJSN-GUID-F0561593-D0B9-44EA-9C8C-ACB6AA9474EE

Since you specified you don't want to use any JSON library, if the format is fixed you could coerce the value into something you can parse as XML: strip the curly braces, replace the colons with equals signs, and remove the double quotes from the first part of each name/value pair:
select regexp_replace(regexp_replace(value, '(^{|}$)'),
'^"(.*)":(".*")($|,)', '\1=\2', 1, 0, 'm')
from tests_1;
REGEXP_REPLACE(REGEXP_REPLACE(VALUE,'(^{|}$)'),'^"(.*)":(".*")($|,)','\1=\2',1,0
--------------------------------------------------------------------------------
a="01/01/2015"
b="31/12/2015"
c="11111111111"
d="1111111111"
e="1234567890"
which you can use as the attributes of a dummy XML node; convert that to XMLType and you can use XMLTable to extract the attributes:
select x.a, x.b, x.c, x.d, x.e
from tests_1 t
cross join xmltable('/tmp'
passing xmltype('<tmp ' ||regexp_replace(regexp_replace(value, '(^{|}$)'),
'^"(.*)":(".*")($|,)', '\1=\2', 1, 0, 'm') || ' />')
columns a varchar2(10) path '#a',
b varchar2(10) path '#b',
c number path '#c',
d number path '#d',
e number path '#e'
) x;
A B C D E
---------- ---------- ------------- ------------- -------------
01/01/2015 31/12/2015 11111111111 1111111111 1234567890
Then you can convert the strings to dates during insert:
insert into test_2 (a, b, c, d, e)
select to_date(x.a, 'DD/MM/YYYY'), to_date(x.b, 'DD/MM/YYYY'), x.c, x.d, x.e
from tests_1 t
cross join xmltable('/tmp'
passing xmltype('<tmp ' || regexp_replace(regexp_replace(value, '(^{|}$)'),
'^"(.*)":(".*")($|,)', '\1=\2', 1, 0, 'm') || ' />')
columns a varchar2(10) path '#a',
b varchar2(10) path '#b',
c number path '#c',
d number path '#d',
e number path '#e'
) x;
select * from test_2;
A B C D E
---------- ---------- ------------- ------------- -------------
2015-01-01 2015-12-31 11111111111 1111111111 1234567890
That will cope with some of the name/value pairs not being there, and you'll get nulls if that happens.
If all the pairs will always be there you could just tokenize the string and pull out the relevant parts:
select to_date(regexp_substr(value, '[^"]+', 1, 4), 'DD/MM/YYYY') as a,
to_date(regexp_substr(value, '[^"]+', 1, 8), 'DD/MM/YYYY') as b,
to_number(regexp_substr(value, '[^"]+', 1, 12)) as c,
to_number(regexp_substr(value, '[^"]+', 1, 16)) as d,
to_number(regexp_substr(value, '[^"]+', 1, 20)) as e
from tests_1;
A B C D E
---------- ---------- ------------- ------------- -------------
2015-01-01 2015-12-31 11111111111 1111111111 1234567890

From Oracle 18c you can use the TREAT (... AS JSON) operator:
SQL Enhancements for JSON
You can specify that a given SQL expression returns JSON data, using TREAT (... AS JSON).
TREAT (... AS JSON) lets you specify that the return value from a given SQL expression is to be treated as JSON data. Such expressions can include PL/SQL function calls and columns specified by a SQL WITH clause. New data-guide views make it easy to access path and type information for JSON fields, which is recorded for index-backed data guides. Returning generated and queried JSON data in LOB instances widens the scope of the use of relational data.
This operator provides a way to inform the database that the content of a VARCHAR2, BLOB, or CLOB column should be treated as JSON. This enables a number of useful features, including the ability to use the "simplified syntax" on database objects that do not have an IS JSON constraint.
And in your example:
create table Test_1(val CLOB);
create table Test_2(a date,b date,c number,d number, e number);
INSERT INTO Test_1(val)
VALUES('{
"a":"01/01/2015",
"b":"31/12/2015",
"c":"11111111111",
"d":"1111111111",
"e":"1234567890"
}');
INSERT INTO Test_2(a,b,c,d,e)
SELECT sub.val_as_json.a,
sub.val_as_json.b,
sub.val_as_json.c,
sub.val_as_json.d,
sub.val_as_json.e
FROM (SELECT TREAT(val as JSON) val_as_json
FROM Test_1) sub;
COMMIT;
db<>fiddle demo
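One caveat: the INSERT above binds strings like '01/01/2015' to DATE columns, so it relies on an implicit conversion via the session's NLS_DATE_FORMAT. A safer variant (same query, just with explicit TO_DATE calls) would be:
INSERT INTO Test_2(a,b,c,d,e)
SELECT to_date(sub.val_as_json.a, 'dd/mm/yyyy'),
       to_date(sub.val_as_json.b, 'dd/mm/yyyy'),
       sub.val_as_json.c,
       sub.val_as_json.d,
       sub.val_as_json.e
FROM (SELECT TREAT(val as JSON) val_as_json
      FROM Test_1) sub;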

Related

Oracle select JSON column as key / value table

In Oracle 12c, having a column with JSON data in this format:
{
"user_name": "Dave",
"phone_number": "13326415",
"married": false,
"age": 18
}
How can I select it in this format:
key val
-------------- ----------
"user_name" "Dave"
"phone_number" "13326415"
"married" "false"
"age" "18"
There is no way to get the keys of a JSON object using just SQL. With PL/SQL you can create a pipelined function to get the information you need. Below is a very simple pipelined function that gets the keys of a JSON object and reports each value's type, the key name, and the value.
First, you will need to create the types that will be used by the function
CREATE OR REPLACE TYPE key_value_table_rec FORCE AS OBJECT
(
TYPE VARCHAR2 (100),
key VARCHAR2 (200),
VALUE VARCHAR2 (200)
);
/
CREATE OR REPLACE TYPE key_value_table_t AS TABLE OF key_value_table_rec;
/
Next, create the pipelined function that will return the information in the format of the types defined above.
CREATE OR REPLACE FUNCTION get_key_value_table (p_json CLOB)
RETURN key_value_table_t
PIPELINED
AS
l_json json_object_t;
l_json_keys json_key_list;
l_json_element json_element_t;
BEGIN
l_json := json_object_t (p_json);
l_json_keys := l_json.get_keys;
FOR i IN 1 .. l_json_keys.COUNT
LOOP
l_json_element := l_json.get (l_json_keys (i));
PIPE ROW (key_value_table_rec (
CASE
WHEN l_json_element.is_null THEN 'null'
WHEN l_json_element.is_boolean THEN 'boolean'
WHEN l_json_element.is_number THEN 'number'
WHEN l_json_element.is_timestamp THEN 'timestamp'
WHEN l_json_element.is_date THEN 'date'
WHEN l_json_element.is_string THEN 'string'
WHEN l_json_element.is_object THEN 'object'
WHEN l_json_element.is_array THEN 'array'
ELSE 'unknown'
END,
l_json_keys (i),
l_json.get_string (l_json_keys (i))));
END LOOP;
RETURN;
EXCEPTION
WHEN OTHERS
THEN
CASE SQLCODE
WHEN -40834
THEN
--JSON format is not valid
NULL;
ELSE
RAISE;
END CASE;
END;
/
Finally, you can call the pipelined function from a SELECT statement
SELECT * FROM TABLE (get_key_value_table (p_json => '{
"user_name": "Dave",
"phone_number": "13326415",
"married": false,
"age": 18
}'));
TYPE KEY VALUE
__________ _______________ ___________
string user_name Dave
string phone_number 13326415
boolean married false
number age 18
If your JSON values are stored in a column in a table, you can view the keys/values using CROSS JOIN
WITH
sample_table (id, json_col)
AS
(SELECT 1, '{"key1":"val1","key_obj":{"nested_key":"nested_val"},"key_bool":false}'
FROM DUAL
UNION ALL
SELECT 2, '{"key3":3.14,"key_arr":[1,2,3]}' FROM DUAL)
SELECT t.id, j.*
FROM sample_table t CROSS JOIN TABLE (get_key_value_table (p_json => t.json_col)) j;
ID TYPE KEY VALUE
_____ __________ ___________ ________
1 string key1 val1
1 object key_obj
1 boolean key_bool false
2 number key3 3.14
2 array key_arr

MySQL Parse and Split JSON value

I have a column which contains JSON values of varying length:
["The Cherries:2.50","Draw:3.25","Swansea Jacks:2.87"]
I want to split them and store them in JSON like so:
[
{
name: "The Cherries",
odds: 2.50
},
{
name: "Draw",
odds: 3.25
},
{
name: "Swansea",
odds: 2.87
},
]
What I do right now is loop and split them in the UI, which is quite heavy for the client. I want to parse and split them all in a single query.
If you are running MySQL 8.0, you can use json_table() to split the original array into rows, and then build new objects and aggregate them with json_arrayagg().
We need a primary key column (or set of columns) so we can properly aggregate the generated rows; I assumed id:
select
t.id,
json_arrayagg(json_object(
'name', substring(j.val, 1, locate(':', j.val) - 1),
'odds', substring(j.val, locate(':', j.val) + 1)
)) new_js
from mytable t
cross join json_table(t.js, '$[*]' columns (val varchar(500) path '$')) as j
group by t.id
Demo on DB Fiddle
Sample data:
id | js
-: | :-------------------------------------------------------
1 | ["The Cherries:2.50", "Draw:3.25", "Swansea Jacks:2.87"]
Query results:
id | new_js
-: | :----------------------------------------------------------------------------------------------------------------------
1 | [{"name": "The Cherries", "odds": "2.50"}, {"name": "Draw", "odds": "3.25"}, {"name": "Swansea Jacks", "odds": "2.87"}]
You can use json_table to create rows from the JSON object.
Just replace table_name with your table name and json with the column that contains the JSON:
SELECT json_arrayagg(json_object('name', SUBSTRING_INDEX(person, ':', 1),
                                 'odds', SUBSTRING_INDEX(person, ':', -1)))
FROM table_name,
JSON_TABLE(json, '$[*]' COLUMNS (person VARCHAR(40) PATH '$')) people;
Here is a db fiddle for reference:
https://dbfiddle.uk/?rdbms=mysql_8.0&fiddle=801de9f067e89a48d45ef9a5bd2d094a

How to construct single JSON where column1 is property name and column2 is value

In SQL Server 2016+, how can I convert a table looking like this:
+---------+----------+
| Kee | Val |
+---------+----------+
| aaaaaa | 11111111 |
| bbbbbbb | 2222222 |
+---------+----------+
into an object looking like this:
{
"aaaaaa": "11111111",
"bbbbbbb": "2222222"
}
This is what I've tried:
CREATE TABLE #tmp
(
Kee VARCHAR(100),
Val VARCHAR(100)
)
INSERT INTO #tmp
(
Kee,
Val
)
VALUES
('aaaaaa', '11111111'),
('bbbbbbb', '2222222')
SELECT t.Kee,
t.Val
FROM #tmp AS t
FOR JSON AUTO
DROP TABLE #tmp
But it gives:
[
{
"Kee": "aaaaaa",
"Val": "11111111"
},
{
"Kee": "bbbbbbb",
"Val": "2222222"
}
]
Unfortunately, SQL Server's JSON support is not that flexible.
You will have to construct that JSON manually, but it's quite simple using basic string concatenation techniques.
Prior to the 2017 version, use FOR XML PATH with STUFF:
SELECT STUFF(
(
SELECT '","'+ t.Kee +'":"'+ t.Val
FROM #tmp AS t
FOR XML PATH('')
), 1, 2, '{') + '"}' AS JsonResult
In SQL Server 2017, the built-in STRING_AGG function finally arrived, making the code required to get that result much simpler:
SELECT '{"' + STRING_AGG(t.Kee +'":"'+ t.Val, '","') +'"}'
FROM #tmp As t
Result (in both cases):
{"aaaaaa":"11111111","bbbbbbb":"2222222"}

Extract values from Postgres JSONB column

I have a JSONB column called metrics in a table events. It stores various metrics as a flat hash, e.g.
{"m1": 123, "m2": 122.3, "m3": 32}
I would like to extract all the values stored in that column. Is it possible? I have found a function jsonb_object_keys(jsonb), but I failed to find anything similar for values.
Use jsonb_each() for this purpose:
WITH json_test(data) AS ( VALUES
('{"m1": 123, "m2": 122.3, "m3": 32}'::JSONB)
)
SELECT element.value
FROM json_test jt, jsonb_each(jt.data) as element;
Output:
value
-------
123
122.3
32
(3 rows)
Use jsonb_each() in a lateral join:
with val as (
select '{"m1": 123, "m2": 122.3, "m3": 32}'::jsonb js
)
select key, value
from val,
lateral jsonb_each(js);
key | value
-----+-------
m1 | 123
m2 | 122.3
m3 | 32
(3 rows)
Using json_each you can extract the values with:
SELECT value FROM json_each('{"m1": 123, "m2": 122.3, "m3": 32}')
Output
value
-----
123
122.3
32
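Note that json_each()/jsonb_each() return values typed json/jsonb, so string values would keep their double quotes. To get plain text instead, the *_text variants can be used:
SELECT value FROM jsonb_each_text('{"m1": 123, "m2": 122.3, "m3": 32}'::jsonb);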

Automatically creating a table and inserting data from json file

I have nearly 50 json files. I want to create Postgres database based on these files. Each file contains data of one table. The files are not very large (at maximum several thousand records). Example data from customers.json (in fact there are more fields, I have simplified it):
[
{
"Id": 55948,
"FullName": "Full name #1",
"Address": "Address #1",
"Turnover": 120400.5,
"DateOfRegistration": "2014-02-13",
"LastModifiedAt": "2015-11-03 12:04:44" },
{
"Id": 55949,
"FullName": "Full name %2",
"Address": "Address #2",
"Turnover": 120000.0,
"DateOfRegistration": "2012-12-01",
"LastModifiedAt": "2015-11-04 17:14:21" }
]
I am trying to write a function which creates a table and inserts all the data into it. My attempt is based on a dynamic query using EXECUTE:
CREATE OR REPLACE FUNCTION import_json(table_name text, data json)
RETURNS VOID AS $$
DECLARE
query text;
colname text;
BEGIN
query := 'CREATE TABLE ' || table_name || ' (';
FOR colname IN SELECT json_object_keys(data->0)
LOOP query := query || lower(colname) || ' text,';
END LOOP;
query := rtrim(query, ',') || ');';
EXECUTE(query);
END $$ LANGUAGE plpgsql;
My function creates a table with the expected column names, but all columns are of type text. The problem is that I do not know how to define the proper column types.
The JSON files are well formatted and contain integer, numeric, date, timestamp and text values. I would like to get this table:
CREATE TABLE customers (
id integer,
fullname text,
address text,
turnover numeric,
date_of_registration date,
last_modified_at timestamp);
The main question: how can I recognize the types of the columns in the generated table?
Additionally, is there an easy way to transform PascalCase to underscore notation ("DateOfRegistration" -> "date_of_registration")?
You can determine the type of a column by examining a value.
The function below formats a column definition from a (key, value) pair.
It uses regex pattern matching.
It also transforms the column name to underscore notation (using the regexp_replace() function).
Of course, the function won't work properly if a value represents NULL, so you have to check that the first JSON record has no null values.
create or replace function format_column(ckey text, cval text)
returns text language sql immutable as $$
select format('%s %s',
lower(regexp_replace(ckey, '(.)([A-Z])', '\1_\2', 'g')),
case
when cval ~ '^[\+-]{0,1}\d+$' then 'integer'
when cval ~ '^[\+-]{0,1}\d*\.\d+$' then 'numeric'
when cval ~ '^"\d\d\d\d-\d\d-\d\d"$' then 'date'
when cval ~ '^"\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d"$' then 'timestamp'
else 'text'
end
)
$$;
select format_column(key, value)
from (
values
('Id', '55948'),
('FullName', '"Full name #1"'),
('Turnover', '120400.5'),
('DateOfRegistration', '"2014-02-13"')
) val(key, value);
format_column
---------------------------
id integer
full_name text
turnover numeric
date_of_registration date
(4 rows)
In the main function you do not need variables or loops.
Use the format() function to build strings with parameters and string_agg() to create comma-separated lists. (Attaching a FROM clause to EXECUTE format(...) works here because PL/pgSQL evaluates the command-string argument as a query expression.)
Since you need both keys and values, use json_each() instead of json_object_keys(). In the second query, row_number() ensures that the aggregated list of values is divided into consecutive records.
create or replace function import_table(table_name text, jdata json)
returns void language plpgsql as $$
begin
execute format('create table %s (%s)', table_name, string_agg(col, ', '))
from (
select format_column(key::text, value::text) col
from json_each(jdata->0)
) sub;
execute format('insert into %s values %s', table_name, string_agg(val, ','))
from (
with lines as (
select row_number() over () rn, line
from (
select json_array_elements(jdata) line
) sub
)
select rn, format('(%s)', string_agg(value, ',')) val
from (
select rn, format('%L', trim(value::text, '"')) as value
from lines, json_each(line)
) sub
group by 1
) sub;
end $$;
Test:
select import_table('customers',
'[{ "Id": 55948,
"FullName": "Full name #1",
"Address": "Address #1",
"Turnover": 120400.5,
"DateOfRegistration": "2014-02-13",
"LastModifiedAt": "2015-11-03 12:04:44" },
{ "Id": 55949,
"FullName": "Full name %2",
"Address": "Address #2",
"Turnover": 120000.0,
"DateOfRegistration": "2012-12-01",
"LastModifiedAt": "2015-11-04 17:14:21" }]');
\d customers
Table "public.customers"
Column | Type | Modifiers
----------------------+-----------------------------+-----------
id | integer |
full_name | text |
address | text |
turnover | numeric |
date_of_registration | date |
last_modified_at | timestamp without time zone |
select * from customers;
id | full_name | address | turnover | date_of_registration | last_modified_at
-------+--------------+------------+----------+----------------------+---------------------
55948 | Full name #1 | Address #1 | 120400.5 | 2014-02-13 | 2015-11-03 12:04:44
55949 | Full name %2 | Address #2 | 120000.0 | 2012-12-01 | 2015-11-04 17:14:21
(2 rows)