I am struggling to parse a JSON having more than 32767 keys
The format of json is shown below.
The idea is to fetch all the keys and values into a temp table, but due to the length limitation I am unable to do so.
{"1335":"345435sd","8989SD":"jddk8","dDDSF","87868658"......}
-- Walks every JSON document stored in t_clob and prints each key/value pair.
-- NOTE(review): JSON_OBJECT_T.get_keys() returns at most 32767 field names;
-- it raises ORA-40684 on wider objects, which is exactly the failure shown
-- below this block.
declare
j JSON_OBJECT_T;        -- parsed JSON document for the current row
i NUMBER;               -- NOTE(review): shadowed by the inner FOR loop's own i
k JSON_KEY_LIST;        -- all field names of the object (source of ORA-40684)
arr JSON_ARRAY_T;       -- declared but unused in this block
v_key varchar2(2000);   -- current key, staged for the (commented-out) insert
v_value varchar2(2000); -- current value, staged for the (commented-out) insert
CURSOR c_json IS
select treat(col_clob as json) myJsonCol from t_clob; -- the data is stored as a CLOB and must be treated as JSON
begin
FOR rec IN c_json
LOOP
-- Parse the whole CLOB into an in-memory JSON object.
j := JSON_OBJECT_T.parse(rec.myJsonCol);
-- This call fails with ORA-40684 when the object has > 32767 fields.
k := j.get_keys;
FOR i in 1..k.COUNT
LOOP
dbms_output.put_line(k(i) || ' ' || j.get_String(k(i)));
v_key :=k(i);
v_value :=j.get_String(k(i));
-- insert into temp(c1,c2) values(v_key,v_value);
END LOOP;
END LOOP;
END;
/
::Oracle Error: ORA-40684: maximum number of key names exceeded::
Reference - https://docs.oracle.com/en/database/oracle/oracle-database/12.2/adjsn/oracle-json-restrictions.html#GUID-1DB81125-54A7-4CB6-864B-78E0E7E407C9
PL/SQL getter method JSON_OBJECT_T.get_keys() returns at most 32767 field names for a given JSON object. An error is raised if it is applied to an object with more than 32767 fields.
I am trying to convert a json array to json clob and the convert it object type in an Oracle stored procedure.
Below is the object type I have in Oracle.
-- Object type mirroring one element of the incoming JSON array and one row
-- of REPORTS_DATA.
-- FIX: "name" was declared NUMBER, but the JSON carries string values
-- ("john", "crystal") and REPORTS_DATA.name is VARCHAR(200); assigning a
-- string to a NUMBER attribute would raise ORA-06502.
create or replace TYPE REPORT_OBJ FORCE as OBJECT (
id NUMBER,            -- matches REPORTS_DATA.id
name VARCHAR2(200),   -- matches REPORTS_DATA.name (was NUMBER: bug)
createDt Date,        -- parsed from "dd-mon-yyyy" strings in the JSON
value NUMBER(10,2)    -- matches REPORTS_DATA.value
);
create or replace TYPE REPORT_OBJ_LIST as TABLE OF REPORT_OBJ;
This is my json array:
[{"id":1,"name":"john",:"createDt":"01-jan-2020","value":10},
{"id":2,"name":"crystal","createDt":"01-feb-2020","value":20},
{"id":3,"name":"bob","createDt":"01-mar-2020","value":30}]
This is my stored procedure which takes report_obj_list as input parameter
-- Inserts one REPORTS_DATA row per list element, or updates the existing
-- row(s) sharing the same createDt.
-- FIXES vs. original:
--   * the UPDATE had no WHERE clause, so it rewrote EVERY row in the table;
--   * the existence probe (DISTINCT + self left join on a dummy 'TEMP'
--     column) is replaced by a plain COUNT(*), which also removes the
--     NO_DATA_FOUND special case that silently skipped elements;
--   * the pre-UPDATE SELECT INTO could raise TOO_MANY_ROWS; it only fed
--     NVL defaults, which the UPDATE expresses directly on its own columns;
--   * empty/NULL collections no longer break the FIRST..LAST loop.
create or replace PROCEDURE SaveUpdate_ReportsData(reportList IN REPORT_OBJ_LIST)
AS
    updatedRecs NUMBER := 0;  -- rows inserted or updated
    recsCount   NUMBER := 0;  -- final count echoed to DBMS_OUTPUT
    v_exists    NUMBER;       -- matching-row count for the current element
BEGIN
    -- Guard: a NULL or empty collection has NULL FIRST/LAST.
    IF reportList IS NULL OR reportList.COUNT = 0 THEN
        DBMS_OUTPUT.PUT_LINE('HELOOwq 0');
        RETURN;
    END IF;

    FOR i IN reportList.FIRST .. reportList.LAST LOOP
        SELECT COUNT(*)
        INTO v_exists
        FROM REPORTS_DATA
        WHERE createDt = reportList(i).createDt;

        IF v_exists = 0 THEN
            INSERT INTO REPORTS_DATA (Id, name, createDt, value)
            VALUES (reportList(i).Id, reportList(i).name,
                    reportList(i).createDt, reportList(i).value);
        ELSE
            -- NVL keeps the stored value when the incoming attribute is NULL.
            UPDATE REPORTS_DATA
            SET id       = NVL(reportList(i).id, id),
                name     = NVL(reportList(i).name, name),
                createDt = NVL(reportList(i).createDt, createDt),
                value    = NVL(reportList(i).value, value)
            WHERE createDt = reportList(i).createDt;  -- was missing: updated all rows
        END IF;
        updatedRecs := updatedRecs + 1;
    END LOOP;

    Commit;
    recsCount := updatedRecs;
    DBMS_OUTPUT.PUT_LINE('HELOOwq ' || recsCount);
end SaveUpdate_ReportsData ;
below is the oracle table
-- Target table for SaveUpdate_ReportsData; one row per report entry.
CREATE TABLE reports_data (
    id       NUMBER,
    name     VARCHAR2(200),
    createdt DATE,
    value    NUMBER(10, 2)
);
From Java, I have to convert the JSON array to a CLOB so that a large amount of data can be passed as input to the stored procedure. The stored procedure should accept the JSON array as a CLOB and convert it to REPORT_OBJ_LIST; from there the existing logic will work fine. I have written the stored procedure which accepts an object collection, but I need to change it so that it accepts a CLOB JSON array and converts it to the object collection inside the stored procedure.
Updated stored procedure
-- Parses a (currently hard-coded) JSON array from a CLOB into
-- REPORT_OBJ_LIST, then upserts each element into REPORTS_DATA keyed by
-- createDt. The intnum parameter is kept for interface compatibility and
-- is unused, as in the original.
-- FIXES vs. original:
--   * the UPDATE had no WHERE clause (it rewrote the whole table);
--   * NO_DATA_FOUND from the convoluted existence probe silently discarded
--     elements; replaced with COUNT(*) so nothing is swallowed;
--   * the JSON_TABLE columns are renamed with a trailing underscore because
--     NAME and VALUE are Oracle keywords and can resolve ambiguously inside
--     PL/SQL -- a plausible cause of the "no error, no rows" symptom
--     (TODO confirm against the compiled version in your schema).
create or replace PROCEDURE SaveUpdate_ReportsData(intnum in Number)
AS
    jstr       clob;
    reportList report_obj_list;
    v_exists   NUMBER;  -- matching-row count for the current element
BEGIN
    -- Sample payload; in production this CLOB would arrive as a parameter.
    jstr := to_clob('[{"id":1,"name":"john","createDt":"01-jan-2020","value":10},
{"id":2,"name":"crystal","createDt":"01-feb-2020","value":20},
{"id":3,"name":"bob","createDt":"01-mar-2020","value":30}]');

    -- Shred the JSON array into the object collection.
    select report_obj(id_, name_, to_date(createdt_, 'dd-mon-yyyy'), value_)
    bulk collect into reportList
    from json_table(jstr, '$[*]'
           columns( id_       number        path '$.id',
                    name_     varchar2(200) path '$.name',
                    createdt_ varchar2(11)  path '$.createDt',
                    value_    number(10, 2) path '$.value'
                  )
         );

    FOR i IN 1 .. reportList.COUNT LOOP
        DBMS_OUTPUT.PUT_LINE('name_ ' || reportList(i).name);

        SELECT COUNT(*)
        INTO v_exists
        FROM REPORTS_DATA
        WHERE createDt = reportList(i).createDt;

        IF v_exists = 0 THEN
            INSERT INTO REPORTS_DATA (Id, name, createDt, value)
            VALUES (reportList(i).Id, reportList(i).name,
                    reportList(i).createDt, reportList(i).value);
        ELSE
            -- NVL keeps the stored value when the incoming attribute is NULL.
            UPDATE REPORTS_DATA
            SET id       = NVL(reportList(i).id, id),
                name     = NVL(reportList(i).name, name),
                createDt = NVL(reportList(i).createDt, createDt),
                value    = NVL(reportList(i).value, value)
            WHERE createDt = reportList(i).createDt;  -- was missing in original
        END IF;
    END LOOP;

    Commit;
end SaveUpdate_ReportsData ;
and i am calling the stored procedure as below:
-- Invoke the upsert procedure; the numeric argument is currently unused
-- inside it.
BEGIN
    SaveUpdate_ReportsData(12);
END;
/
It's not throwing any type of error, but at the same time it's not inserting the data into the REPORTS_DATA table, and it's not even printing the name.
Help me solve the problem.
Thanks in advance.
Here is how you can extract the data from a JSON (in CLOB data type) and pass it to a collection (nested table) of objects of user-defined type, like you have. The PL/SQL code is for a function that accepts a CLOB (assumed to be valid JSON) and returns a nested table of objects. (Then I show one example of invoking the function in SQL, to see what was saved in it.)
Not sure what you mean by converting your JSON array to CLOB. As far as Oracle is concerned, the JSON is a CLOB.
Anyway - here is the function:
-- Object type for one report row. The NAME_/VALUE_ attributes carry a
-- trailing underscore because NAME and VALUE are Oracle keywords.
CREATE OR REPLACE TYPE report_obj FORCE AS OBJECT (
    id       NUMBER,
    name_    VARCHAR2(20),
    createdt DATE,
    value_   NUMBER(10, 2)
);
/
-- Nested-table collection of report rows.
CREATE OR REPLACE TYPE report_obj_list AS TABLE OF report_obj;
/
-- Shreds a JSON array (CLOB) of {"id","name","createDt","value"} objects
-- into a nested table of REPORT_OBJ. Dates are expected as dd-mon-yyyy.
CREATE OR REPLACE FUNCTION json_to_obj_list (jstr CLOB)
    RETURN report_obj_list
AS
    v_result report_obj_list;
BEGIN
    -- JSON_TABLE projects each array element to one relational row, which
    -- the object constructor turns back into a REPORT_OBJ instance.
    SELECT report_obj(id, name_, TO_DATE(createdt, 'dd-mon-yyyy'), value_)
    BULK COLLECT INTO v_result
    FROM json_table(jstr, '$[*]'
             COLUMNS(id       NUMBER        PATH '$.id',
                     name_    VARCHAR2(20)  PATH '$.name',
                     createdt VARCHAR2(11)  PATH '$.createDt',
                     value_   NUMBER(10, 2) PATH '$.value'
                    )
         );
    RETURN v_result;
END;
/
(As you can see, I changed your object type definition - I changed the attribute names name and value to name_ and value_, because name and value are Oracle keywords so they shouldn't be used as identifiers.)
And here is how this works. Note that I am passing an explicit CLOB to the function. More likely, you will want to store your CLOBs somewhere (table?) and pass them from there. That part is relatively trivial.
-- Demo: shred an inline CLOB literal into rows via the function.
SELECT *
FROM json_to_obj_list(
         to_clob(
'[{"id":1,"name":"john","createDt":"01-jan-2020","value":10},
{"id":2,"name":"crystal","createDt":"01-feb-2020","value":20},
{"id":3,"name":"bob","createDt":"01-mar-2020","value":30}]')
     );
ID NAME_ CREATEDT VALUE_
---------- -------------------- ----------- ----------
1 john 01-jan-2020 10
2 crystal 01-feb-2020 20
3 bob 01-mar-2020 30
Note that createdt is in fact date data type; in the output it looks like your inputs only because I intentionally set my nls_date_format to match it. Otherwise your query will return the dates in that column in whatever format is the default for your session.
Instead of storing a MD5 hash in a 32-byte field, I will like to store it in a 16-byte binary field. Mysql field "TEMP_MD5" is defined as Binary(16).
The MySQL CREATE TABLE with a sample row insert is:
CREATE TABLE `mytable` (
`TEMP_MD5` binary(16) DEFAULT NULL,
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO mytable (TEMP_MD5) VALUES UNHEX("202cb962ac59075b964b07152d234b70") );
The sample code:
Let's say after the 16-byte binary field has been stored in the MySQL field TEMP_MD5, how do I compare this 16-byte field in Delphi code after I retrieve the value?
Is it possible to skip MySQL HEX/UNHEX internal functions, and just use Delphi code to compare the 16-byte binary field (32-byte string) in MySQL?
For example :
FDQuery1.Open( 'SELECT TEMP_MD5 from mytable;' );
if THashMD5.GetHashBytes('123') = fDQuery1.FieldByName('TEMP_MD5').VALUE then
SHOWMESSAGE('MATCHED!');
However, it seems that the values for FieldByName('TEMP_MD5').value never matched the THashMD5.GetHashString('123') value
and another way of comparing by using SELECT statement also failed
FDQuery1.Open( 'SELECT TEMP_MD5 mytable ' +
'WHERE (TEMP_MD5=:myvalue)',
[THashMD5.GetHashBytes('123')] );
above also failed to give FDQuery1.RecordCount = 1.
Basically I'm trying to compare the 16-byte binary I stored in MySQL against a value, let's say '123', in code to see if both match.
I'm using Delphi 10.2 moving to 10.4 next year.
Here is an example of code showing how to write an MD5 into your database and how to read it back and compare with a given MD5 hash:
Inserting data:
{ Computes MD5('123') and stores the raw 16-byte digest into
  mytable.TEMP_MD5 via a bound blob parameter (no HEX/UNHEX needed). }
procedure TForm1.InsertDataButtonClick(Sender: TObject);
var
  Digest: TArray<Byte>;
begin
  Digest := THashMD5.GetHashBytes('123');
  FDConnection1.Connected := TRUE;
  FDQuery1.SQL.Text := 'INSERT INTO mytable (TEMP_MD5) VALUES(:MD5)';
  { Bind the digest bytes directly as raw blob data. }
  FDQuery1.ParamByName('MD5').SetBlobRawData(Length(Digest), PByte(Digest));
  FDQuery1.ExecSQL;
  Memo1.Lines.Add('Rows affected = ' + FDQuery1.RowsAffected.ToString);
end;
Reading data back and comparing with given hash:
{ Reads TEMP_MD5 rows back and compares each digest, byte for byte, with
  MD5('123'). Two query variants are shown: fetch-all (commented out) and
  server-side filtering via a bound raw-blob parameter. }
procedure TForm1.ReadDataButtonClick(Sender: TObject);
var
MD5 : TArray<Byte>;      // digest read from the current record
MD5_123 : TArray<Byte>;  // reference digest MD5('123')
FieldMD5 : TField;       // TEMP_MD5 field of the current record
RecCnt : Integer;        // 1-based record counter for the log lines
begin
MD5_123 := THashMD5.GetHashBytes('123');
FDConnection1.Connected := TRUE;
// First version: get all records
// FDQuery1.SQL.Text := 'SELECT TEMP_MD5 FROM mytable';
// Second version: Get only records where TEMP_MD5 is hash('123').
FDQuery1.SQL.Text := 'SELECT TEMP_MD5 FROM mytable WHERE TEMP_MD5 = :MD5';
FDQuery1.ParamByName('MD5').SetBlobRawData(Length(MD5_123), PByte(MD5_123));
// Execute the query
FDQuery1.Open;
RecCnt := 0;
while not FDQuery1.Eof do begin
Inc(RecCnt);
FieldMD5 := FDQuery1.FieldByName('TEMP_MD5');
// Size the buffer to the field's data size before copying the raw bytes.
SetLength(MD5, FieldMD5.DataSize);
FieldMD5.GetData(MD5);
// Equal length plus CompareMem gives an exact binary comparison.
if (Length(MD5) = Length(MD5_123)) and
(CompareMem(PByte(MD5), PByte(MD5_123), Length(MD5))) then
Memo1.Lines.Add(RecCnt.ToString + ') MD5(123) = ' + MD5ToStr(MD5))
else
Memo1.Lines.Add(RecCnt.ToString + ') ' + MD5ToStr(MD5));
FDQuery1.Next;
end;
end;
As you can see reading the code, I compare the MD5 from database with given MD5 by comparing the memory containing the values (arrays of bytes).
Utility function:
{ Renders an MD5 digest (byte array) as a hex string, two uppercase
  hex digits per byte. }
function MD5ToStr(MD5 : TArray<Byte>) : String;
var
  I : Integer;
begin
  Result := '';
  for I := Low(MD5) to High(MD5) do
    Result := Result + MD5[I].ToHexString(2);
end;
I'm migrating the database engine an application from MySql to SAP HANA.
I found a little trouble. I have a query like this:
Select SUBSTRING_INDEX(id, "-", -2) as prod_ref From products;
I don't know how to "translate" the function substring_index, because the initial part of the id has a variable length.
Thanks.
This can be done using a regex:
select substr_regexpr( '.*-([^-]*-[^-]*)$' in 'varia-ble---part-part1-part2' group 1) from dummy;
select substr_regexpr( '.*-([^-]*-[^-]*)$' in 'variable-part-part1-part2' group 1) from dummy;
According to the HANA 2.0 SP0 doc you could use locate with a negative offset (and then using right()), but this does not work on my system ("...feature isn't supported...")
If you execute such queries on a regular basis on lots of records I would recommend extracting the part you are interested in during ETL into a separate field. Or, alternatively fill a separate field using " GENERATED ALWAYS AS...".
I have seen it more than once, that people calculate a field like this in complex SQL queries or complex CalcViews, and then wonder why performance is bad when selecting 100 million records and filtering on the calculated field etc... Performance is usually no problem when you have aggregated your intermediate result set to a reasonable size and then apply "expensive" functions.
I don't think there is any direct function like SUBSTRING_INDEX in SAP HANA. But you have a work around alternative by creating a function to pass the input string and delimiter.
And I am assuming the -2 argument of SUBSTRING_INDEX (i.e. return everything after the second-to-last delimiter), and providing the solution accordingly.
Reverse the string and get the position of the second delimiter, '-' in your case, into "obtainedPosition"
Now subtract that "obtainedPosition" from the length of the string.
obtainedPosition = LENGTH(id) - obtainedPosition
Using that value in the inbuilt substring function you can get the required string and return it from the function.
SELECT SCHEMA.FN_SUBSTRING_INDEX(id,obtainedPosition) INTO ReturnValue FROM DUMMY;
-- Emulates MySQL SUBSTRING_INDEX(id, delim, -2): returns everything after
-- the second-to-last occurrence of DELIM. When the input contains fewer
-- than two delimiters the whole input is returned.
-- FIXES vs. original:
--   * the search loop compared against undeclared :leng (variable is "len");
--   * "reversedString = ..." used SQL "=" instead of the ":=" assignment;
--   * "i" and "flag" were read without the ":" prefix SQLScript requires.
CREATE FUNCTION FN_SUBSTRING_INDEX
(
    id VARCHAR(500),
    delim VARCHAR(2)
)
RETURNS SplitString VARCHAR(500)
LANGUAGE SQLSCRIPT AS
BEGIN
    DECLARE reversedString VARCHAR(500);
    DECLARE charString VARCHAR(2);
    DECLARE i INT := LENGTH(:id);
    DECLARE len INT := LENGTH(:id);
    DECLARE obtainedPosition INT := 0;
    DECLARE flag INT := 0;
    reversedString := '';
    -- Reverse the input so the delimiter search runs from the end.
    WHILE :i > 0
    DO
        reversedString := CONCAT(:reversedString, SUBSTRING(:id, :i, 1));
        i := :i - 1;
    END WHILE;
    -- Find the position (in the reversed string) of the second delimiter.
    i := 1;
    WHILE :i <= :len
    DO
        charString := SUBSTRING(:reversedString, :i, 1);
        IF (:charString = :delim) AND (:flag < 2) THEN
            obtainedPosition := :i;
            flag := :flag + 1;
        END IF;
        i := :i + 1;
    END WHILE;
    -- Map the reversed position back to the original string. "+2" steps
    -- past the delimiter itself so it is not included in the result.
    IF :flag = 2 THEN
        obtainedPosition := :len - :obtainedPosition + 2;
    ELSE
        obtainedPosition := 1;  -- fewer than two delimiters: whole string
    END IF;
    SELECT SUBSTRING(:id, :obtainedPosition) INTO SplitString FROM DUMMY;
END;
The above function is modified from http://www.kodyaz.com/sap-abap/sqlscript-reverse-string-function-in-sap-hana.aspx
For string functions in SAP HANA refer to this: http://www.sapstudent.com/hana/sql-string-functions-in-sap-hana/3
You can use anonymous block in SAP HANA to call and check the function
-- Anonymous block to smoke-test FN_SUBSTRING_INDEX.
-- FIX: SQLScript reads local variables with the ":" prefix; the original
-- passed "id, delim" instead of ":id, :delim".
DO
BEGIN
    DECLARE id VARCHAR(500) := 'Test-sam-ple-func';
    DECLARE delim VARCHAR(2) := '-';
    SELECT SCHEMA.FN_SUBSTRING_INDEX(:id, :delim) AS "SplitStringIndex" FROM DUMMY;
END;
I would be glad to know reason for a downvote. :)
Since Oracle 11g doesn't support JSON natively, as you know, I am building the JSON data in a CLOB because of the VARCHAR2 (32767) size limit, but I am getting ORA-06502: PL/SQL: numeric or value error. The data size is 68075 bytes. Why am I getting the error when a CLOB supports up to 4 GB of data?
-- Streams all courses of a given type as a JSON-ish array body assembled in
-- a CLOB (Oracle 11g has no native JSON support).
-- FIXES vs. original:
--   * "where and type = cp_param" was a syntax error (stray AND);
--   * htp.p takes a VARCHAR2 (max 32767 bytes); implicitly converting a
--     68075-byte CLOB raised ORA-06502 -- output is now emitted in chunks;
--   * the temporary LOB leaked on the success path; it is now always freed.
-- NOTE(review): code/title values are not JSON-escaped; a quote inside
-- t_course.title would still corrupt the output (pre-existing limitation).
procedure course
(
    p varchar2 default null
)
as
    cursor cr_course(cp_param varchar2)
    is
    select
        m.code,
        m.title
    from t_course m
    where type = cp_param;  -- FIX: original read "where and type = ..."
    jobject clob;
    jitem varchar2(200);
    v_pos pls_integer := 1;                 -- current read offset in the CLOB
    v_len pls_integer;                      -- bytes to emit (trailing comma dropped)
    c_chunk constant pls_integer := 8000;   -- safely below the 32767 VARCHAR2 cap
begin
    dbms_lob.createtemporary(jobject, false);
    for n in cr_course(p) loop
        jitem := '{"key":"' || n.code || '", "value":"'|| n.title || '"},';
        dbms_lob.append(jobject, jitem);
    end loop;
    -- Emit everything except the trailing comma in VARCHAR2-sized chunks:
    -- passing the whole CLOB to htp.p is what raised ORA-06502.
    v_len := nvl(dbms_lob.getlength(jobject), 0) - 1;
    while v_pos <= v_len loop
        htp.prn(dbms_lob.substr(jobject, least(c_chunk, v_len - v_pos + 1), v_pos));
        v_pos := v_pos + c_chunk;
    end loop;
    dbms_lob.freetemporary(jobject);
exception when others then
    if jobject is not null then
        dbms_lob.freetemporary(jobject);
    end if;
    htp.p(sqlerrm);
end;
I think the error is caused by htp.p(substr(jobject, 0, (length(jobject)-1))): SUBSTR does work on a CLOB, but it returns a CLOB, and htp.p takes a VARCHAR2 parameter limited to 32767 bytes. The implicit CLOB-to-VARCHAR2 conversion of your 68075-byte value overflows that limit and raises ORA-06502. Print the CLOB in chunks of at most 32767 bytes (e.g. with DBMS_LOB.SUBSTR in a loop) instead of passing it to htp.p in one call.