I am using bulkCreate with updateOnDuplicate:
const item = await models.Gsdatatab.bulkCreate(gsdatamodel,{updateOnDuplicate: ["SCRIP","LTP","OHL","ORB15","ORB30","PRB","CAMARILLA"]});
I see the timestamps (createdAt and updatedAt) are not getting updated in the DB after the update. Do I need to explicitly pass those two in the bulkCreate to get them updated each time there is an update, or is there an option I am missing? Also, the id is getting incremented while rows are getting updated; I don't want the id column to auto-increment in the case of an update.
I am using the extended model creation style (extending Model) to define the model.
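For reference, a minimal sketch of what such an extended-style model might look like; the column names come from the bulkCreate call above, while the data types and the unique key are assumptions for illustration only:

const { Model, DataTypes } = require('sequelize');

class Gsdatatab extends Model {}

Gsdatatab.init({
  SCRIP: { type: DataTypes.STRING, unique: true },  // assumed unique key that triggers updateOnDuplicate
  LTP: DataTypes.FLOAT,
  OHL: DataTypes.STRING,
  ORB15: DataTypes.STRING,
  ORB30: DataTypes.STRING,
  PRB: DataTypes.STRING,
  CAMARILLA: DataTypes.STRING
}, {
  sequelize,         // an existing Sequelize instance
  modelName: 'Gsdatatab',
  timestamps: true   // createdAt / updatedAt are managed by Sequelize
});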
The following was run using
MySQL Server version: 8.0.25 MySQL Community Server
Sequelize version 6.6.5
Summary
Timestamps:
The values returned from the .bulkCreate method can be misleading. You will need to query for the items again after the bulkCreate upsert to find the new values. To quote the Sequelize v6 docs:
The success handler is passed an array of instances, but please notice
that these may not completely represent the state of the rows in the
DB. This is because MySQL and SQLite do not make it easy to obtain
back automatically generated IDs and other default values in a way
that can be mapped to multiple records. To obtain Instances for the
newly created values, you will need to query for them again.
Also, to update the updatedAt column, it needs to be included in the array passed to updateOnDuplicate; otherwise it will not receive a new timestamp.
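Applied to the call from the question, that would look like this (a sketch; the model and field names are taken from the question):

const item = await models.Gsdatatab.bulkCreate(gsdatamodel, {
  updateOnDuplicate: [
    "SCRIP", "LTP", "OHL", "ORB15", "ORB30", "PRB", "CAMARILLA",
    "updatedAt"  // include the timestamp column so it is refreshed on each upsert
  ]
});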
Non-sequential primary keys: MySQL appears to increment the next auto_increment value even when a row ends up being updated rather than inserted. I'm not aware of a way to prevent this. However, it is still possible to insert rows that explicitly use primary keys the auto_increment mechanism has skipped over. Also, according to another answer on Stack Overflow concerning non-sequential primary keys, the gaps should have no impact on efficiency. As an alternative, the bulkCreate payload could be split into two groups, one for inserts and one for updates, which could then be issued separately through Sequelize. The downside is the extra query needed to determine whether incoming data already exists in the database, in order to decide between insert and update.
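A rough sketch of that split approach, assuming SCRIP is the unique key that triggers the duplicate handling (the helper name and the key choice are assumptions, not from the question):

async function upsertWithoutIdGaps(rows) {
  // Find which incoming rows already exist, keyed by the assumed unique column SCRIP.
  const existing = await models.Gsdatatab.findAll({
    attributes: ['SCRIP'],
    where: { SCRIP: rows.map(r => r.SCRIP) },  // an array value is treated as IN (...)
    raw: true
  });
  const existingScrips = new Set(existing.map(r => r.SCRIP));

  const toInsert = rows.filter(r => !existingScrips.has(r.SCRIP));
  const toUpdate = rows.filter(r => existingScrips.has(r.SCRIP));

  // Plain inserts: no ON DUPLICATE KEY clause, so no auto_increment values are burned on existing rows.
  await models.Gsdatatab.bulkCreate(toInsert);

  // Updates go through Model.update, which also refreshes updatedAt automatically.
  for (const row of toUpdate) {
    await models.Gsdatatab.update(row, { where: { SCRIP: row.SCRIP } });
  }
}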
Here's a complete code sample demonstrating the behaviour described above:
let {
  Sequelize,
  DataTypes,
} = require('sequelize')

async function run () {
  let sequelize = new Sequelize(process.env.DB_NAME, process.env.DB_USER, process.env.DB_PASSWORD, {
    host: 'localhost',
    dialect: 'mysql',
    logging: console.log
  })

  let Item = sequelize.define('item', {
    name: DataTypes.STRING,
    age: DataTypes.INTEGER
  }, {
    tableName: 'items',
    schema: 'agw_queries'
  })

  await sequelize.sync({ force: true })

  let wait = sec => new Promise(res => setTimeout(res, sec * 1000));

  let items = await Item.bulkCreate([{ name: 'mickey', age: 32 }, { name: 'minnie', age: 30 }])
  console.log()
  console.log('These values are returned upon creation.')
  console.log()
  console.log(JSON.stringify(items, null, 2))

  console.log()
  console.log('These values are returned after a subsequent query.')
  console.log()
  let r = await Item.findAll({})
  console.log(JSON.stringify(r, null, 2))

  console.log()
  console.log('Waiting two seconds ...')
  console.log()
  await wait(2)

  console.log('These values are returned after an update.')
  console.log()
  items = await Item.bulkCreate(
    [
      { id: 1, name: 'mickey mouse', age: 33 },
      { id: 2, name: 'minnie mouse', age: 31 },
      { name: 'goofy', age: 37 }
    ],
    { updateOnDuplicate: [ 'name', 'updatedAt' ] })
  console.log(JSON.stringify(items, null, 2))

  console.log()
  console.log('These values are returned after another subsequent query.')
  console.log()
  r = await Item.findAll({})
  console.log(JSON.stringify(r, null, 2))

  console.log()
  console.log('Waiting two seconds ...')
  console.log()
  await wait(2)

  console.log('These values are returned after an update.')
  console.log()
  items = await Item.bulkCreate(
    [
      { id: 1, name: 'mickey t. mouse', age: 33 },
      { id: 2, name: 'minerva mouse', age: 31 },
      { name: 'donald duck', age: 32 }
    ],
    { updateOnDuplicate: [ 'name', 'updatedAt' ] })
  console.log(JSON.stringify(items, null, 2))

  console.log()
  console.log('These values are returned after another subsequent query.')
  console.log()
  r = await Item.findAll({})
  console.log(JSON.stringify(r, null, 2))

  await sequelize.close()
}

run()
And here's the output
Executing (default): DROP TABLE IF EXISTS `items`;
Executing (default): DROP TABLE IF EXISTS `items`;
Executing (default): CREATE TABLE IF NOT EXISTS `items` (`id` INTEGER NOT NULL auto_increment , `name` VARCHAR(255), `age` INTEGER, `createdAt` DATETIME NOT NULL, `updatedAt` DATETIME NOT NULL, PRIMARY KEY (`id`)) ENGINE=InnoDB;
Executing (default): SHOW INDEX FROM `items`
Executing (default): INSERT INTO `items` (`id`,`name`,`age`,`createdAt`,`updatedAt`) VALUES (NULL,'mickey',32,'2021-09-06 12:17:44','2021-09-06 12:17:44'),(NULL,'minnie',30,'2021-09-06 12:17:44','2021-09-06 12:17:44');
These values are returned upon creation.
[
{
"id": 1,
"name": "mickey",
"age": 32,
"createdAt": "2021-09-06T12:17:44.042Z",
"updatedAt": "2021-09-06T12:17:44.042Z"
},
{
"id": 2,
"name": "minnie",
"age": 30,
"createdAt": "2021-09-06T12:17:44.042Z",
"updatedAt": "2021-09-06T12:17:44.042Z"
}
]
These values are returned after a subsequent query.
Executing (default): SELECT `id`, `name`, `age`, `createdAt`, `updatedAt` FROM `items` AS `item`;
[
{
"id": 1,
"name": "mickey",
"age": 32,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:44.000Z"
},
{
"id": 2,
"name": "minnie",
"age": 30,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:44.000Z"
}
]
Waiting two seconds ...
These values are returned after an update.
Executing (default): INSERT INTO `items` (`id`,`name`,`age`,`createdAt`,`updatedAt`) VALUES (1,'mickey mouse',33,'2021-09-06 12:17:46','2021-09-06 12:17:46'),(2,'minnie mouse',31,'2021-09-06 12:17:46','2021-09-06 12:17:46'),(NULL,'goofy',37,'2021-09-06 12:17:46','2021-09-06 12:17:46') ON DUPLICATE KEY UPDATE `name`=VALUES(`name`),`updatedAt`=VALUES(`updatedAt`);
[
{
"id": 1,
"name": "mickey mouse",
"age": 33,
"createdAt": "2021-09-06T12:17:46.174Z",
"updatedAt": "2021-09-06T12:17:46.174Z"
},
{
"id": 2,
"name": "minnie mouse",
"age": 31,
"createdAt": "2021-09-06T12:17:46.174Z",
"updatedAt": "2021-09-06T12:17:46.174Z"
},
{
"id": 5,
"name": "goofy",
"age": 37,
"createdAt": "2021-09-06T12:17:46.174Z",
"updatedAt": "2021-09-06T12:17:46.174Z"
}
]
These values are returned after another subsequent query.
Executing (default): SELECT `id`, `name`, `age`, `createdAt`, `updatedAt` FROM `items` AS `item`;
[
{
"id": 1,
"name": "mickey mouse",
"age": 32,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:46.000Z"
},
{
"id": 2,
"name": "minnie mouse",
"age": 30,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:46.000Z"
},
{
"id": 3,
"name": "goofy",
"age": 37,
"createdAt": "2021-09-06T12:17:46.000Z",
"updatedAt": "2021-09-06T12:17:46.000Z"
}
]
Waiting two seconds ...
These values are returned after an update.
Executing (default): INSERT INTO `items` (`id`,`name`,`age`,`createdAt`,`updatedAt`) VALUES (1,'mickey t. mouse',33,'2021-09-06 12:17:48','2021-09-06 12:17:48'),(2,'minerva mouse',31,'2021-09-06 12:17:48','2021-09-06 12:17:48'),(NULL,'donald duck',32,'2021-09-06 12:17:48','2021-09-06 12:17:48') ON DUPLICATE KEY UPDATE `name`=VALUES(`name`),`updatedAt`=VALUES(`updatedAt`);
[
{
"id": 1,
"name": "mickey t. mouse",
"age": 33,
"createdAt": "2021-09-06T12:17:48.258Z",
"updatedAt": "2021-09-06T12:17:48.258Z"
},
{
"id": 2,
"name": "minerva mouse",
"age": 31,
"createdAt": "2021-09-06T12:17:48.258Z",
"updatedAt": "2021-09-06T12:17:48.258Z"
},
{
"id": 8,
"name": "donald duck",
"age": 32,
"createdAt": "2021-09-06T12:17:48.258Z",
"updatedAt": "2021-09-06T12:17:48.258Z"
}
]
These values are returned after another subsequent query.
Executing (default): SELECT `id`, `name`, `age`, `createdAt`, `updatedAt` FROM `items` AS `item`;
[
{
"id": 1,
"name": "mickey t. mouse",
"age": 32,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:48.000Z"
},
{
"id": 2,
"name": "minerva mouse",
"age": 30,
"createdAt": "2021-09-06T12:17:44.000Z",
"updatedAt": "2021-09-06T12:17:48.000Z"
},
{
"id": 3,
"name": "goofy",
"age": 37,
"createdAt": "2021-09-06T12:17:46.000Z",
"updatedAt": "2021-09-06T12:17:46.000Z"
},
{
"id": 6,
"name": "donald duck",
"age": 32,
"createdAt": "2021-09-06T12:17:48.000Z",
"updatedAt": "2021-09-06T12:17:48.000Z"
}
]
Related
I'm trying to read data from a JSON document stored on Azure Data Lake Storage from an Azure SQL database using the query below. I tried a couple of ways and nothing seems to bring the data back. Using '$.data[0].AccID' in OPENJSON, for instance, brings back only the first record from each of the arrays, but I'm not sure how to bring back all the data.
json:
[
{
"data": [
{
"AccID": 1234,
"CustID": 456,
"Total": 1234.1234,
"OrderDate": "2022-12-01"
},
{
"AccID": 5678,
"CustID": 890,
"Total": 5678.5678,
"OrderDate": "2022-12-01"
}
],
"count": 2
},
{
"data": [
{
"AccID": 1234,
"CustID": 456,
"Total": 100.0,
"OrderDate": "2021-12-01"
},
{
"AccID": 5678,
"CustID": 890,
"Total": 200.0,
"OrderDate": "2021-12-01"
},
{
"AccID": 8900,
"CustID": 235,
"Total": 300.0,
"OrderDate": "2021-12-01"
}
],
"count": 3
}
]
Query:
SELECT *
FROM OPENROWSET (
BULK 'blobpath/file.json',
DATA_SOURCE = 'adls',
SINGLE_CLOB
) AS [data]
CROSS APPLY OPENJSON (X.BulkColumn, '$.value')
WITH (
AccID int,
CustID int,
Total float,
OrderDate date)
I created a storage account and uploaded the JSON file into a container (reference image).
input .json:
[
{
"data": [
{
"AccID": 1234,
"CustID": 456,
"Total": 1234.1234,
"OrderDate": "2022-12-01"
},
{
"AccID": 5678,
"CustID": 890,
"Total": 5678.5678,
"OrderDate": "2022-12-01"
}
],
"count": 2
},
{
"data": [
{
"AccID": 1234,
"CustID": 456,
"Total": 100.0,
"OrderDate": "2021-12-01"
},
{
"AccID": 5678,
"CustID": 890,
"Total": 200.0,
"OrderDate": "2021-12-01"
},
{
"AccID": 8900,
"CustID": 235,
"Total": 300.0,
"OrderDate": "2021-12-01"
}
],
"count": 3
}
]
I generated a SAS token and created a master key and an external data source (those steps are sketched after the table definition below). I created a table in SQL with the following columns:
Create table data1 (
    ACCID varchar(100),
    CustID varchar(100),
    Total float,
    OrderDate date,
    count int
)
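For completeness, the master key, credential, and external data source setup looks roughly like this. The credential name, storage URL, and SAS secret below are placeholders, not values from the question; only the data source name adls comes from the original query:

CREATE MASTER KEY ENCRYPTION BY PASSWORD = '<strong password>';

CREATE DATABASE SCOPED CREDENTIAL adls_credential
WITH IDENTITY = 'SHARED ACCESS SIGNATURE',
     SECRET = '<sas-token-without-the-leading-?>';

CREATE EXTERNAL DATA SOURCE adls
WITH (
    TYPE = BLOB_STORAGE,
    LOCATION = 'https://<storageaccount>.blob.core.windows.net/<container>',
    CREDENTIAL = adls_credential
);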
I inserted data into that table using the code below, one INSERT per array index:
INSERT INTO data1
SELECT ACCID,CustID,Total,OrderDate,count FROM OPENROWSET(
BULK 'jsonfile path',
DATA_SOURCE = 'your data source',
SINGLE_CLOB
) AS DataFile
cross apply openjson(BulkColumn)
WITH (
AccID varchar(100) '$.data[0].AccID',
CustID varchar(100) '$.data[0].CustID',
Total float '$.data[0].Total',
OrderDate date '$.data[0].OrderDate',
count int '$.count'
)
INSERT INTO data1
SELECT ACCID,CustID,Total,OrderDate,count FROM OPENROWSET(
BULK 'jsonfile path',
DATA_SOURCE = 'your data source',
SINGLE_CLOB
) AS DataFile
cross apply openjson(BulkColumn)
WITH (
AccID varchar(100) '$.data[1].AccID',
CustID varchar(100) '$.data[1].CustID',
Total float '$.data[1].Total',
OrderDate date '$.data[1].OrderDate',
count int '$.count'
)
INSERT INTO data1
SELECT ACCID,CustID,Total,OrderDate,count FROM OPENROWSET(
BULK 'jsonfile path',
DATA_SOURCE = 'your data source',
SINGLE_CLOB
) AS DataFile
cross apply openjson(BulkColumn)
WITH (
AccID varchar(100) '$.data[2].AccID',
CustID varchar(100) '$.data[2].CustID',
Total float '$.data[2].Total',
OrderDate date '$.data[2].OrderDate',
count int '$.count'
)
Data is inserted successfully.
I retrieved the table data, shown in the image for reference.
I then deleted the rows with NULL values (these appear wherever a data array has no element at one of the hard-coded indexes) using the code below:
delete from data1 where ACCID is Null
I then retrieved the table data ordered by count, matching the JSON, using the code below:
select * from data1 order by count
Output:
In this way I retrieved all the data from the JSON document.
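As an aside (not part of the steps above, just a sketch using the same placeholder file path and data source), the per-index INSERTs can be avoided by nesting OPENJSON calls: the outer call keeps each data array as raw JSON with AS JSON, and the inner call shreds every element of that array:

SELECT d.AccID, d.CustID, d.Total, d.OrderDate, g.[count]
FROM OPENROWSET(
    BULK 'jsonfile path',
    DATA_SOURCE = 'your data source',
    SINGLE_CLOB
) AS DataFile
CROSS APPLY OPENJSON(BulkColumn)
WITH (
    [data] nvarchar(max) '$.data' AS JSON,   -- keep the array as raw JSON for the inner OPENJSON
    [count] int '$.count'
) AS g
CROSS APPLY OPENJSON(g.[data])
WITH (
    AccID varchar(100) '$.AccID',
    CustID varchar(100) '$.CustID',
    Total float '$.Total',
    OrderDate date '$.OrderDate'
) AS d;

Every element of every data array becomes one row, with the parent count carried alongside, so no rows are skipped regardless of array length.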
I want to return the query result as a JSON object from SQL Server. I have the Employee and EmployeeDtl tables below.
DROP TABLE IF EXISTS #Employee
CREATE TABLE #Employee (EmpId bigint, EmpName varchar(2000))
INSERT INTO #Employee VALUES (1, 'A')
DROP TABLE IF EXISTS #EmployeeDtl
CREATE TABLE #EmployeeDtl (EmpId bigint, WorkObject varchar(100), LocationName varchar(100))
INSERT INTO #EmployeeDtl VALUES (1, 'AAA', 'AAAAA')
INSERT INTO #EmployeeDtl VALUES (1, 'BBB', 'BBBB')
INSERT INTO #EmployeeDtl VALUES (1, 'CCC', 'CCCC')
I want to return the data in the below format.
{
"Employees": [{
"EmpId": 1,
"EmpName": "A",
"EmpDtl": [{
"WorkObject": "AAA",
"LocationName": "AAAAA"
},
{
"WorkObject": "BBB",
"LocationName": "BBBB"
},
{
"WorkObject": "CCC",
"LocationName": "CCCC"
}
]
}]
}
I've written the below query.
SELECT
E.EmpId, E.EmpName, ED.WorkObject AS [EmpDtl.WorkObject], ED.LocationName AS [EmpDtl.LocationName]
FROM
#Employee E
INNER JOIN #EmployeeDtl ED ON E.EmpId = ED.EmpId
FOR
JSON PATH, ROOT('Employees')
But, the above query is returning data in the below format.
{
"Employees": [{
"EmpId": 1,
"EmpName": "A",
"EmpDtl": {
"WorkObject": "AAA",
"LocationName": "AAAAA"
}
}, {
"EmpId": 1,
"EmpName": "A",
"EmpDtl": {
"WorkObject": "BBB",
"LocationName": "BBBB"
}
}, {
"EmpId": 1,
"EmpName": "A",
"EmpDtl": {
"WorkObject": "CCC",
"LocationName": "CCCC"
}
}]
}
Can you please help me understand what I'm doing wrong here?
You need to nest #EmployeeDtl as a subquery instead:
SELECT E.EmpId,
E.EmpName,
(SELECT ED.WorkObject AS [WorkObject],
ED.LocationName AS [LocationName]
FROM #EmployeeDtl ED
WHERE ED.EmpId = E.EmpId
FOR JSON PATH) AS EmpDtl
FROM #Employee E
FOR JSON PATH, ROOT('Employees');
Output:
{
"Employees": [
{
"EmpId": 1,
"EmpName": "A",
"EmpDtl": [
{
"WorkObject": "AAA",
"LocationName": "AAAAA"
},
{
"WorkObject": "BBB",
"LocationName": "BBBB"
},
{
"WorkObject": "CCC",
"LocationName": "CCCC"
}
]
}
]
}
I'm trying to write a Postgres query that will output my json data in a particular format.
JSON data structure
{
user_id: 123,
data: {
skills: {
"skill_1": {
"title": "skill_1",
"rating": 4,
"description": 'description text'
},
"skill_2": {
"title": "skill_2",
"rating": 2,
"description": 'description text'
},
"skill_3": {
"title": "skill_3",
"rating": 5,
"description": 'description text'
},
...
}
}
}
This is how I need the data to be formatted in the end:
[
{
user_id: 123,
skill_1: 4,
skill_2: 2,
skill_3: 5,
...
},
{
user_id: 456,
skill_1: 1,
skill_2: 3,
skill_3: 4,
...
}
]
So far I'm working with a query that looks like this:
SELECT
user_id,
data#>>'{skills, "skill_1", rating}' AS "skill_1",
data#>>'{skills, "skill_2", rating}' AS "skill_2",
data#>>'{skills, "skill_3", rating}' AS "skill_3"
FROM some_table
There has to be a better way to go about writing my query. There are 400+ rows and 70+ skills. My above query is a little crazy. Any guidance or help would be greatly appreciated.
Some things to note:
Users rated themselves on 70+ skills
Each skill object has the same structure
Each user rated themselves on the exact same set of skills
db<>fiddle
I expanded your test data to the following (note the array around all users):
[{
"user_id": 123,
"data": {
"skills": {
"skill_1": {
"title": "skill_1",
"rating": 4,
"description": "description text"
},
"skill_2": {
"title": "skill_2",
"rating": 2,
"description": "description text"
},
"skill_3": {
"title": "skill_3",
"rating": 5,
"description": "description text"
}
}
}
},
{
"user_id": 456,
"data": {
"skills": {
"skill_1": {
"title": "skill_1",
"rating": 1,
"description": "description text"
},
"skill_2": {
"title": "skill_2",
"rating": 3,
"description": "description text"
},
"skill_3": {
"title": "skill_3",
"rating": 4,
"description": "description text"
}
}
}
}]
The query:
SELECT
  jsonb_pretty(jsonb_agg(user_id || skills)) -- E
FROM (
  SELECT
    json_build_object('user_id', user_id)::jsonb as user_id, -- D
    json_object_agg(skill_title, skills -> skill_title -> 'rating')::jsonb as skills
  FROM (
    SELECT
      user_id,
      json_object_keys(skills) as skill_title, -- C
      skills
    FROM (
      SELECT
        (datasets -> 'user_id')::text as user_id,
        datasets -> 'data' -> 'skills' as skills -- B
      FROM (
        SELECT
          json_array_elements(json) as datasets -- A
        FROM (
          SELECT '/* the JSON data; see db<>fiddle */'::json
        ) s
      ) s
    ) s
  ) s
  GROUP BY user_id
  ORDER BY user_id
) s
A Makes each array element ({user_id: '42', data: {...}}) its own row.
B The first column saves the user_id. The cast to text is necessary for the later GROUP BY, which cannot group JSON output. The second column extracts the user's skills data.
C Extracts the skill titles so they can be used as keys in (D.1).
D.1 skills -> skill_title -> 'rating' extracts the rating value from each skill.
D.2 json_object_agg aggregates the skill titles and each corresponding rating value into one JSON object, grouped by the user_id.
D.3 json_build_object turns the user_id into a JSON object again.
E.1 user_id || skills merges the two JSON objects into one.
E.2 jsonb_agg aggregates these JSON objects into an array.
E.3 jsonb_pretty makes the result look pretty.
Result:
[{
"skill_1": 4,
"skill_2": 2,
"skill_3": 5,
"user_id": "123"
},
{
"skill_1": 1,
"skill_2": 3,
"skill_3": 4,
"skill_4": 42,
"user_id": "456"
}]
Sorry for how basic this question is; I just cannot wrap my head around this one.
I need the output from SQL Server to look like this.
In a little more human-readable format:
var data = [
{
name: '2017', id: -1,
children: [
{ name: '01-2017', id: 11 },
{ name: '02-2017', id: 12 },
{ name: '03-2017', id: 13 },
{ name: '04-2017', id: 14 },
{ name: '05-2017', id: 15 },
]
},
{
name: '2018', id: -1,
children: [
{ name: '01-2018', id: 6 },
{ name: '02-2018', id: 7 },
{ name: '03-2018', id: 8 },
{ name: '04-2018', id: 9 },
{ name: '05-2018', id: 10 },
]
}
];
This is a snapshot of the data:
The group I will be working with is userid = 1.
My first thought was to use a cursor to loop through all the distinct reportYear values for userid = 1, then run a select based on the year and the userid to fill in each sub-query.
There has to be a way without using a cursor.
You can achieve the desired output by joining your table to a query that extracts all the years to be used as the top-level elements, and then generating the JSON using FOR JSON AUTO:
declare #tmp table (monthlyReportID int, userID int, reportMonth int, reportYear int)
insert into #tmp values
( 6, 1, 1, 2018),
( 7, 1, 2, 2018),
( 8, 1, 3, 2018),
( 9, 1, 4, 2018),
(10, 1, 5, 2018),
(11, 1, 1, 2017),
(12, 1, 2, 2017),
(13, 1, 3, 2017),
(14, 1, 4, 2017),
(15, 1, 5, 2017)
select years.[name], children.[name], children.[id] from
(
select distinct reportYear as [name] from #tmp
) as years
left join
(
select monthlyReportID as [id]
,right('0' + cast(reportMonth as varchar(2)),2) + '-' + cast(reportYear as varchar(4)) as [name]
,reportYear as [year]
from #tmp
) as children
on children.[Year] = years.[name]
for json auto
I omitted the ID field because in your desired output it is always set to -1 and I was not able to understand the logic behind it.
Nonetheless, you should be able to easily edit the script above to obtain the value you need (see the sketch after the results).
Here are the results:
[
{
"name": 2017,
"children": [
{"name": "01-2017", "id": 11},
{"name": "02-2017", "id": 12},
{"name": "03-2017", "id": 13},
{"name": "04-2017", "id": 14},
{"name": "05-2017", "id": 15}
]
},
{
"name": 2018,
"children": [
{"name": "01-2018", "id": 6},
{"name": "02-2018", "id": 7},
{"name": "03-2018", "id": 8},
{"name": "04-2018", "id": 9},
{"name": "05-2018", "id": 10}
]
}
]
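If you do want the constant id: -1 at the year level shown in the desired output, a small tweak (a sketch, not part of the script above) would add it to the years subquery:

select distinct reportYear as [name], -1 as [id] from #tmp

With years.[id] also added to the outer select list, FOR JSON AUTO emits it alongside each year's name.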
I have this json file.
[
{
"Modified": "2016-09-0",
"Id": 16,
"Name": "ABC",
"Filters": [],
"ScoreComponents":[
{
"Id": 86,
"Name": "Politeness",
"Bins": [],
"Ranges": [
{
"ComponentId": 86,
"LastUser": "CDE\\John.Doe"
},
{
"ComponentId": 86,
"LastUser": "CDE\\John.Doe"
}
],
"Filters": []
},
{
"Id": 87,
"Name": "Empathy",
"Bins": [],
"Ranges": [
{
"ComponentId": 87,
"LastUser": "CDE\\John.Doe"
}
],
"Filters": [
{
"ComponentID": -30356,
"BucketID": 81
}
]
},
{
"Id": 88,
"Name": "Ownership",
"Bins": [],
"Ranges": [
{
"ComponentId": 88,
"User": "CDE\\John.Doe"
}
],
"Filters": []
}]
}
]
I have loaded this file into a Vertica flex table:
CREATE FLEX TABLE flex_test();
copy events_stg.flex_test from LOCAL 'C:/test2.json' PARSER fjsonparser (flatten_maps= true, flatten_arrays = false)
I want to read all data from ScoreComponents, including the nested arrays.
I tried this query:
select "Id" as scoreid, mapitems("ScoreComponents") OVER(PARTITION BY "Id") from flex_test
I am getting output like this:
I just don't understand those small squares in the output. I am a student, and Vertica and flex tables are new to me.
I have tried flatten_arrays = true, but it gives me an empty result set.
You're getting squares because the values field contains a binary VMap.
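As a side note, if you just want to look inside those squares, maptostring() renders a VMap as readable text; this sketch assumes the flex table and column from the question:

select "Id", maptostring("ScoreComponents") from flex_test;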
This should do it:
create flex table so_flex();
create table so_score_components(
id int,
name varchar(100)
);
create table so_ranges(
parent_id int,
component_id int,
last_user varchar(100)
);
create table so_filters(
parent_id int,
component_id int,
bucket_id int
);
copy so_flex from local 'E:\Demos\so.json'
parser fjsonparser(start_point='ScoreComponents',
flatten_maps = false, flatten_arrays = false);
insert into so_score_components(id, name)
select id::int, name::varchar from so_flex;
insert into so_ranges(parent_id, component_id, last_user)
select id::int, values['ComponentId']::int, values['LastUser']::varchar
from (
select id, mapitems(ranges) over (partition by id)
from so_flex
) t;
insert into so_filters(parent_id, component_id, bucket_id)
select id::int, values['ComponentID']::int, values['BucketID']::int
from (
select id, mapitems(filters) over (partition by id)
from so_flex
) t;