I need help with jq syntax: how do I return the GitLab job ID if the job contains an artifact? The JSON output looks like this (I removed a lot of unrelated info and replaced it with [...]):
[{
"id": 3219589880,
"status": "success",
"stage": "test",
"name": "job_with_no_artifact",
"ref": "main",
"tag": false,
"coverage": null,
"allow_failure": false,
"created_at": "2022-10-24T18:21:25.119Z",
"started_at": "2022-10-24T18:21:25.986Z",
"finished_at": "2022-10-24T18:21:38.464Z",
"duration": 12.478682,
"queued_duration": 0.499786,
"user": {
"id": 123456789,
[...]
},
"commit": {
"id": "5e0e1f287d20daf2036a3ca71c656dce55999265",
[...]
},
"pipeline": {
"id": 123456789,
[...]
},
"project": {
"ci_job_token_scope_enabled": false
},
"artifacts": [],
"runner": {
"id": 12270859,
[...]
},
"artifacts_expire_at": null,
"tag_list": []
}, {
"id": 3219589878,
"status": "success",
"stage": "test",
"name": "create_artifact_job_2",
"ref": "main",
"tag": false,
"coverage": null,
"allow_failure": false,
"created_at": "2022-10-24T18:21:25.111Z",
"started_at": "2022-10-24T18:21:25.922Z",
"finished_at": "2022-10-24T18:21:39.090Z",
"duration": 13.168405,
"queued_duration": 0.464364,
"user": {
"id": 123456789,
[...]
},
"commit": {
"id": "5e0e1f287d20daf2036a3ca71c656dce55999265",
[...]
},
"pipeline": {
"id": 675641982,
[...]
},
"project": {
"ci_job_token_scope_enabled": false
},
"artifacts_file": {
"filename": "artifacts.zip",
"size": 223
},
"artifacts": [{
"file_type": "archive",
"size": 223,
"filename": "artifacts.zip",
"file_format": "zip"
}, {
"file_type": "metadata",
"size": 153,
"filename": "metadata.gz",
"file_format": "gzip"
}],
"runner": {
"id": 12270845,
[...]
},
"artifacts_expire_at": "2022-10-25T18:21:35.859Z",
"tag_list": []
}, {
"id": 3219589876,
"status": "success",
"stage": "test",
"name": "create_artifact_job_1",
"ref": "main",
"tag": false,
"coverage": null,
"allow_failure": false,
"created_at": "2022-10-24T18:21:25.103Z",
"started_at": "2022-10-24T18:21:25.503Z",
"finished_at": "2022-10-24T18:21:41.407Z",
"duration": 15.904028,
"queued_duration": 0.098837,
"user": {
"id": 123456789,
[...]
},
"commit": {
"id": "5e0e1f287d20daf2036a3ca71c656dce55999265",
[...]
},
"pipeline": {
"id": 123456789,
[...]
},
"web_url": "WEB_URL",
"project": {
"ci_job_token_scope_enabled": false
},
"artifacts_file": {
"filename": "artifacts.zip",
"size": 217
},
"artifacts": [{
"file_type": "archive",
"size": 217,
"filename": "artifacts.zip",
"file_format": "zip"
}, {
"file_type": "metadata",
"size": 152,
"filename": "metadata.gz",
"file_format": "gzip"
}],
"runner": {
"id": 12270857,
},
"artifacts_expire_at": "2022-10-25T18:21:37.808Z",
"tag_list": []
}]
I've been trying to do either of the following using jq:
Check if the artifacts_file key exists in each iteration and, if it does, return the (job) id (so .[].id).
Check if the artifacts array is empty in each iteration and, if it is, return the (job) id.
In both cases I'm able to do the first part, but I'm not sure how to return the .id key.
Related stackoverflow questions that I've been trying to utilize and adapt to my case:
jq - return array value if its length is not null
How to check for presence of 'key' in jq before iterating over the values
What I have so far is jq '[.[].artifacts[]|select(length > 0)] | .[]', which returns all the artifacts found, but the output doesn't contain the .id of the job.
Checking for the existence of a field using has:
.[] | select(has("artifacts_file")).id
3219589878
3219589876
Demo
Checking if a field is an empty array by comparing it to []:
.[] | select(.artifacts == []).id
3219589880
Demo
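For completeness, either check can be run directly from the command line; jobs.json is just a placeholder for wherever you saved the API response, and the second variant selects jobs whose artifacts array is non-empty, matching the goal stated at the top of the question:
# job IDs of jobs that have an artifacts_file key
jq '.[] | select(has("artifacts_file")).id' jobs.json
# job IDs of jobs whose artifacts array is non-empty
jq '.[] | select(.artifacts | length > 0).id' jobs.json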
I would like to use jq to modify the following JSON input:
[
{
"description": "",
"created_on": "2021-12-27T11:32:03.171682Z",
"modified_on": "2021-12-27T11:32:03.171682Z",
"id": "test",
"enabled": true,
"minimum_origins": 1,
"monitor": "test",
"name": "test",
"notification_email": "",
"check_regions": null,
"latitude": 43.7417,
"longitude": -79.3733,
"notification_filter": {
"pool": {}
},
"healthy": true,
"origins": [
{
"name": "AAAA",
"address": "1.1.1.1",
"enabled": true,
"weight": 1,
"healthy": true,
"failure_reason": "No failures"
},
{
"name": "BBBB",
"address": "2.2.2.2",
"enabled": true,
"weight": 1,
"healthy": true,
"failure_reason": "No failures"
}
]
}
]
If address == "1.1.1.1", then set weight to 0. This is what I expect:
[
{
"name": "test",
"origins": [
{
"name": "AAAA",
"address": "1.1.1.1",
"enabled": true,
"weight": 0,
"healthy": true,
"failure_reason": "No failures"
},
{
"name": "BBBB",
"address": "2.2.2.2",
"enabled": true,
"weight": 1,
"healthy": true,
"failure_reason": "No failures"
}
]
}
]
My best attempt is below, but its output doesn't match my expected format. I tried many different options but could not find the correct code:
(.[].origins[] | select(.address == "1.1.1.1") | .weight ) |= 0
Either set the value first (you can simply use = here, as the new value does not depend on the old one), then use map({name, origins}) to reduce each item to your expected output:
(.[].origins[] | select(.address == "1.1.1.1")).weight = 0
| map({name, origins})
Demo
Or combine both within the map in one go:
map(
{name, origins}
| .origins[] |= (select(.address == "1.1.1.1").weight = 0)
)
Demo
Output:
[
{
"name": "test",
"origins": [
{
"name": "AAAA",
"address": "1.1.1.1",
"enabled": true,
"weight": 0,
"healthy": true,
"failure_reason": "No failures"
},
{
"name": "BBBB",
"address": "2.2.2.2",
"enabled": true,
"weight": 1,
"healthy": true,
"failure_reason": "No failures"
}
]
}
]
Here's a slightly different version, but pmf's answer is preferable.
map({
name,
origins: .origins | map(select(.address=="1.1.1.1").weight = 0)
})
If for whatever reason you want to avoid assignments with complex left-hand sides (e.g., for portability across various versions of jq and jq look-alikes):
map( {name, origins}
| .origins |= map( if .address == "1.1.1.1" then .weight = 0 else . end) )
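As a usage sketch, the whole transformation can be run from the shell in one go (input.json is a placeholder filename):
jq 'map({name, origins}
      | .origins |= map(if .address == "1.1.1.1" then .weight = 0 else . end))' input.json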
Because the public IPFS gateway is too slow, I set up my own IPFS cluster on AWS using Kubernetes.
However, when I try to get files from the cluster, I succeed for some files but consistently fail for others (the failing ones keep failing).
How do I debug this? Did I make a mistake in the configuration? Here's the configuration I used.
{
"API": {
"HTTPHeaders": {
"Access-Control-Allow-Methods": [
"PUT",
"POST"
],
"Access-Control-Allow-Origin": [
"http://localhost:3000",
"http://127.0.0.1:5001",
"https://webui.ipfs.io"
]
}
},
"Addresses": {
"API": "/ip4/0.0.0.0/tcp/5001",
"Announce": [],
"AppendAnnounce": [],
"Gateway": "/ip4/0.0.0.0/tcp/8080",
"NoAnnounce": [],
"Swarm": [
"/ip4/0.0.0.0/tcp/4001",
"/ip6/::/tcp/4001",
"/ip4/0.0.0.0/udp/4001/quic",
"/ip6/::/udp/4001/quic"
]
},
"AutoNAT": {},
"Bootstrap": [
"/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
"/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
"/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
"/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
"/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
"/ip4/104.131.131.82/udp/4001/quic/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
],
"DNS": {
"Resolvers": {}
},
"Datastore": {
"BloomFilterSize": 0,
"GCPeriod": "1h",
"HashOnRead": false,
"Spec": {
"mounts": [
{
"child": {
"path": "blocks",
"shardFunc": "/repo/flatfs/shard/v1/next-to-last/2",
"sync": true,
"type": "flatfs"
},
"mountpoint": "/blocks",
"prefix": "flatfs.datastore",
"type": "measure"
},
{
"child": {
"compression": "none",
"path": "datastore",
"type": "levelds"
},
"mountpoint": "/",
"prefix": "leveldb.datastore",
"type": "measure"
}
],
"type": "mount"
},
"StorageGCWatermark": 90,
"StorageMax": "10GB"
},
"Discovery": {
"MDNS": {
"Enabled": true,
"Interval": 10
}
},
"Experimental": {
"AcceleratedDHTClient": false,
"FilestoreEnabled": false,
"GraphsyncEnabled": false,
"Libp2pStreamMounting": false,
"P2pHttpProxy": false,
"StrategicProviding": false,
"UrlstoreEnabled": false
},
"Gateway": {
"APICommands": [],
"HTTPHeaders": {
"Access-Control-Allow-Headers": [
"X-Requested-With",
"Range",
"User-Agent"
],
"Access-Control-Allow-Methods": [
"GET"
],
"Access-Control-Allow-Origin": [
"*"
]
},
"NoDNSLink": false,
"NoFetch": false,
"PathPrefixes": [],
"PublicGateways": null,
"RootRedirect": "",
"Writable": false
},
"Identity": {
"PeerID": "<intentionally hide>"
},
"Internal": {},
"Ipns": {
"RecordLifetime": "",
"RepublishPeriod": "",
"ResolveCacheSize": 128
},
"Migration": {
"DownloadSources": [],
"Keep": ""
},
"Mounts": {
"FuseAllowOther": false,
"IPFS": "/ipfs",
"IPNS": "/ipns"
},
"Peering": {
"Peers": null
},
"Pinning": {
"RemoteServices": {}
},
"Plugins": {
"Plugins": null
},
"Provider": {
"Strategy": ""
},
"Pubsub": {
"DisableSigning": false,
"Router": ""
},
"Reprovider": {
"Interval": "12h",
"Strategy": "all"
},
"Routing": {
"Type": "dht"
},
"Swarm": {
"AddrFilters": null,
"ConnMgr": {
"GracePeriod": "20s",
"HighWater": 900,
"LowWater": 600,
"Type": "basic"
},
"DisableBandwidthMetrics": false,
"DisableNatPortMap": false,
"RelayClient": {
"Enabled": true
},
"RelayService": {
"Enabled": true
},
"Transports": {
"Multiplexers": {},
"Network": {},
"Security": {}
}
}
}
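For debugging, a few standard kubo and ipfs-cluster-ctl checks can help narrow down whether the failing CIDs are being provided and pinned at all; this is only a sketch (the CID is a placeholder, and it assumes you can exec into the ipfs and ipfs-cluster containers):
# Is the node connected to any peers at all?
ipfs swarm peers | wc -l
# Does any peer on the DHT announce the CID that keeps failing?
ipfs dht findprovs <failing-cid>
# Is the CID pinned somewhere in the cluster, and on which peer?
ipfs-cluster-ctl status <failing-cid>
# Can the cluster peers see each other?
ipfs-cluster-ctl peers ls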
I am trying to modify a large JSON file (a Grafana dashboard), replacing a single value, and then output the whole file with the change. How can I do this?
You can see the value I want to edit below. The actual file is quite large, so there are many other top-level values, but I only need to edit a specific item under the "templating" block.
"templating": {
"list": [
{
"allValue": ".*",
"current": {},
"datasource": "$Source",
"hide": 0,
"includeAll": false,
"label": null,
"multi": true,
"name": "node",
"options": [],
"query": "label_values(node_boot_time{env=~\"$env\"}, instance)",
"refresh": 1,
"regex": "",
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"allValue": null,
"current": {
"tags": [],
"text": "",
"value": ""
},
"datasource": "$Source",
"definition": "label_values(env)",
"hide": 0,
"includeAll": true,
"label": "env",
"multi": false,
"name": "env",
"options": [],
"query": "label_values(env)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
},
{
"current": {
"tags": [],
"text": "",
"value": ""
},
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "Source",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
]
},
The piece I need to change is the block containing "query": "label_values(env)"; within it, I just need to change the value of "regex": "".
I have tried:
jq '.templating.list[] | select(.name == "env") |= . + {regex:"*"}' "dashboard.json" > test.json
The problem is that it then only prints the .list[] elements instead of the whole file. I need to be able to make this change for multiple other files that will have the same block, but not necessarily in the same place, so I can't just select by index number.
Output of above script:
{
"allValue": ".*",
"current": {},
"datasource": "$Source",
"hide": 0,
"includeAll": false,
"label": null,
"multi": true,
"name": "node",
"options": [],
"query": "label_values(node_boot_time{env=~\"$env\"}, instance)",
"refresh": 1,
"regex": "",
"sort": 0,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
{
"allValue": null,
"current": {
"tags": [],
"text": "",
"value": ""
},
"datasource": "$Source",
"definition": "label_values(env)",
"hide": 0,
"includeAll": true,
"label": "env",
"multi": false,
"name": "env",
"options": [],
"query": "label_values(env)",
"refresh": 1,
"regex": "*",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
"tags": [],
"tagsQuery": "",
"type": "query",
"useTags": false
}
{
"current": {
"tags": [],
"text": "",
"value": ""
},
"hide": 0,
"includeAll": false,
"label": null,
"multi": false,
"name": "Source",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
Position the |= operator earlier to retain the original structure:
.templating.list[] |= (select(.name == "env") .regex = "*")
Online demo
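A full invocation following the pattern from the question would then be (filenames as in the question):
jq '.templating.list[] |= (select(.name == "env").regex = "*")' dashboard.json > test.json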
Your expected output doesn't quite match the description of your problem. If your requirement is to find, inside the templating list, the item whose query is "label_values(env)" and update its regex to "", you need the filter below. To change it to * instead, use .regex = "*":
.templating.list[] |= ( select(.query == "label_values(env)").regex = "")
The key is to use the right path and the select filter to get the object to update, then modify it using the |= operator.
jq-play snippet
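Since jq has no in-place edit option, a common pattern for applying this to several dashboard files is to write to a temporary file and move it back; a sketch, assuming files named like dashboard.json:
tmp=$(mktemp)
jq '.templating.list[] |= (select(.query == "label_values(env)").regex = "*")' dashboard.json > "$tmp" \
  && mv "$tmp" dashboard.json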
I am trying to start a bunch of EC2 instances and then install something on them based on the IP assigned by AWS. With only one EC2 instance, I can add the host and proceed without any issue, but when I create several of them using with_dict, I can't achieve it anymore...
The following runs as I want, but I can't figure out how to deal with the registered variable ec2_infos that I get from the provisioning:
- name: Create Test EC2 instances
ec2:
group: default
image: ami-40d28157
instance_type: '{{item.value.type}}'
instance_tags:
Name: "{{ tag+'-'+item.value.name }}"
key_name: privatekey
region: us-west-1
vpc_subnet_id: subnet-REDACTD
wait: yes
with_dict: '{{ec2_stack}}'
register: ec2_infos
With a dictionary like
ec2_stack:
serv1:
type: t2.micro
name: server1
serv2:
type: t2.small
name: server2
ec2_infos is structured like this:
"ec2_infos": {
"changed": true,
"msg": "All items completed",
"results": [
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"changed": true,
"instance_ids": [
"i-0fewq09812ddq6"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0987654"
}
},
"dns_name": "",
"ebs_optimized": false,
"groups": {
"sg-qdwdww": "default"
},
"hypervisor": "xen",
"id": "i-083665656521dwq6",
"image_id": "ami-40d28157",
"launch_time": "2016-11-24T20:38:53.000Z",
"placement": "us-west-1d",
"private_ip": "x.x.x.x",
"public_dns_name": "",
"public_ip": null,
"ramdisk": null,
"region": "us-east-1",
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Name": "server1",
"Team": "blah"
},
"tenancy": "default","tenancy": "default",
"virtualization_type": "hvm"
}
],
"invocation": {
"module_args": {
"assign_public_ip": false,
"exact_count": null,
"group": [
"default"
],
"group_id": null,
"id": null,
"image": "ami-40d28157",
"instance_ids": null,
"instance_initiated_shutdown_behavior": null,
"instance_profile_name": null,
"instance_tags": {
"Name": "server1",
"Team": "blah"
},
"instance_type": "t2.micro",
"kernel": null,
"volumes": null,
"vpc_subnet_id": "subnet-abcdfed",
"wait": true,
"wait_timeout": "300",
"zone": null
},
"module_name": "ec2"
},
"item": {
"key": "serv1",
"value": {
"name": "server1",
"type": "t2.micro"
}
},
"tagged_instances": []
},
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"changed": true,
"instance_ids": [
"i-0971278624334fd"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-9999999"
}
},
"dns_name": "",
"ebs_optimized": false,
"groups": {
"sg-redactd": "default"
},
"launch_time": "2016-11-24T20:39:21.000Z",
"private_ip": "y.y.y.y",
"public_dns_name": "",
"public_ip": null,
"ramdisk": null,
"state": "running",
"state_code": 16,
"tags": {
"Name": "serv2",
"Team": "blah"
},
"tenancy": "default",
"virtualization_type": "hvm"
}
],
"invocation": {
"module_args": {
"assign_public_ip": false,
"wait_timeout": "300",
"zone": null
},
"module_name": "ec2"
},
"item": {
"key": "server2",
"value": {
"name": "serv2",
"type": "t2.small"
}
},
"tagged_instances": []
}
]
}
I tried with_items and with_subelements in different ways, but I can't manage to get the IP of every new EC2 instance. I don't even need to sort them, just extract them from the instances part and feed them to add_host so I can proceed.
Does anybody know a clean way to do this, or would someone be kind enough to explain how to properly deal with a variable registered after a loop?
Answer from the comments:
ec2_infos.results | map(attribute='instances') | sum(start=[]) | map(attribute='private_ip') | list
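A sketch of how that expression could feed add_host (the group name new_ec2 is made up; everything else comes from the registered variable shown above):
- name: Add every new private IP to an in-memory group
  add_host:
    name: "{{ item }}"
    groups: new_ec2
  with_items: "{{ ec2_infos.results | map(attribute='instances') | sum(start=[]) | map(attribute='private_ip') | list }}"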