Recursively parse JSON and get values by key using Ansible

I need to get the value of a key from a JSON document in an Ansible play. The issue is that I am not sure at which level of the JSON the key will appear; it varies from document to document. So I need to search for the keys blindly and get their values, without knowing how many keys exist or how deeply they are nested. Below are a sample JSON document and the play I use to process it.
JSON Data:
{
"status": "SUCCESS",
"errors": [
{
"jobname": "DbcAllJob_td_part_20190211150403_1",
"errorList": [
{
"id": "1",
"migrationId": "b9e19f6b-bc2e-4e2b-8fe9-20efb74d8289",
"jobId": "27",
"backupPhase": "DATA",
"restorePhase": "NULL",
"errorType": "ABORT",
"atTimeStamp": "2019/02/11 15:27:46",
"errorStatus": "UNRESOLVED",
"errorcode": "1148",
"errortext": "1148: DSA DSMAIN-BARNC Communication Exception: 1001:(**********K) recv() call has EOF",
"report": {
"root": {
"system": "TARGET",
"host": "**********",
"state_text": "2019/02/11 07:26:52.181 [ 27:29152:29152] ERROR Executor - Exception received by master thread: recv() call has EOF",
"entityId": "**********_clienthandler",
"logTime": "2019/02/11 07:26:52.181",
"peers": [
"**********_clienthandler"
],
"phase": "",
"rootError": "BarNC - User shutdown request from command line received, stopping BARNC",
"software": "clienthandler"
},
"sequence": [
{
"host": "sdt09586",
"state_text": "Feb 11 07:25:20 sdt09586 DSA[27578]: CRITICAL: DSA: 4751002 #CRITICAL: DataStreamController: 4751002 #DSC version[16.20.24.00], Commons version[16.20.24.00], DSC cannot connect to activeMQ Broker.",
"entityId": "sdt09586_messages",
"logTime": "2019/02/11 7:25:20.000",
"peers": [],
"phase": "",
"software": "messages"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "Feb 11 07:26:52 ********** Teradata[19593]: INFO: Teradata: 6670 # DSMAIN 1148: DSA DSMAIN-BARNC Communication Exception: 1001:(**********K) recv() call has EOF",
"entityId": "**********_messages",
"logTime": "2019/02/11 7:26:52.000",
"peers": [],
"phase": "",
"software": "messages"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "Feb 11 07:26:52 ********** Teradata[6369]: INFO: Teradata: 3265 #Transaction has been Aborted by Administrator or Operations Staff.",
"entityId": "**********_messages",
"logTime": "2019/02/11 7:26:52.000",
"peers": [],
"phase": "",
"software": "messages"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "2019/02/11 07:26:52.900 (134226672): Exception rethrown caught in DSMBackupTask::BeginWriteTask.",
"entityId": "27_**********_barlog_26623_34_3_3",
"logTime": "2019/02/11 07:26:52.900",
"peers": [
"AMP =1"
],
"phase": "",
"software": "barlog_26623_34_3_3"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "2019/02/11 07:26:52.900 (134226672): SendInternalAbort assumeMsgFromStpAab = 1",
"entityId": "27_**********_barlog_26623_34_3_3",
"logTime": "2019/02/11 07:26:52.900",
"peers": [
"AMP =1"
],
"phase": "",
"software": "barlog_26623_34_3_3"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "2019/02/11 07:26:52.900 (134226672): SendInternalAbort, partition = 34, jobId = 27",
"entityId": "27_**********_barlog_26623_34_3_3",
"logTime": "2019/02/11 07:26:52.900",
"peers": [
"AMP =1"
],
"phase": "",
"software": "barlog_26623_34_3_3"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "2019/02/11 07:26:52.900 (134226672): errorCode = 1148, errorTextLength = 84, errorText = 1148: DSA DSMAIN-BARNC Communication Exception: 1001:(**********K) recv() call has EOF",
"entityId": "27_**********_barlog_26623_34_3_3",
"logTime": "2019/02/11 07:26:52.900",
"peers": [
"AMP =1"
],
"phase": "",
"software": "barlog_26623_34_3_3"
},
{
"system": "SOURCE",
"host": "**********",
"state_text": "2019/02/11 07:26:52.910 (134254672): AbortTask has a message...",
"entityId": "27_**********_barlog_26623_34",
"logTime": "2019/02/11 07:26:52.910",
"peers": [],
"phase": "DATA/BUILD",
"software": "barlog_26623_34"
}
]
},
"remedy": [
{
"seqNum": 1,
"remedy": "Configure all machines to have same bynet configuration",
"remedyOpcode": "RECONFIGURE_BYNET",
"remedyInfo": [
{
"systemName": "**********",
"ipAddress": "**********",
"username": "**********",
"password": "**********",
"osUser": "root"
},
{
"systemName": "**********",
"ipAddress": "**********",
"username": "**********",
"password": "**********",
"osUser": "root"
}
]
},
{
"seqNum": 2,
"remedy": "Bring ClientHandler up on **********",
"remedyOpcode": "RESTART_SERVICE",
"remedyInfo": {
"systemName": "**********",
"ipAddress": "**********",
"username": "**********",
"password": "**********",
"osUser": "root"
}
},
{
"seqNum": 3,
"remedy": "Re-submit the job",
"remedyOpcode": "RESUBMIT",
"remedyInfo": null
}
],
"detailedreport": [
{
"component": "TPANode : **********",
"description": "TPANODE status : Up and Running",
"additionalProperties": {}
},
{
"component": "TPANode : **********",
"description": "TPANODE status : Up and Running",
"additionalProperties": {}
},
{
"component": "Bynet : [**********, **********]",
"description": "Bynet configuration mismatch",
"additionalProperties": {}
},
{
"component": "ClientHandler : **********",
"description": "ClientHandler status : not running",
"additionalProperties": {}
},
{
"component": "ActiveMQ : **********",
"description": "ActiveMQ status : running",
"additionalProperties": {}
},
{
"component": "ActiveMQ Queue: **********",
"description": "ActiveMQ message count : 13",
"additionalProperties": {}
},
{
"component": "DSC : **********",
"description": "DSC status : running, DSARest status : running",
"additionalProperties": {}
},
{
"component": "DSC Processes: **********",
"description": "Number of DSC processes running : 1",
"additionalProperties": {}
},
{
"component": "DSMAIN : **********",
"description": "DSMain status : Up and Running, Left over Processes : No left over processes",
"additionalProperties": {}
},
{
"component": "DSMAIN : **********",
"description": "DSMain status : Up and Running, Left over Processes : No left over processes",
"additionalProperties": {}
},
{
"component": "DatabaseConnection : **********",
"description": "Database status : UP",
"additionalProperties": {}
},
{
"component": "DatabaseConnection : **********",
"description": "Database status : UP",
"additionalProperties": {}
},
{
"component": "DatabaseConnection : **********",
"description": "Database status : UP",
"additionalProperties": {}
}
]
},
{
"id": "2",
"migrationId": "b9e19f6b-bc2e-4e2b-8fe9-20efb74d8289",
"jobId": "27",
"backupPhase": null,
"restorePhase": null,
"errorType": "ABORT",
"atTimeStamp": "",
"errorStatus": null,
"errorcode": "1160",
"errortext": "An error occurred restoring statistics.",
"report": {
"root": null,
"sequence": null
},
"remedy": [
{
"seqNum": -1,
"remedy": null,
"remedyOpcode": null,
"remedyInfo": null
}
],
"detailedreport": [
{
"component": null,
"description": null,
"additionalProperties": {}
}
]
}
]
}
]
}
From the JSON above I need to read the 'remedy' values, but I cannot rely on the level at which they appear.
Here is the Ansible playbook I have so far:
---
- hosts: localhost
  tasks:
    - name: loop
      set_fact:
        migration_detail_status: "{{ lookup('file', 'inj_error.json') }}"
      until: "'{{ migration_detail_status.errors.0.errorList.0.remedy.1.remedy | search('Bring ClientHandler up on') }}' and '{{ migration_detail_status.errors.0.errorList.0.remedy.1.remedyOpcode | search('RESTART_SERVICE') }}'"
      retries: 2
      delay: 5
      ignore_errors: yes
    - debug:
        var: migration_detail_status.errors.0.errorList.0.remedy.1.remedyOpcode
In the until condition I traverse the JSON by hand because I happen to know its structure, but that structure is not guaranteed to be the same in every case. Please help me find the key/value pairs I am searching for regardless of the level at which they appear.

You can get a list of all remedies with the JMESPath query errors[].errorList[].remedy[].remedy:
[
"Configure all machines to have same bynet configuration",
"Bring ClientHandler up on **********",
"Re-submit the job"
]
In Ansible you apply JMESPath queries with the json_query filter.
If you also need the opcodes, use errors[].errorList[].remedy[].{r:remedy,o:remedyOpcode}:
[
{
"r": "Configure all machines to have same bynet configuration",
"o": "RECONFIGURE_BYNET"
},
{
"r": "Bring ClientHandler up on **********",
"o": "RESTART_SERVICE"
},
{
"r": "Re-submit the job",
"o": "RESUBMIT"
},
{
"r": null,
"o": null
}
]
Example:
- debug:
    msg: "{{ migration_detail_status | json_query('errors[].errorList[].remedy[].remedy') }}"

Related

How to print nested JSON array data in a tabular format?

I want to read the status of the clusters and of the servers inside each cluster.
Below is a sample of the JSON file:
"data": [{
"id": 7865,
"timeCreated": 1602589399294,
"timeUpdated": 1602748892149,
"name": "gw-ext-1",
"type": "CLUSTER",
"status": "RUNNING",
"multicastEnabled": false,
"primaryNodeId": 546,
"servers": [{
"id": 768,
"timeCreated": 1602589028419,
"timeUpdated": 1602747941321,
"name": "gw-jpg208765-1",
"type": "SERVER",
"serverType": "GATEWAY",
"status": "RUNNING",
"addresses": [{
"networkInterface": "eng123"
},
{
"networkInterface": "eng124"
}],
"clusterId": 098,
"clusterName": "gw-ext-1",
"currentClusteringPort": 897,
"runtimeInformation": {
"Information": {
"runtime": {
"name": "abctech",
"version": "1.6.8"
},
"specification": {
"vendor": "rrr",
"name": "rrrt",
"version": "1.8.89"
}
},
"osInformation": {
"name": "LX",
"version": "35",
"architecture": "klh"
},
"mExpirationDate": 098765589283662
}
},
{
"id": 876,
"timeCreated": 1602589007370,
"timeUpdated": 1602748894901,
"name": "gw-jpg208765-2",
"type": "SERVER",
"serverType": "GATEWAY",
"mVersion": "3.9.1",
"gaVersion": "3.9.1",
"agentVersion": "1.9.5",
"ExpirationDate": 32521996800000,
"ExpirationDate": 1665661007000,
"status": "DISCONNECTED",
"addresses": [{
"networkInterface": "engg"
},
{
"networkInterface": "engg"
}],
"clusterId": 768,
"clusterName": "gw-ext-1",
"serverPort": 987,
"currentClusteringPort": 987,
"runtimeInformation": {
"abcInfo": {
"runtime": {
"name": "abc",
"version": "1.2.3"
},
"specification": {
"vendor": "RRR",
"name": "RTR",
"version": "1.8.0"
}
},
"osInformation": {
"name": "LX",
"version": "4.78",
"architecture": "eng"
},
"ExpirationDate": 8765478999765
}
}],
"visibilityMap": {
"mapNodes": [{
"serverId": 765,
"visibleNodeIds": [765,
876],
"unknownNodeIps": []
},
{
"serverId": 876,
"visibleNodeIds": [765,
876],
"unknownNodeIps": []
}]
}
},
{
"id": 7865,
"timeCreated": 1602589399294,
"timeUpdated": 1602748892149,
"name": "gw-ext-2",
"type": "CLUSTER",
"status": "RUNNING",
"multicastEnabled": false,
"primaryNodeId": 546,
"servers": [{
"id": 768,
"timeCreated": 1602589028419,
"timeUpdated": 1602747941321,
"name": "gw-jpg208766-1",
"type": "SERVER",
"serverType": "GATEWAY",
"status": "RUNNING",
"addresses": [{
"networkInterface": "eng123"
},
{
"networkInterface": "eng124"
}],
"clusterId": 098,
"clusterName": "gw-ext-2",
"currentClusteringPort": 897,
"runtimeInformation": {
"Information": {
"runtime": {
"name": "abctech",
"version": "1.6.8"
},
"specification": {
"vendor": "rrr",
"name": "rrrt",
"version": "1.8.89"
}
},
"osInformation": {
"name": "LX",
"version": "35",
"architecture": "klh"
},
"mExpirationDate": 098765589283662
}
},
{
"id": 876,
"timeCreated": 1602589007370,
"timeUpdated": 1602748894901,
"name": "gw-jpg208766-2",
"type": "SERVER",
"serverType": "GATEWAY",
"mVersion": "3.9.1",
"gaVersion": "3.9.1",
"agentVersion": "1.9.5",
"ExpirationDate": 32521996800000,
"ExpirationDate": 1665661007000,
"status": "DISCONNECTED",
"addresses": [{
"networkInterface": "engg"
},
{
"networkInterface": "engg"
}],
"clusterId": 768,
"clusterName": "gw-ext-2",
"serverPort": 987,
"currentClusteringPort": 987,
"runtimeInformation": {
"abcInfo": {
"runtime": {
"name": "abc",
"version": "1.2.3"
},
"specification": {
"vendor": "RRR",
"name": "RTR",
"version": "1.8.0"
}
},
"osInformation": {
"name": "LX",
"version": "4.78",
"architecture": "eng"
},
"ExpirationDate": 8765478999765
}
}],
"visibilityMap": {
"mapNodes": [{
"serverId": 765,
"visibleNodeIds": [765,
876],
"unknownNodeIps": []
},
{
"serverId": 876,
"visibleNodeIds": [765,
876],
"unknownNodeIps": []
}]
}
}]
So each cluster has two servers, and the JSON continues like this for around 15 clusters.
I want to extract the status of each cluster and server in the format below:
name cluster/server status
gw-ext-1 CLUSTER RUNNING
gw-jpg208765-1 SERVER RUNNING
gw-jpg208765-2 SERVER DISCONNECTED
and similarly for the other clusters.
I tried a few things, but they give me only the cluster details, not the servers:
target_id=echo \$targetIdResponse | ${env.WORKSPACE}/jq -r '.data[] | [.name, .type, .status]'
OR
target_id=echo \$targetIdResponse | ${env.WORKSPACE}/jq -r '.data[] | [.name, .type, .status, .servers.name, .servers.type, .servers.status]'
where $targetIdResponse contains my JSON data.
I want to know how I can filter the above JSON to get the required data.
Put the header fields in one array and the data fields in their own arrays, then render everything in tabular form with @tsv:
jq -r '[ "name", "cluster/server", "status" ],
       ( .data[] | [ .name, .type, .status ] ),
       ( .data[].servers[] | [ .name, .type, .status ] ) | @tsv'
The requirement was modified after this was originally posted, so that each server row appears directly below its cluster row:
jq -r '[ "name", "cluster/server", "status" ],
       ( .data[] | [ .name, .type, .status ], ( .servers[] | [ .name, .type, .status ] ) ) | @tsv'
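For reference, here is roughly what a run against the sample above would look like, wrapped in the same kind of shell pipeline the question uses (this assumes the sample is first made valid JSON; as posted it contains duplicate keys and leading-zero numbers that jq will reject):
echo "$targetIdResponse" | jq -r '
  [ "name", "cluster/server", "status" ],
  ( .data[] | [ .name, .type, .status ], ( .servers[] | [ .name, .type, .status ] ) ) | @tsv'
name            cluster/server  status
gw-ext-1        CLUSTER         RUNNING
gw-jpg208765-1  SERVER          RUNNING
gw-jpg208765-2  SERVER          DISCONNECTED
gw-ext-2        CLUSTER         RUNNING
...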

jq query returning too many records (unwanted permutations)

I have a complex JSON file and I am trying to get the below result using JQ.
Expected Result:
{
"Host": "Test.example.com",
"Title": "Ensure message of the day is configured properly",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure bond0 is present",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure the SELinux state is disabled",
"Status": "passed"
}
Below is the JSON file that I get as a result of running a Chef InSpec profile.
JSON FILE:
{
"platform": {
"name": "redhat",
"release": "7.7",
"target_id": "Test.example.com"
},
"profiles": [
{
"name": "my-test",
"version": "0.1.0",
"sha256": "6fea36d6c12b21cd51274774edb4200d983db45c4cfa8172eebb897b6f3db8fe",
"title": "InSpec Profile",
"maintainer": "The Authors",
"summary": "An InSpec Compliance Profile",
"license": "Apache-2.0",
"copyright": "The Authors",
"copyright_email": "you#example.com",
"supports": [
{
"platform": "os"
}
],
"attributes": [],
"groups": [
{
"id": "controls/1_motd.rb",
"controls": [
"1.1 motd check"
],
"title": "Warning Banners"
},
{
"id": "controls/6_network.rb",
"controls": [
"6.1 Bonding"
]
},
{
"id": "controls/12_selinux.rb",
"controls": [
"Selinux Config"
]
}
],
"controls": [
{
"id": "1.1 motd check",
"title": "Ensure message of the day is configured properly",
"desc": "Ensure message of the day is configured properly",
"descriptions": [
{
"label": "default",
"data": "Ensure message of the day is configured properly"
}
],
"impact": 1,
"refs": [],
"tags": {},
"code": "control '1.1 motd check' do\n title 'Ensure message of the day is configured properly'\n desc \"The contents of the /etc/motd file are displayed to users after login and function as a message of the day for authenticated users.\"\n desc \"Ensure message of the day is configured properly\"\n impact 1.0\n \n describe file('/etc/motd') do\n its('content') { should_not be_empty }\n end\nend\n",
"source_location": {
"line": 3,
"ref": "my-test/controls/1_motd.rb"
},
"waiver_data": {},
"results": [
{
"status": "passed",
"code_desc": "File /etc/motd content should not be empty",
"run_time": 0.099938054,
"start_time": "2020-02-07T11:31:47+11:00"
}
]
},
{
"id": "6.1 Bonding",
"title": "Ensure bond0 is present",
"desc": "Check bonding is present",
"descriptions": [
{
"label": "default",
"data": "Check bonding is present"
}
],
"impact": 1,
"refs": [],
"tags": {},
"code": "control '6.1 Bonding' do\n title 'Ensure bond0 is present'\n desc \"Check bonding is present\"\n impact 1.0\n \n only_if { sys_info.manufacturer != \"VMware, Inc.\" } \n\n describe bond('bond0') do\n it { should exist }\n its('mode') { should eq 'IEEE 802.3ad Dynamic link aggregation' }\n end\nend\n",
"source_location": {
"line": 1,
"ref": "my-test/controls/6_network.rb"
},
"waiver_data": {},
"results": [
{
"status": "skipped",
"code_desc": "No-op",
"run_time": 9.02e-06,
"start_time": "2020-02-07T11:31:47+11:00",
"resource": "No-op",
"skip_message": "Skipped control due to only_if condition."
}
]
},
{
"id": "Selinux Config",
"title": "Ensure the SELinux state is disabled",
"desc": "Set SELinux to diabled",
"descriptions": [
{
"label": "default",
"data": "Set SELinux to diabled"
}
],
"impact": 1,
"refs": [],
"tags": {},
"code": "control 'Selinux Config' do\n title 'Ensure the SELinux state is disabled'\n desc \"Set SELinux to diabled\"\n impact 1.0\n\n describe file('/etc/selinux/config') do\n its(:content) { should match(/^SELINUX=disabled\\s*(?:#.*)?$/) }\n end\nend\n",
"source_location": {
"line": 1,
"ref": "my-test/controls/12_selinux.rb"
},
"waiver_data": {},
"results": [
{
"status": "passed",
"code_desc": "File /etc/selinux/config content should match /^SELINUX=disabled\\s*(?:#.*)?$/",
"run_time": 0.120881444,
"start_time": "2020-02-07T11:31:47+11:00"
}
]
}
],
"status": "loaded"
}
],
"statistics": {
"duration": 0.223533139
},
"version": "4.18.24"
}
I used the following jq code to extract the fields, but the extracted values repeat.
jq code:
jq '{Host: .platform.target_id,Title: .profiles[].controls[].title,Status: .profiles[].controls[].results[].status}'
This is the result I got from the above jq filter.
Actual Result:
{
"Host": "Test.example.com",
"Title": "Ensure message of the day is configured properly",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure message of the day is configured properly",
"Status": "skipped"
}
{
"Host": "Test.example.com",
"Title": "Ensure message of the day is configured properly",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure bond0 is present",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure bond0 is present",
"Status": "skipped"
}
{
"Host": "Test.example.com",
"Title": "Ensure bond0 is present",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure the SELinux state is disabled",
"Status": "passed"
}
{
"Host": "Test.example.com",
"Title": "Ensure the SELinux state is disabled",
"Status": "skipped"
}
{
"Host": "Test.example.com",
"Title": "Ensure the SELinux state is disabled",
"Status": "passed"
}
How can I get the expected result? Any help would be appreciated.
Because you're iterating over profiles[] more than once, you're combining the results of one such iteration with the results of another. To avoid that, iterate only once, and get both title and results from the same controls list entry.
jq '
  .platform.target_id as $target |
  .profiles[].controls[] |
  { Host: $target,
    Title: .title,
    Status: .results[].status }
'
See this running against your input data at https://jqplay.org/s/_6KVDfIn3m
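One design note: .results[] still fans out, so a control with several result entries will produce one output object per result. If you only ever want the first result per control, index it instead. A sketch of that variant run against a file (the file name inspec_report.json is only an illustration):
jq '
  .platform.target_id as $target |
  .profiles[].controls[] |
  { Host: $target,
    Title: .title,
    Status: .results[0].status }
' inspec_report.json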

Test connect DHCP Kea / ISC DHCPDISCOVER PACKET_NAK_0001

The DHCP server is on a different network. I brought up a Linux virtual machine with two interfaces, and I get the error DHCPDISCOVER PACKET_NAK_0001.
On the Linux virtual machine I run the commands:
dhcrelay ip_dhcp -i name_interface
dhclient -v name_interface -s ip_dhcp
Here is an example of the configuration that I send via curl:
{
"command": "config-set",
"service": [
"dhcp4"
],
"arguments": {
"Dhcp4": {
"option-def": [
{
"name": "configRevision",
"code": 254,
"type": "string",
"space": "dhcp4"
}
],
"interfaces-config": {
"interfaces": [
"*"
],
"dhcp-socket-type": "udp"
},
"control-socket": {
"socket-type": "unix",
"socket-name": "/tmp/kea-dhcp4-ctrl.sock"
},
"lease-database": {
"type": "postgresql",
"host": "host",
"name": "name",
"user": "name",
"password": "pass",
"port": 5432,
"lfc-interval": 600
},
"expired-leases-processing": {
"reclaim-timer-wait-time": 10,
"flush-reclaimed-timer-wait-time": 25,
"hold-reclaimed-time": 3600,
"max-reclaim-leases": 100,
"max-reclaim-time": 250,
"unwarned-reclaim-cycles": 5
},
"valid-lifetime": 3600,
"authoritative": true,
"hooks-libraries": [
{
"library": "/usr/local/lib/hooks/libdhcp_lease_cmds.so"
},
{
"library": "/usr/local/lib/hooks/libdhcp_stat_cmds.so"
}
],
"option-data": [
{
"name": "configRevision",
"code": 254,
"data": "1",
"always-send": false
},
{
"name": "domain-name-servers",
"data": "<IP>, <IP>",
"always-send": true
},
{
"name": "time-servers",
"data": "<IP>",
"always-send": true
},
{
"name": "ntp-servers",
"data": "<IP>",
"always-send": true
},
{
"name": "domain-name",
"data": "<DOMAIN>",
"always-send": true
},
{
"name": "dhcp-server-identifier",
"data": "<IP>"
}
],
"shared-networks": [
{
"name": "Zone 1",
"relay": {
"ip-addresses": [
"172.100.100.100",
"<IP>",
"<IP>",
"<IP>"
]
},
"option-data": [],
"subnet4": [
{
"id": 1314,
"subnet": "172.100.100.99/23",
"option-data": [
{
"name": "routers",
"data": "172.100.100.100"
}
],
"pools": [
{
"pool": "172.100.100.130-172.100.100.254",
"client-class": "UNKNOWN"
}
],
"valid-lifetime": 86400,
"reservations": []
}
]
}
]
}
}
Expected result:
Successful issuance of an IP address.
Actual result:
ERROR [kea-dhcp4.bad-packets/26218] DHCP4_PACKET_NAK_0001 [hwtype=1
], cid=[no info], tid=0x23acf436: failed to select a subnet for
incoming packet, src 172.100.100.100, type DHCPDISCOVER
The problem lies with the client-class: the KNOWN/UNKNOWN classification is not available in time for subnet selection. From the Kea docs:
The determination whether there is a reservation for a given client is made after a subnet is selected, so it is not possible to use “KNOWN”/”UNKNOWN” classes to select a shared network or a subnet.
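As a minimal sketch of what that implies for the configuration above, the KNOWN/UNKNOWN guard would come off the pool so it no longer interferes with subnet selection; only the relevant fragment of the subnet4 entry is shown, everything else unchanged:
"subnet4": [
  {
    "id": 1314,
    "subnet": "172.100.100.99/23",
    "option-data": [
      { "name": "routers", "data": "172.100.100.100" }
    ],
    "pools": [
      { "pool": "172.100.100.130-172.100.100.254" }
    ],
    "valid-lifetime": 86400,
    "reservations": []
  }
]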

Extract some value from JSON using JsonPath with != condition in Jmeter

I have the following JSON and I need to get id values for instances which do not have type = Jenkins
{
"data": [
{
"id": "35002399-6fd7-40b7-b0d0-8be64e4ec09c",
"name": "94Jenkins",
"url": "http://127.0.0.1:8084",
"authProvider": false,
"siteId": "cce1b6e2-4b5d-4455-ac96-6b5d4c0d901d",
"status": {
"status": "ONLINE"
},
"instanceStateReady": true,
"instanceState": {
"#type": "InstanceStateDto",
"version": "2.60.3"
},
"adminUser": "admin1",
"hasDRConfig": false,
"managed": true,
"type": "JENKINS",
"siteName": "City",
"lastRefreshTime": "2018-04-24T09:43:01.694Z"
},
{
"id": "5cd3caf6-bac1-4f07-8793-5f124b90eaf5",
"name": "RJO",
"url": "http://test.com",
"authProvider": false,
"status": {
"status": "UNAUTHORIZED"
},
"instanceStateReady": true,
"instanceState": {
"#type": "numberOfArtifacts",
"version": "5.5.2-m002",
"licenses": {
"RJO : artrjo-m": {
"type": "ENTERPRISE",
"validThrough": "Jun 12, 2021",
"licensedTo": "Test",
"licenseHash": "asdadsdb612bda1aae745bd2a3",
"expired": false
},
"RJO : artrjo-s1": {
"type": "ENTERPRISE",
"validThrough": "Jun 12, 2021",
"licensedTo": "JFrog",
"licenseHash": "asaswca236350205a3798c0fa3",
"expired": false
}
}
},
"adminUser": "jfmc",
"hasDRConfig": false,
"managed": false,
"warnings": [
"Site is missing",
"Failed to connect to the service. Please verify that the service information provided is correct."
],
"type": "ARTIFACTORY"
},
{
"id": "0727a49a-6c95-433e-9fc5-7e5c760cc76f",
"name": "NinetyTwo",
"url": "http:127.0.0.1:8081",
"authProvider": true,
"siteId": "cce1b6e2-4b5d-4455-ac96-6b5d4c0d901d",
"status": {
"status": "ONLINE"
},
"instanceStateReady": true,
"instanceState": {
"#type": "numberOfArtifacts",
"version": "5.9.0",
"licenses": {
"NinetyTwo": {
"type": "ENTERPRISE",
"validThrough": "Dec 30, 2018",
"licensedTo": "Test",
"licenseHash": "qweqwed95f712dbabee98184da52443",
"expired": false
}
}
},
"adminUser": "admin",
"hasDRConfig": false,
"managed": true,
"type": "ARTIFACTORY",
"serviceId": "jfrt#01c7g4c7hq0dpd0qa71r8c09sj",
"siteName": "Test",
"lastRefreshTime": "2018-04-24T09:43:01.698Z"
}
]
}
I use the path $..[?(@.type != 'JENKINS')].id to get the ids of the instances whose type is not JENKINS, but the JSON Extractor returns the Jenkins id as well. How can I get the ids of the non-Jenkins instances only?
Replace your JSON path expression with the below and it should work:
$.data.[*][?(@.type != "JENKINS")].id
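Against the sample above, that expression should match only the two non-Jenkins instances. Assuming a JSON Extractor variable name of id and Match No. set to -1 (so all matches are kept), the resulting JMeter variables would look roughly like this; the variable name is illustrative, not from the question:
id_1=5cd3caf6-bac1-4f07-8793-5f124b90eaf5
id_2=0727a49a-6c95-433e-9fc5-7e5c760cc76f
id_matchNr=2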

Ansible: EC2 provisioning and Iterations

I am trying to start a bunch of EC2 instances and then install something on them based on the IPs assigned by AWS. With a single EC2 instance I can add the host and proceed without any issue, but when I create several of them with with_dict I can't manage it anymore.
The following runs as I want, but I can't work out how to deal with the registered variable ec2_infos that I get back from the provisioning task.
- name: Create Test EC2 instances
  ec2:
    group: default
    image: ami-40d28157
    instance_type: '{{ item.value.type }}'
    instance_tags:
      Name: "{{ tag + '-' + item.value.name }}"
    key_name: privatekey
    region: us-west-1
    vpc_subnet_id: subnet-REDACTD
    wait: yes
  with_dict: '{{ ec2_stack }}'
  register: ec2_infos
With a dictionary like
ec2_stack:
  serv1:
    type: t2.micro
    name: server1
  serv2:
    type: t2.small
    name: server2
ec2_infos is structured like this:
"ec2_infos": {
"changed": true,
"msg": "All items completed",
"results": [
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"changed": true,
"instance_ids": [
"i-0fewq09812ddq6"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0987654"
}
},
"dns_name": "",
"ebs_optimized": false,
"groups": {
"sg-qdwdww": "default"
},
"hypervisor": "xen",
"id": "i-083665656521dwq6",
"image_id": "ami-40d28157",
"launch_time": "2016-11-24T20:38:53.000Z",
"placement": "us-west-1d",
"private_ip": "x.x.x.x",
"public_dns_name": "",
"public_ip": null,
"ramdisk": null,
"region": "us-east-1",
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Name": "server1",
"Team": "blah"
},
"tenancy": "default","tenancy": "default",
"virtualization_type": "hvm"
}
],
"invocation": {
"module_args": {
"assign_public_ip": false,
"exact_count": null,
"group": [
"default"
],
"group_id": null,
"id": null,
"image": "ami-40d28157",
"instance_ids": null,
"instance_initiated_shutdown_behavior": null,
"instance_profile_name": null,
"instance_tags": {
"Name": "server1",
"Team": "blah"
},
"instance_type": "t2.micro",
"kernel": null,
"volumes": null,
"vpc_subnet_id": "subnet-abcdfed",
"wait": true,
"wait_timeout": "300",
"zone": null
},
"module_name": "ec2"
},
"item": {
"key": "serv1",
"value": {
"name": "server1",
"type": "t2.micro"
}
},
"tagged_instances": []
},
{
"_ansible_item_result": true,
"_ansible_no_log": false,
"_ansible_parsed": true,
"changed": true,
"instance_ids": [
"i-0971278624334fd"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-9999999"
}
},
"dns_name": "",
"ebs_optimized": false,
"groups": {
"sg-redactd": "default"
},
"launch_time": "2016-11-24T20:39:21.000Z",
"private_ip": "y.y.y.y",
"public_dns_name": "",
"public_ip": null,
"ramdisk": null,
"state": "running",
"state_code": 16,
"tags": {
"Name": "serv2",
"Team": "blah"
},
"tenancy": "default",
"virtualization_type": "hvm"
}
],
"invocation": {
"module_args": {
"assign_public_ip": false,
"wait_timeout": "300",
"zone": null
},
"module_name": "ec2"
},
"item": {
"key": "server2",
"value": {
"name": "serv2",
"type": "t2.small"
}
},
"tagged_instances": []
}
]
}
I tried with_items and with_subelements in different ways, but I can't manage to get all the IPs of the new EC2 instances. I don't even need to sort them, just extract them from the instances part and feed them to add_host so I can proceed.
Does anybody know a clean way to do this, or could someone explain how to properly deal with a variable registered after a loop?
Answer from the comments:
ec2_infos.results | map(attribute='instances') | sum(start=[]) | map(attribute='private_ip') | list
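Putting that expression to use with add_host, a minimal sketch might look like the tasks below; the group name new_ec2 is an arbitrary choice, not something from the question:
- name: Collect the private IPs of all newly created instances
  set_fact:
    new_ips: "{{ ec2_infos.results | map(attribute='instances') | sum(start=[]) | map(attribute='private_ip') | list }}"

# Each IP becomes an in-memory inventory host that later plays can target via the new_ec2 group
- name: Add the new instances to the inventory
  add_host:
    name: "{{ item }}"
    groups: new_ec2
  with_items: "{{ new_ips }}"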