Create SQLite db from json data - python

I'm reading JSON data from an ArcGIS Server report online and trying to create a database from it.
I've created the database file test.db.
I need the columns to be "Service", "Folder", "Service URL", "Configured State", "Real Time State", and "Server Type",
with one row per "Service" returned from the report.
The JSON data looks like this:
{"reports": [{
"folderName": "/",
"serviceName": "SampleWorldCities",
"type": "MapServer",
"description": "The SampleWorldCities service is provided so you can quickly and easily preview the functionality of the GIS server. Click the thumbnail image to open in a web application. This sample service is optional and can be deleted.",
"isDefault": false,
"isPrivate": false,
"hasManifest": false,
"status": {
"configuredState": "STARTED",
"realTimeState": "STARTED"
},
"instances": {
"folderName": "/",
"serviceName": "SampleWorldCities",
"type": "MapServer",
"max": 1,
"busy": 0,
"free": 1,
"initializing": 0,
"notCreated": 0,
"transactions": 72,
"totalBusyTime": 127611,
"isStatisticsAvailable": true
},
"properties": {
"maxRecordCount": "1000",
"filePath": "${AGSSERVER}/framework/etc/data/WorldCities/WorldCities.msd",
"cacheOnDemand": "false",
"useLocalCacheDir": "true",
"outputDir": "/home/ec2-user/arcgis/server/usr/directories/arcgisoutput",
"virtualOutputDir": "/rest/directories/arcgisoutput",
"supportedImageReturnTypes": "MIME+URL",
"minScale": "295000000",
"isCached": "false",
"ignoreCache": "false",
"maxScale": "4000",
"clientCachingAllowed": "true",
"cacheDir": "/home/ec2-user/arcgis/server/usr/directories/arcgiscache"
},
"iteminfo": {
"description": "The SampleWorldCities service is provided so you can quickly and easily preview the functionality of the GIS server. Click the thumbnail image to open in a web application. This sample service is optional and can be deleted.",
"summary": "The SampleWorldCities service is provided so you can quickly and easily preview the functionality of the GIS server. Click the thumbnail image to open in a web application. This sample service is optional and can be deleted.",
"tags": [
"sample",
"map",
"service"
],
"thumbnail": "thumbnail.png"
},
"permissions": [{
"principal": "esriEveryone",
"permission": {"isAllowed": true},
"childURL": null,
"operation": null
}]
}]}
My script is as follows:
import json
import sqlite3

db = sqlite3.connect('test.db')
traffic = json_read
c = db.cursor()
someitem = traffic.itervalues().next()
columns = ['Service', 'Folder', 'Service URL', 'Configured State', 'Real Time State', 'Server Type']
c.execute("SELECT sql FROM sqlite_master WHERE " \
          "Service='Services' AND type = 'table'")
create_table_string = cursor.fetchall()[0][0]
c.execute('''create table Services
             (Service text primary key,
              Folder text,
              Service URL text,
              Configured State text,
              Real Time State text,
              Server Type text)''')
for service, data in traffic.iteritems():
    services = (service,) + tuple(data[c] for c in columns)
    c = db.cursor()
    c.execute(query)
    c.close()
print "JSON Complete"
Can someone point me in the right direction?
Forgot to mention the field mapping:
Service is serviceName,
Folder is folderName,
Service URL is the link to the service,
Configured State is configuredState,
Real Time State is realTimeState,
Server Type is type.

db = sqlite3.connect('server.db')
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS Services")
db.commit()
cursor.execute("DROP TABLE IF EXISTS Services2")
db.commit()
cursor.execute('''CREATE TABLE Services
                  (Service text,
                   Folder text,
                   Service_URL text,
                   Configured_State text,
                   Real_Time_State text,
                   Server text);''')
This was the code that gave me the output I was looking for.
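For completeness, here is a minimal sketch of how the rows could then be populated from the report JSON, using the field mapping above. This is Python 3; reading the report from report.json and the base URL used to build Service_URL are assumptions, not part of the original post:

import json
import sqlite3

# Hypothetical base URL of the ArcGIS server's REST endpoint; adjust to your environment.
BASE_URL = "https://myserver.example.com/arcgis/rest/services"

with open('report.json') as f:  # assumes the report was saved to a file
    report = json.load(f)

db = sqlite3.connect('server.db')
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS Services")
cursor.execute('''CREATE TABLE Services
                  (Service text,
                   Folder text,
                   Service_URL text,
                   Configured_State text,
                   Real_Time_State text,
                   Server text)''')

# One row per service in the report.
for svc in report['reports']:
    url = "{}/{}/{}".format(BASE_URL, svc['serviceName'], svc['type'])
    cursor.execute("INSERT INTO Services VALUES (?, ?, ?, ?, ?, ?)",
                   (svc['serviceName'],
                    svc['folderName'],
                    url,
                    svc['status']['configuredState'],
                    svc['status']['realTimeState'],
                    svc['type']))

db.commit()
db.close()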

Telegram bot with aws lambda and API gateway

I am developing a Telegram bot with Python (telebot), AWS Lambda, and API Gateway.
I have a problem in the Lambda function and I can't understand why.
My lambda is this:
import json
import telebot
from datetime import datetime

TOKEN = 'xxx'

def lambda_handler(event, context):
    bot = telebot.TeleBot(TOKEN)
    # Extract the message from the payload's body
    message = json.loads(event['body'])
    print(message)
    # Split into the three variables below
    chat_id = message['chat']['id']  # Chat ID will guide your chatbot reply
    sender = message['from']['first_name']  # Sender's first name, registered by the user's Telegram app
    text = message['text']  # The message content
    if text.lower().strip() == "/time":
        current_time = datetime.now().strftime("%H:%M:%S")
        bot.send_message(chat_id, "Right now it's {} UTC.".format(current_time))
    else:
        pass
The error I get when running the test is this:
Response
{
    "errorMessage": "'body'",
    "errorType": "KeyError",
    "stackTrace": [
        " File \"/var/task/lambda_function.py\", line 10, in lambda_handler\n message = json.loads(event['body'])\n"
    ]
}
The given JSON test event:
{
    "update_id": 0000000,
    "message": {
        "message_id": 000000,
        "from": {
            "id": 00000000,
            "is_bot": false,
            "first_name": "myname",
            "last_name": "mysurname",
            "username": "sursurname",
            "language_code": "it"
        },
        "chat": {
            "id": 000000,
            "first_name": "myname",
            "last_name": "mysurname",
            "username": "sursurname",
            "type": "private"
        },
        "date": 1654697178,
        "forward_from": {
            "id": 00000000,
            "is_bot": false,
            "first_name": "mysurname",
            "last_name": "mysurname",
            "username": "sursurname",
            "language_code": "it"
        },
        "forward_date": 0000000000,
        "text": "ciao"
    }
}
I cannot understand why it is not able to read the body in any way. Maybe I am using the wrong library? Do you have any suggestions to help me with this?
event['body'] is almost definitely not the correct key to access data passed through an event. The event passes information in a nested dictionary and you'll need to figure out how to drill down to the correct key.
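Whether 'body' exists depends on how the function is invoked: with an API Gateway proxy integration the Telegram update arrives as a JSON string under 'body', while a Lambda console test event is passed to the handler directly. A minimal sketch handling both shapes (the helper name is mine, not from the original post):

import json

def extract_update(event):
    # API Gateway proxy integration wraps the Telegram update in a JSON
    # string under 'body'; a Lambda console test event is the update itself.
    if 'body' in event:
        return json.loads(event['body'])
    return event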
I solved it by doing this:
json_str = json.dumps(event)  # event is already a dict, so this round-trip is just a copy
resp = json.loads(json_str)
chat_id = resp['message']['chat']['id']
message_text = resp['message']['text']
message_chat_id = resp['message']['chat']['id']
message_username = resp['message']['from']['first_name']
bot = telebot.TeleBot(TOKEN)
bot.send_message(chat_id, "Hey, I understood this message!, hi {}".format(message_username))

Nexmo Voice API - connect two users and play a different talk action to each user

I'm making an outbound call to a customer using POST https://api.nexmo.com/v1/calls/.
I pass in this NCCO, which plays the talk "Hello customer, please wait while we connect you." to the customer and then connects to a salesperson (SALESPERSON_PHONE_NUMBER).
What I want to do is play a different talk to the salesperson only, when they answer, something like "Outbound call to customer for Example Company":
[
    {
        "action": "talk",
        "text": "Hello customer, please wait while we connect you."
    },
    {
        "action": "connect",
        "timeout": 20,
        "from": "MY_NEXMO_PHONE_NUMBER",
        "endpoint": [
            {
                "type": "phone",
                "number": "SALESPERSON_PHONE_NUMBER"
            }
        ]
    }
]
How can I play a different talk message to the salesperson only? I could not see anything in the documentation.
The connect NCCO action has an onAnswer option. From the documentation:
onAnswer - A JSON object containing a required url key. The URL serves an NCCO to execute in the number being connected to, before that call is joined to your existing conversation. Optionally, the ringbackTone key can be specified with a URL value that points to a ringbackTone to be played back on repeat to the caller, so they do not hear just silence. The ringbackTone will automatically stop playing when the call is fully connected. Example: {"url":"https://example.com/answer", "ringbackTone":"http://example.com/ringbackTone.wav" }. Please note, the key ringback is still supported.
So if you change your NCCO to look something like this, the salesperson will hear the talk action in the second NCCO, while the caller hears the ringback tone.
[
    {
        "action": "talk",
        "text": "Hello customer, please wait while we connect you."
    },
    {
        "action": "connect",
        "timeout": 20,
        "from": "MY_NEXMO_PHONE_NUMBER",
        "endpoint": [
            {
                "type": "phone",
                "number": "SALESPERSON_PHONE_NUMBER",
                "onAnswer": {
                    "url": "https://example.com/answer",
                    "ringbackTone": "http://example.com/ringbackTone.wav"
                }
            }
        ]
    }
]
The NCCO served from https://example.com/answer should be:
[{
    "action": "talk",
    "text": "Hello salesperson, please wait while we connect you."
}]
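For illustration, a minimal sketch of serving that NCCO, assuming a Flask app sits behind https://example.com/answer:

from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/answer')
def on_answer_ncco():
    # Played to the salesperson before they are joined to the customer.
    return jsonify([{
        "action": "talk",
        "text": "Hello salesperson, please wait while we connect you."
    }])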
It looks like you want to use the onAnswer functionality in the connect action
https://developer.nexmo.com/voice/voice-api/ncco-reference#connect
A JSON object containing a required url key. The URL serves an NCCO to execute in the number being connected to, before that call is joined to your existing conversation.
[
    {
        "action": "talk",
        "text": "Hello customer, please wait while we connect you."
    },
    {
        "action": "connect",
        "timeout": 20,
        "from": "MY_NEXMO_PHONE_NUMBER",
        "endpoint": [
            {
                "type": "phone",
                "number": "SALESPERSON_PHONE_NUMBER",
                "onAnswer": {"url": "https://example.com/my-on-answer-ncco"}
            }
        ]
    }
]
Then at https://example.com/my-on-answer-ncco, you return an NCCO containing a talk action.
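For context, a minimal sketch of placing the original outbound call with this NCCO from Python using requests. CUSTOMER_PHONE_NUMBER and the JWT value are placeholders; the Voice API authenticates with a JWT generated from your Nexmo application's private key:

import requests

JWT = "YOUR_APPLICATION_JWT"  # generate from your application's private key

ncco = [
    {"action": "talk",
     "text": "Hello customer, please wait while we connect you."},
    {"action": "connect",
     "timeout": 20,
     "from": "MY_NEXMO_PHONE_NUMBER",
     "endpoint": [{"type": "phone",
                   "number": "SALESPERSON_PHONE_NUMBER",
                   "onAnswer": {"url": "https://example.com/my-on-answer-ncco"}}]}
]

response = requests.post(
    "https://api.nexmo.com/v1/calls/",
    headers={"Authorization": "Bearer {}".format(JWT)},
    json={
        "to": [{"type": "phone", "number": "CUSTOMER_PHONE_NUMBER"}],
        "from": {"type": "phone", "number": "MY_NEXMO_PHONE_NUMBER"},
        "ncco": ncco,
    },
)
print(response.status_code, response.json())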

How to refer a resource which is already created in ARM template?

I'm trying to create alerts for my Cosmos DB account using an ARM template. The Cosmos DB account is already created, so I'm not able to use dependsOn to refer to the resource.
"resources": [
{
"type": "microsoft.insights/alertrules",
"name": "[parameters('alertrules_alert_name')]",
"apiVersion": "2014-04-01",
"location": "southcentralus",
"scale": null,
"properties": {
"name": "[parameters('alertrules_alert_name')]",
"description": null,
"isEnabled": true,
"condition": {
"odata.type": "Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition",
"dataSource": {
"odata.type": "Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource",
"resourceUri": "[resourceId('Microsoft.DocumentDB/databaseAccounts', parameters('databaseAccounts_cosmosaccount_name_1'))]",
"metricNamespace": null,
"metricName": "Http 401"
},
"operator": "GreaterThan",
"threshold": 1,
"windowSize": "PT30M"
},
"action": null
}
}
],
"outputs": {}
}
Please review the following documentation for enabling (Classic) Alerts and Diagnostic Settings via ARM template when creating a new Cosmos DB resource:
1) Create a classic metric alert with a Resource Manager template
2) Automatically enable Diagnostic Settings at resource creation using a Resource Manager template
3) Azure Cosmos DB diagnostic logging
Please upvote the existing entries for ARM template functionality, or create a new User Voice entry specific to your use case: Azure Cosmos DB User Voice

Get host status by CheckMK Web-API

I'm trying to get the status of a host with the CheckMK Web-API. Can someone point me in the right direction on how to get this data?
We're currently using CheckMK enterprise 1.4.0.
I've tried:
https://<monitoringhost.tld>/<site>/check_mk/webapi.py?action=get_host&_username=<user>&_secret=<secret>&output_format=json&effective_attributes=1&request={"hostname": "<hostname>"}
But the response does not contain any relevant information about the host itself (e.g. state up/down, uptime, etc.):
{
    "result": {
        "attributes": {
            "network_scan": {
                "scan_interval": 86400,
                "exclude_ranges": [],
                "ip_ranges": [],
                "run_as": "api"
            },
            "tag_agent": "cmk-agent",
            "snmp_community": null,
            "ipv6address": "",
            "alias": "",
            "management_protocol": null,
            "site": "testjke",
            "tag_address_family": "ip-v4-only",
            "tag_criticality": "prod",
            "contactgroups": [
                true,
                []
            ],
            "network_scan_result": {
                "start": null,
                "state": null,
                "end": null,
                "output": ""
            },
            "parents": [],
            "management_address": "",
            "tag_networking": "lan",
            "ipaddress": "",
            "management_snmp_community": null
        },
        "hostname": "<host>",
        "path": ""
    },
    "result_code": 0
}
The Web-API is only for getting/setting the configuration of a host or other objects. If you want to get the live status of a host, use livestatus.
If you enable livestatus on port 6557 (the default), you can query the status of a host over the network. If you are logged into a shell locally, you can use 'lq':
OMD[mysite]:~$ lq "GET hosts\nColumns: name"
Why:
The CheckMK Web-API is for accessing WATO. WATO is the source for creating the Nagios configuration. Nagios does the actual monitoring of the hosts, and the livestatus API is an extension of the Nagios core.
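As a networked variant of the lq example above, a minimal sketch of the same kind of query over the livestatus TCP socket from Python (the hostname is a placeholder, and it assumes livestatus is exposed on port 6557 as described):

import socket

# Ask for host names and states; the response is returned as JSON.
QUERY = "GET hosts\nColumns: name state\nOutputFormat: json\n\n"

with socket.create_connection(("monitoringhost.tld", 6557)) as sock:
    sock.sendall(QUERY.encode("utf-8"))
    sock.shutdown(socket.SHUT_WR)  # signal end of query so livestatus answers
    chunks = []
    while True:
        data = sock.recv(4096)
        if not data:
            break
        chunks.append(data)

print(b"".join(chunks).decode("utf-8"))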
http://<monitoringhost.tld>/<site>/check_mk/view.py?view_name=allhosts&output_format=csv
You can use all the views that you see in the web UI by adding output_format=[csv|json|python].
You will get the data of the table that you see.
You also need to add the credentials, as in your question.
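A small sketch of fetching such a view from Python, assuming the automation credentials from the question are also accepted by view.py:

import requests

# Automation user credentials, as used with webapi.py in the question.
params = {
    "view_name": "allhosts",
    "output_format": "json",
    "_username": "<user>",
    "_secret": "<secret>",
}

resp = requests.get("https://monitoringhost.tld/<site>/check_mk/view.py", params=params)
resp.raise_for_status()
print(resp.json())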

Data Factory: AzureSQL in- and output for pipeline activity type AzureMLBatchExecution

In Azure Data Factory, I'm trying to call an Azure Machine Learning model from a Data Factory pipeline. I want to use an Azure SQL table as input and another Azure SQL table for the output.
First I deployed a Machine Learning (classic) web service. Then I created an Azure Data Factory pipeline, using a LinkedService (type 'AzureML', with the Request URI and API key of the ML web service) and an input and output dataset (type 'AzureSqlTable').
Deploying and provisioning succeed. The pipeline starts as scheduled, but keeps 'Running' without any result. The pipeline activity is not shown in Monitor&Manage: Activity Windows.
On various sites and tutorials, I only find JSON scripts using the activity type 'AzureMLBatchExecution' with blob in- and outputs. I want to use Azure SQL in- and output, but I can't get this working.
Can someone provide a sample JSON-script or tell me what’s possibly wrong with the code below?
Thanks!
{
    "name": "Predictive_ML_Pipeline",
    "properties": {
        "description": "use MyAzureML model",
        "activities": [
            {
                "type": "AzureMLBatchExecution",
                "typeProperties": {},
                "inputs": [
                    {
                        "name": "AzureSQLDataset_ML_Input"
                    }
                ],
                "outputs": [
                    {
                        "name": "AzureSQLDataset_ML_Output"
                    }
                ],
                "policy": {
                    "timeout": "02:00:00",
                    "concurrency": 3,
                    "executionPriorityOrder": "NewestFirst",
                    "retry": 1
                },
                "scheduler": {
                    "frequency": "Week",
                    "interval": 1
                },
                "name": "My_ML_Activity",
                "description": "prediction analysis on ML batch input",
                "linkedServiceName": "AzureMLLinkedService"
            }
        ],
        "start": "2017-04-04T09:00:00Z",
        "end": "2017-04-04T18:00:00Z",
        "isPaused": false,
        "hubName": "myml_hub",
        "pipelineMode": "Scheduled"
    }
}
With a little help from a Microsoft technician, I got this working. The JSON script above only needed changes in the schedule section:
"start": "2017-04-01T08:45:00Z",
"end": "2017-04-09T18:00:00Z",
A pipeline is active only between its start time and end time. Because the scheduler is set to weekly, the pipeline is triggered at the start of the week, so that date should fall between the start and end dates. For more details about scheduling, see: https://learn.microsoft.com/en-us/azure/data-factory/data-factory-scheduling-and-execution
The Azure SQL Input dataset should look like this:
{
    "name": "AzureSQLDataset_ML_Input",
    "properties": {
        "published": false,
        "type": "AzureSqlTable",
        "linkedServiceName": "SRC_SQL_Azure",
        "typeProperties": {
            "tableName": "dbo.Azure_ML_Input"
        },
        "availability": {
            "frequency": "Week",
            "interval": 1
        },
        "external": true,
        "policy": {
            "externalData": {
                "retryInterval": "00:01:00",
                "retryTimeout": "00:10:00",
                "maximumRetry": 3
            }
        }
    }
}
I added the external and policy properties to this dataset (see the script above), and after that it worked.