nodejs json-schema fails to read schema

I have an Express app with a POST route that accepts a JSON body.
server.js (simplified version):
app.post('/listener/v1/event/', function(req, res) {
    ...
    var event = req.body;
    var validator = require("./validator");
    validator.validate(event);
});
validator.js contains the validation for the JSON:
var jsonschemavalidate = require("json-schema");
var basicSchema = require('fs').readFileSync('./schema.json', 'utf8');

exports.validate = function (event) {
    console.log(jsonschemavalidate.validate(event, basicSchema).errors);
};
The schema.json:
{
    name : "test",
    type : 'object',
    properties : {
        event_id : { type : 'string' },
        timestamp : { type : 'string' }
    }
}
For the input I use curl:
curl -i -X POST -H 'Content-Type: application/json' -d '{"event_id": "NedaleGassss", "timestamp": "a2009321"}' http://localhost:3000/listener/v1/event/
The output is as follows:
[ { property: '',
message: 'Invalid schema/property definition {\n name : "test",\n type : "object",\n additionalProperties : false,\n properties :\n {\n event_id : { type : "string" },\n timestamp \t: { type : "string" }\n }\n}' } ]

Your schema is invalid, as the error says. The schema must itself be valid JSON,
so property names and strings must be double-quoted:
{
    "name" : "test",
    "type" : "object",
    "properties" : {
        "event_id" : { "type" : "string" },
        "timestamp" : { "type" : "string" }
    }
}
This should do the trick (unless you already figured it out in the past year).
And also:
var basicSchema = require('fs').readFileSync('./schema.json', 'utf8');
could probably be replaced by:
var basicSchema = require('./schema');
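For completeness, a minimal sketch of validator.js along those lines, assuming schema.json now holds the valid, double-quoted JSON shown above (require() parses the file into an object, so no fs or JSON.parse is needed):

// validator.js -- minimal sketch
var jsonschemavalidate = require("json-schema");
var basicSchema = require("./schema.json"); // parsed once at load time

exports.validate = function (event) {
    var result = jsonschemavalidate.validate(event, basicSchema);
    if (!result.valid) {
        console.log(result.errors);
    }
    return result.valid;
};

This also lets the route handler branch on the returned boolean instead of only logging errors.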

Related

How to retrieve all key-value pairs avoiding key duplication from JSON in Groovy script

I am totally new to Groovy scripting and would like some help solving this. I have a JSON response I want to manipulate, getting the desired parameters back while avoiding duplication. The JSON response does not have indexes like 0, 1, 2, ... that I can iterate through.
Here is the response that I want to work with:
{
    "AuthenticateV2" : {
        "displayName" : "Verification of authentication",
        "description" : "notification ",
        "smsTemplate" : "authentication.v2.0_sms",
        "emailHeaderTemplate" : "v2.0_header",
        "emailBodyTemplate" : "html",
        "parameters" : {
            "displayName" : "USER_DISPLAY_NAME",
            "actionTokenURL" : "VERIFICATION_LINK",
            "customToken" : "VERIFICATION_CODE"
        },
        "supportedPlans" : [
            "connectGo"
        ]
    },
    "PasswordRecovery" : {
        "displayName" : "Verification of password recovery",
        "description" : "notification",
        "smsTemplate" : "recovery.v1.0_sms",
        "emailHeaderTemplate" : "recovery.v1.0_header",
        "emailBodyTemplate" : "recovery.v1.0_body_html",
        "parameters" : {
            "displayName" : "USER_DISPLAY_NAME",
            "actionTokenURL" : "VERIFICATION_LINK",
            "customToken" : "VERIFICATION_CODE",
            "adminInitiated" : false,
            "authnId" : "AUTHENTICATION_IDENTIFIER",
            "authnType" : "EMAIL",
            "user" : {
                "displayName" : "USER_DISPLAY_NAME"
            }
        },
        "supportedPlans" : [
            "connectGo"
        ]
    },
    "PasswordReset" : {
        "displayName" : "password reset",
        "description" : "notification",
        "smsTemplate" : "recovery.v1.0_sms",
        "emailHeaderTemplate" : "recovery.v1.0_header",
        "emailBodyTemplate" : "html",
        "parameters" : {
            "displayName" : "USER_DISPLAY_NAME",
            "user" : {
                "displayName" : "USER_DISPLAY_NAME"
            }
        }
The expected output that I want to have:
{
    "displayName" : "USER_DISPLAY_NAME",
    "actionTokenURL" : "VERIFICATION_LINK",
    "customToken" : "VERIFICATION_CODE",
    "customToken" : "VERIFICATION_CODE",
    "adminInitiated" : false,
    "authnId" : "AUTHENTICATION_IDENTIFIER",
    "authnType" : "EMAIL"
}
I need to retrieve all fields under the parameters tag and also want to avoid duplication.
You should first get familiar with parsing and producing JSON in Groovy.
Then, assuming the provided response is valid JSON (it's not: there are 2 closing curly braces (}) missing at the end), to get all the parameters keys merged into one JSON object we first have to convert the JSON string into a Map using JsonSlurper:
import groovy.json.JsonSlurper
import groovy.json.JsonOutput

def validJsonResponse = '<your valid JSON string>'
Map parsedResponse = new JsonSlurper().parseText(validJsonResponse) as Map
Now that we have a parsedResponse map, we can iterate over all the root items in the response and transform them into the desired form (all the unique parameters keys) using the Map::collectEntries method:
Map uniqueParameters = parsedResponse.collectEntries { it.value['parameters'] }
Finally, we can convert the uniqueParameters result back into a pretty-printed JSON string using JsonOutput:
println JsonOutput.prettyPrint(JsonOutput.toJson(uniqueParameters))
After applying all of the above, we'll get the following output:
{
    "displayName": "USER_DISPLAY_NAME",
    "actionTokenURL": "VERIFICATION_LINK",
    "customToken": "VERIFICATION_CODE",
    "adminInitiated": false,
    "authnId": "AUTHENTICATION_IDENTIFIER",
    "authnType": "EMAIL",
    "user": {
        "displayName": "USER_DISPLAY_NAME"
    }
}
If you want to get rid of the user entry in the final output, just remove it from the resulting uniqueParameters map (uniqueParameters.remove('user')) before converting it back to a JSON string.
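Putting all of the above together, a self-contained sketch you can run as-is (the response is a hypothetical stand-in trimmed to two entries for brevity):

import groovy.json.JsonSlurper
import groovy.json.JsonOutput

// Trimmed-down stand-in for the real response
def validJsonResponse = '''{
    "AuthenticateV2" : {
        "parameters" : { "displayName" : "USER_DISPLAY_NAME", "customToken" : "VERIFICATION_CODE" }
    },
    "PasswordRecovery" : {
        "parameters" : { "authnType" : "EMAIL", "user" : { "displayName" : "USER_DISPLAY_NAME" } }
    }
}'''

Map parsedResponse = new JsonSlurper().parseText(validJsonResponse) as Map

// Merge every "parameters" map; duplicate keys collapse (the last one wins)
Map uniqueParameters = parsedResponse.collectEntries { it.value['parameters'] }
uniqueParameters.remove('user')  // optional: drop the nested user entry

println JsonOutput.prettyPrint(JsonOutput.toJson(uniqueParameters))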

Template body contains invalid JSON: invalid character

I've got a CloudFormation template that creates an SNS topic and subscription:
{
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources" : {
        "EmailSNSTopic": {
            "Type" : "AWS::SNS::Topic",
            "Properties" : {
                "DisplayName" : "${display_name}"
            }
        },
        "MySubscription": {
            "Type": "AWS::SNS::Subscription",
            "Properties": {
                "TopicArn" : { "Ref" : "EmailSNSTopic" },
                "${details}"
            }
        }
    },
    "Outputs" : {
        "ARN" : {
            "Description" : "Email SNS Topic ARN",
            "Value" : { "Ref" : "EmailSNSTopic" }
        }
    }
}
I'm trying to deploy this via Terraform,
but I keep getting this error:
Error: "template_body" contains an invalid JSON: invalid character '{' looking for beginning of object key string
My Terraform configuration looks like this.
provider "aws" {
region = "eu-west-2"
}
data "template_file" "sns_stack" {
template = file("${path.module}/templates/email-sns-stack.json.tpl")
vars = {
display_name = var.display_name
details = join(",", formatlist("{ \"Endpoint\": \"%s\", \"Protocol\": \"%s\" }", var.email_list, var.protocol))
}
}
resource "aws_cloudformation_stack" "sns_topic" {
name = var.stack_name
template_body = data.template_file.sns_stack.rendered
tags = merge(
map("Name", var.stack_name)
)
}
And my variables.tf looks like this:
variable "display_name" {
  default = "Admin"
}

variable "email_list" {
  default = [
    "foo@foo.com",
    "bar@bar.com"
  ]
}

variable "protocol" {
  default = "email"
}

variable "stack_name" {
  default = "sns-test"
}
I expect ${details} to spit out my Endpoint and Protocol, but it doesn't.
What am I doing wrong?
What you want to achieve is rather complex, but doable. You can do this using the following template:
{
    "AWSTemplateFormatVersion": "2010-09-09",
    "Resources" : ${jsonencode(
        merge({for idx, email_address in email_list:
            "EmailSubs${idx}" => {
                Type = "AWS::SNS::Subscription"
                Properties = {
                    "Endpoint" = email_address
                    "Protocol" = protocol
                    "TopicArn" = { "Ref" = "EmailSNSTopic" }
                }
            }},
            {
            "EmailSNSTopic" = {
                "Type" = "AWS::SNS::Topic",
                "Properties" = {
                    "DisplayName" = "${display_name}"
                }
            }}
    ))},
    "Outputs" : {
        "ARN" : {
            "Description" : "Email SNS Topic ARN",
            "Value" : { "Ref" : "EmailSNSTopic" }
        }
    }
}
and TF code:
locals {
  template_body = templatefile("./email-sns-stack2.json.tpl", {
    display_name = var.display_name
    email_list   = var.email_list
    protocol     = var.protocol
  })
}

resource "aws_cloudformation_stack" "sns_topic" {
  name          = var.stack_name
  template_body = local.template_body
  tags = merge(
    map("Name", var.stack_name)
  )
}
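For illustration, with the two addresses and email protocol from variables.tf above, the rendered Resources section should come out roughly like this (a sketch, not captured output; jsonencode emits it on a single line, it is pretty-printed here for readability):

"Resources": {
    "EmailSubs0": {
        "Type": "AWS::SNS::Subscription",
        "Properties": {
            "Endpoint": "foo@foo.com",
            "Protocol": "email",
            "TopicArn": { "Ref": "EmailSNSTopic" }
        }
    },
    "EmailSubs1": {
        "Type": "AWS::SNS::Subscription",
        "Properties": {
            "Endpoint": "bar@bar.com",
            "Protocol": "email",
            "TopicArn": { "Ref": "EmailSNSTopic" }
        }
    },
    "EmailSNSTopic": {
        "Type": "AWS::SNS::Topic",
        "Properties": { "DisplayName": "Admin" }
    }
}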

Querying with elasticsearch and jcard json

I have a data set containing vCards based on the jCard JSON mapping (https://www.rfc-editor.org/rfc/rfc7095). The problem is that I want to search on the 'fn' field only.
The vcard data has the following format.
["vcard",
[
["version", {}, "text", "4.0"],
["fn", {}, "text", "John Doe"],
["gender", {}, "text", "M"],
["categories", {}, "text", "computers", "cameras"],
...
]
]
I'm creating a vcard document like this
> curl -X POST localhost:9200/vcards/id1 -d '{
    "id" : "id1",
    "vcardArray" : ["vcard",
        [
            ["version", {}, "text", "4.0"],
            ["fn", {}, "text", "John Doe"],
            ["gender", {}, "text", "M"]
        ]
    ],
    "status" : ["registered"]
}'
Normally you would create a specific mapping, so that when the document is analysed the fields are indexed, and searching for the fn field would look something like this:
curl -v -X POST http://localhost:9200/vcards/_search -d '{
    "query" : {
        "bool" : {
            "must" : {
                "match" : {
                    "vcardArray.vcard.fn" : { "query" : "Rik Ribbers", "type" : "phrase" }
                }
            }
        }
    }
}'
A potential mapping would look like this, but this one is not working
> curl -X PUT http://localhost:9200/vcards -d '{
    "mappings" : {
        "vcardArray" : {
            "type" : "nested",
            "properties" : {
                "vcard" : {
                    "type" : "index",
                    "index" : "not_analyzed"
                }
            }
        }
    }
}'
Any pointer to the correct mapping or query would be helpful.
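One direction that may help (a sketch, not a verified answer): jCard properties are positional array entries rather than named fields, so there is no vcardArray.vcard.fn field for Elasticsearch to map or query; heterogeneous arrays like ["fn", {}, "text", "John Doe"] generally cannot be mapped meaningfully. A common workaround is to denormalize: copy the values you want to search into dedicated top-level fields at index time, and query those instead:

> curl -X POST localhost:9200/vcards/id1 -d '{
    "id" : "id1",
    "fn" : "John Doe",
    "vcardArray" : [ ... ],
    "status" : ["registered"]
}'
> curl -X POST http://localhost:9200/vcards/_search -d '{
    "query" : { "match_phrase" : { "fn" : "John Doe" } }
}'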

Sub-records in Avro with Morphlines

I'm trying to convert JSON into Avro using the kite-sdk morphline module. After playing around I'm able to convert the JSON into Avro using a simple schema (no complex data types).
Then I took it one step further and modified the Avro schema as displayed below (subrec.avsc). As you can see, the schema consists of a subrecord.
As soon as I tried to convert the JSON to Avro using the morphlines.conf and the subrec.avsc, it failed.
Somehow the JSON paths "/record_type[]/alert/action" are not translated by the toAvro function.
The morphlines.conf
morphlines : [
  {
    id : morphline1
    importCommands : ["org.kitesdk.**"]
    commands : [
      # Read the JSON blob
      { readJson: {} }
      { logError { format : "record: {}", args : ["@{}"] } }

      # Extract JSON
      { extractJsonPaths { flatten: false, paths: {
        "/record_type[]/alert/action" : /alert/action,
        "/record_type[]/alert/signature_id" : /alert/signature_id,
        "/record_type[]/alert/signature" : /alert/signature,
        "/record_type[]/alert/category" : /alert/category,
        "/record_type[]/alert/severity" : /alert/severity
      } } }
      { logError { format : "EXTRACTED THIS : {}", args : ["@{}"] } }

      { extractJsonPaths { flatten: false, paths: {
        timestamp : /timestamp,
        event_type : /event_type,
        source_ip : /src_ip,
        source_port : /src_port,
        destination_ip : /dest_ip,
        destination_port : /dest_port,
        protocol : /proto
      } } }

      # Create Avro according to schema
      { logError { format : "WE GO TO AVRO"} }
      { toAvro { schemaFile : /etc/flume/conf/conf.empty/subrec.avsc } }

      # Create Avro container
      { logError { format : "WE GO TO BINARY"} }
      { writeAvroToByteArray { format: containerlessBinary } }

      { logError { format : "DONE!!!"} }
    ]
  }
]
And the subrec.avsc
{
  "type" : "record",
  "name" : "Event",
  "fields" : [ {
    "name" : "timestamp",
    "type" : "string"
  }, {
    "name" : "event_type",
    "type" : "string"
  }, {
    "name" : "source_ip",
    "type" : "string"
  }, {
    "name" : "source_port",
    "type" : "int"
  }, {
    "name" : "destination_ip",
    "type" : "string"
  }, {
    "name" : "destination_port",
    "type" : "int"
  }, {
    "name" : "protocol",
    "type" : "string"
  }, {
    "name" : "record_type",
    "type" : ["null", {
      "name" : "alert",
      "type" : "record",
      "fields" : [ {
        "name" : "action",
        "type" : "string"
      }, {
        "name" : "signature_id",
        "type" : "int"
      }, {
        "name" : "signature",
        "type" : "string"
      }, {
        "name" : "category",
        "type" : "string"
      }, {
        "name" : "severity",
        "type" : "int"
      } ]
    } ]
  } ]
}
At { logError { format : "EXTRACTED THIS : {}", args : ["@{}"] } } I get the following output:
[{
  /record_type[]/alert/action = [allowed],
  /record_type[]/alert/category = [],
  /record_type[]/alert/severity = [3],
  /record_type[]/alert/signature = [GeoIP from NL, Netherlands],
  /record_type[]/alert/signature_id = [88006],
  _attachment_body = [{
    "timestamp": "2015-03-23T07:42:01.303046",
    "event_type": "alert",
    "src_ip": "1.1.1.1",
    "src_port": 18192,
    "dest_ip": "46.231.41.166",
    "dest_port": 62004,
    "proto": "TCP",
    "alert": {
      "action": "allowed",
      "gid": "1",
      "signature_id": "88006",
      "rev": "1",
      "signature" : "GeoIP from NL, Netherlands ",
      "category" : "",
      "severity" : "3"
    }
  }],
  _attachment_mimetype = [json/java + memory],
  basename = [simple_eve.json]
}]
UPDATE 2017-06-22
You MUST populate the data in the structure for this to work, by using addValues or setValues:
{
  addValues {
    micDefaultHeader : [
      {
        eventTimestampString : "2017-06-22 18:18:36"
      }
    ]
  }
}
After debugging the sources of the morphline toAvro command, it appears that the record is the first object to be evaluated, no matter what you put in your mappings structure.
The solution is quite simple, but unfortunately took a little extra time: Eclipse, running the Flume agent in debug mode, cloning the source code, and lots of coffee. Here it goes.
My schema:
{
  "type" : "record",
  "name" : "co_lowbalance_event",
  "namespace" : "co.tigo.billing.cboss.lowBalance",
  "fields" : [ {
    "name" : "dummyValue",
    "type" : "string",
    "default" : "dummy"
  }, {
    "name" : "micDefaultHeader",
    "type" : {
      "type" : "record",
      "name" : "mic_default_header_v_1_0",
      "namespace" : "com.millicom.schemas.root.struct",
      "doc" : "standard millicom header definition",
      "fields" : [ {
        "name" : "eventTimestampString",
        "type" : "string",
        "default" : "12345678910"
      } ]
    }
  } ]
}
The morphlines file:
morphlines : [
  {
    id : convertJsonToAvro
    importCommands : ["org.kitesdk.**"]
    commands : [
      {
        readJson {
          outputClass : java.util.Map
        }
      }
      {
        addValues {
          micDefaultHeader : [{}]
        }
      }
      {
        logDebug { format : "my record: {}", args : ["@{}"] }
      }
      {
        toAvro {
          schemaFile : /home/asarubbi/Development/test/co_lowbalance_event.avsc
          mappings : {
            "micDefaultHeader" : micDefaultHeader
            "micDefaultHeader/eventTimestampString" : eventTimestampString
          }
        }
      }
      {
        writeAvroToByteArray {
          format : containerlessJSON
          codec : null
        }
      }
    ]
  }
]
The magic lies here:
{
  addValues {
    micDefaultHeader : [{}]
  }
}
And in the mappings:
mappings : {
  "micDefaultHeader" : micDefaultHeader
  "micDefaultHeader/eventTimestampString" : eventTimestampString
}
Explanation:
Inside the code, the first field name evaluated is micDefaultHeader, of type RECORD. As there is no way to specify a default value for a RECORD (logically correct), the toAvro code evaluates this, does not find any value configured in the mappings, and therefore fails: it detects (wrongly) that the record is empty when it shouldn't be.
However, taking a look at the code, you can see that it requires a Map object containing no values to please the parser and continue to the next element.
So we add a map object using addValues and fill it with an empty map ([{}]). Notice that this must match the name of the record that is causing the empty value; in my case, "micDefaultHeader".
Feel free to comment if you have a better solution, as this looks like a "dirty fix".
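For what it's worth, with a hypothetical input event like {"eventTimestampString" : "2017-06-22 18:18:36"} flowing through the pipeline above, the containerlessJSON output should look roughly like this (a sketch derived from the schema and its defaults, not captured output):

{
  "dummyValue" : "dummy",
  "micDefaultHeader" : {
    "eventTimestampString" : "2017-06-22 18:18:36"
  }
}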

Matching data in JsonPath with wiremock

I'm trying to create mocks for my login procedure. I use a POST method with a couple of fields and a login object (with login, password, etc.).
For that I'm using JsonPath. Code below:
{
    "request": {
        "method": "POST",
        "url": "/login",
        "bodyPatterns" : [
            {"matchesJsonPath" : "$.method"},
            {"matchesJsonPath" : "$.params[?(@.clientVersion == "1")]"},
            {"matchesJsonPath" : "$.params.login"},
            {"matchesJsonPath" : "$.params.password"}
        ]
    },
    "response": {
        "status": 200,
        "bodyFileName": "login.json"
    }
}
I'm checking the clientVersion because it's similar to the examples.
My problem is that with the given POST JSON:
{
    "method": "login",
    "params": {
        "clientVersion": "1",
        "login": "test@test.com",
        "password": "681819535da188b6ef2"
    }
}
I receive 404.
However, when I change
{"matchesJsonPath" : "$.params[?(@.clientVersion == "1")]"},
to the plain
{"matchesJsonPath" : "$.params.clientVersion"},
everything works just fine.
So, how do I check inside WireMock, using matchesJsonPath, whether a given field equals some value? And how do I apply it to a root field like method in my case?
And while we're at it: I had similar problems with checking that a value is not null. I tried regular expressions and such, with no luck.
This works in my case:
WireMock:
"request": {
    "urlPathPattern": "/api/authins-portail-rs/authins/inscription/infosperso",
    "bodyPatterns" : [
        {"matchesJsonPath" : "$[?(@.nir == '123456789')]"},
        {"matchesJsonPath" : "$[?(@.nomPatronyme == 'aubert')]"},
        {"matchesJsonPath" : "$[?(@.prenoms == 'christian')]"},
        {"matchesJsonPath" : "$[?(@.dateNaissance == '01/09/1952')]"}
    ],
    "method": "POST"
}
Json:
{
    "nir": "123456789",
    "nomPatronyme": "aubert",
    "prenoms": "christian",
    "dateNaissance": "01/09/1952"
}
The following worked for me:
"matchesJsonPath" : "$.rootItem.itemA[0].item..[?(@.fieldName=='file')]"
Json :
{
    "rootItem" : {
        "itemA" : [
            {
                "item" : {
                    "fieldName" : "file",
                    "name" : "test"
                }
            }
        ]
    }
}
WireMock:
{
    "request" : {
        "urlPattern" : "/testjsonpath",
        "method" : "POST",
        "bodyPatterns" : [ {
            "matchesJsonPath" : "$.rootItem.itemA[0].item..[?(@.fieldName=='file')]"
        } ]
    },
    "response" : {
        "status" : 200,
        "body" : "{\"result\": \"success\"}",
        "headers" : {
            "Content-Type" : "application/json"
        }
    }
}
Update WireMock: it should work with newer versions (>= 2.0.0-beta); its JsonPath dependency was very outdated (GitHub issue #261).
Using the double-dots operator is semantically not the same, as the filter will also match elements with the same name deeper down the tree.
Try the double-dots operator (recursive):
$..params[?(@.clientVersion == "1")]
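Putting the answers together for the original stub: on a recent WireMock, the version check needs @ in the filter and the inner quotes swapped for single quotes (the double quotes inside a JSON string as originally written are not even valid JSON), and a root field like method can be matched with a filter on $ itself. A sketch of the corrected bodyPatterns:

"bodyPatterns" : [
    {"matchesJsonPath" : "$[?(@.method == 'login')]"},
    {"matchesJsonPath" : "$.params[?(@.clientVersion == '1')]"},
    {"matchesJsonPath" : "$.params.login"},
    {"matchesJsonPath" : "$.params.password"}
]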