Backbone: how to construct JSON correctly

I want to build an app for hotels and rooms.
Every hotel can have multiple rooms. I retrieve this data from an external server as XML, parse it, and have now divided it into two arrays, hotels and rooms, like this:
hotel.json
[
  {
    "id": "1",
    "name": "Hotel1"
  },
  {
    "id": "2",
    "name": "Hotel2"
  },
  {
    "id": "3",
    "name": "Hotel3"
  }
]
rooms.json
[
  {
    "id": "r1",
    "hotel_id": "1",
    "name": "Singola",
    "level": "1"
  },
  {
    "id": "r1_1",
    "hotel_id": "1",
    "name": "Doppia",
    "level": "2"
  },
  {
    "id": "r1_3",
    "hotel_id": "1",
    "name": "Doppia Uso singol",
    "level": "1"
  },
  {
    "id": "r2",
    "hotel_id": "2",
    "name": "Singola",
    "level": "1"
  },
  {
    "id": "r2_1",
    "hotel_id": "2",
    "name": "Tripla",
    "level": "1"
  }
]
In my Backbone app I have to write some controllers and parsing logic to retrieve the rooms for each hotel.
I want to know whether it is better for Backbone to construct a single nested JSON structure like this:
[
  {
    "id": "1",
    "name": "Hotel1",
    "rooms": [
      {
        "id": "r1",
        "hotel_id": "1",
        "name": "Singola",
        "level": "1"
      },
      {
        "id": "r1_1",
        "hotel_id": "1",
        "name": "Doppia",
        "level": "2"
      }
    ]
  },
  {
    "id": "2",
    "name": "Hotel2",
    "rooms": [
      {
        "id": "r2",
        "hotel_id": "2",
        "name": "Singola",
        "level": "1"
      },
      {
        "id": "r2_1",
        "hotel_id": "2",
        "name": "Tripla",
        "level": "1"
      }
    ]
  },
  {
    "id": "3",
    "name": "Hotel3"
  }
]
Which approach is better for Backbone in terms of efficiency and parsing?
I thought the first one, but after building the app I'm no longer sure.

I would recommend keeping the data structures flat, as Backbone doesn't really support nested collections without some extra effort. Keeping the data model flat will also make it easier for you to map to REST endpoints (i.e. '/hotels/1/rooms', '/rooms/1', etc.).
Just to demonstrate the complexities, here is an example of how one would have to associate a collection with a model:
HotelModel = Backbone.Model.extend({
  initialize: function() {
    // because initialize is called after parse
    _.defaults(this, {
      rooms: new RoomCollection
    });
  },
  parse: function(response) {
    if (_.has(response, "rooms")) {
      this.rooms = new RoomCollection(response.rooms, {
        parse: true
      });
      delete response.rooms;
    }
    return response;
  },
  toJSON: function() {
    var json = _.clone(this.attributes);
    json.rooms = this.rooms.toJSON();
    return json;
  }
});
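To make the trade-off concrete, here is a rough usage sketch for that nested setup (not part of the original answer; it just exercises the model defined above):
var hotel = new HotelModel({
  id: "1",
  name: "Hotel1",
  rooms: [{ id: "r1", hotel_id: "1", name: "Singola", level: "1" }]
}, { parse: true });

hotel.rooms instanceof RoomCollection; // true - the rooms live on the model object itself
hotel.get("rooms");                    // undefined - parse() stripped them from the attributes
hotel.toJSON().rooms.length;           // 1 - toJSON() re-nests them for serialization
Note that the rooms end up outside of the model's attributes, so the model's change events and validation won't cover them.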
With a flat data structure, you could do something like this:
HotelModel = Backbone.Model.extend({
  idAttribute: 'hotel_id',
  urlRoot: '/hotels'
});
RoomModel = Backbone.Model.extend({
  idAttribute: 'room_id',
  urlRoot: '/rooms'
});
HotelCollection = Backbone.Collection.extend({
  url: '/hotels',
  model: HotelModel
});
RoomCollection = Backbone.Collection.extend({
  url: '/rooms',
  model: RoomModel,
  getByHotelId: function(hotelId) {
    // where() returns every room for the hotel; findWhere() would return only the first match
    return this.where({hotel_id: hotelId});
  }
});
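And a rough sketch of how the flat collections might be used together (again not from the original answer; it assumes jQuery-backed fetches against the /hotels and /rooms endpoints above):
var hotels = new HotelCollection();
var rooms = new RoomCollection();

// fetch both lists, then look up the rooms that belong to a given hotel
$.when(hotels.fetch(), rooms.fetch()).done(function() {
  var hotel1Rooms = rooms.getByHotelId("1"); // every room whose hotel_id is "1"
  console.log(hotel1Rooms.length);           // 3 with the rooms.json shown in the question
});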


Converted JSON result structure not the same as the source

I have a test case that compares against the source kept in a Kafka message.
I noticed the structure is not the same: no fields are missing, but the name/value pairs are not arranged in the same sequence.
How do I make the converted result match the source structure?
Below is the code to retrieve the message, decode the Base64 content, and pretty-print the result.
import groovy.json.JsonSlurper
import groovy.json.JsonOutput

def responseList = new JsonSlurper().parseText(consumeMessage.getResponseText())
println('response text: \n' + JsonOutput.prettyPrint(JsonOutput.toJson(responseList)))

def decoded = new JsonSlurper().parseText(new String(responseList[0].value.decodeBase64()))
println('response decoded text: \n' + JsonOutput.prettyPrint(JsonOutput.toJson(decoded)))
Below is the result printed to the console:
2019-11-20 16:36:44.934 DEBUG oingDRToAllocationVerification-DynamicID - 10: decoded = JsonSlurper().parseText(new java.lang.String(responseList[0].value.decodeBase64()))
2019-11-20 16:36:44.945 DEBUG oingDRToAllocationVerification-DynamicID - 11: println("response decoded text:
" + JsonOutput.prettyPrint(JsonOutput.toJson(decoded)))
response decoded text:
{
"contexts": [
{
"activityId": "c2884e63-d30d-48a3-965c-0b33202885c2",
"incomingTimestamp": "2019-11-20T08:36:29.0829958Z",
"sourceName": "DispenseOrderService",
"timestamp": "2019-11-20T08:36:29.0829958+00:00",
"userId": "unknown"
}
],
"dispenseOrder": [
{
"dispenseRequestType": "DISPENSEORDER",
"id": "6320112019043628",
"items": [
{
"administrationInstructions": "drug intake information test 123",
"dispenseAsWritten": false,
"id": "cda92ec7-3191-4b7b-a972-7f4545146db4",
"itemId": "Augmentn",
"quantity": 100
},
{
"administrationInstructions": "drug intake information test 234",
"dispenseAsWritten": false,
"id": "19e00776-b08d-47c8-930b-76ddc01f0ff4",
"itemId": "Clopidogrl",
"quantity": 200
},
{
"administrationInstructions": "drug intake information test 456",
"dispenseAsWritten": true,
"id": "0a5b0f4a-366d-4fa7-a0b8-2e8c83f4af13",
"itemId": "Adenosine",
"quantity": 300
}
],
"locationId": "Pharmacy Jewel East",
"piiIdentifiers": {
"doctorId": "b502f046-fb1e-4fcf-8135-a7a13cfb47f6",
"patientId": "fe49b461-8eeb-46d5-b995-a31cdaaa35f3",
"pharmacistId": "b502f046-fb1e-4fcf-8135-a7a13cfb47f6"
},
"priority": 4,
"state": "NEW",
"type": "Test ingest type"
}
],
"messageClass": "DispenseRequestV1",
"messageId": "83e94dac-dfb6-49d7-8ca0-219d155fecce",
"notifications": [
],
"operation": "Add",
"timestamp": "2019-11-20T08:36:29.0952632+00:00"
}
Below is the source. The result after conversion is not the same as the source, in that the fields are not arranged in the same order:
{
"operation" : "Add",
"dispenseOrder" : [ {
"id" : "6320112019043628",
"locationId" : "Pharmacy Jewel East",
"piiIdentifiers" : {
"patientId" : "fe49b461-8eeb-46d5-b995-a31cdaaa35f3",
"doctorId" : "b502f046-fb1e-4fcf-8135-a7a13cfb47f6",
"pharmacistId" : "b502f046-fb1e-4fcf-8135-a7a13cfb47f6"
},
"priority" : 4,
"state" : "NEW",
"type" : "Test ingest type",
"dispenseRequestType" : "DISPENSEORDER",
"items" : [ {
"id" : "cda92ec7-3191-4b7b-a972-7f4545146db4",
"itemId" : "Augmentn",
"quantity" : 100,
"dispenseAsWritten" : false,
"administrationInstructions" : "drug intake information test 123"
}, {
"id" : "19e00776-b08d-47c8-930b-76ddc01f0ff4",
"itemId" : "Clopidogrl",
"quantity" : 200,
"dispenseAsWritten" : false,
"administrationInstructions" : "drug intake information test 234"
}, {
"id" : "0a5b0f4a-366d-4fa7-a0b8-2e8c83f4af13",
"itemId" : "Adenosine",
"quantity" : 300,
"dispenseAsWritten" : true,
"administrationInstructions" : "drug intake information test 456"
} ]
} ],
"messageId" : "83e94dac-dfb6-49d7-8ca0-219d155fecce",
"timestamp" : "2019-11-20T08:36:29.0952632+00:00",
"messageClass" : "DispenseRequestV1",
"contexts" : [ {
"userId" : "unknown",
"timestamp" : "2019-11-20T08:36:29.0829958+00:00",
"activityId" : "c2884e63-d30d-48a3-965c-0b33202885c2",
"incomingTimestamp" : "2019-11-20T08:36:29.0829958Z",
"sourceName" : "DispenseOrderService"
} ],
"notifications" : [ ]
}
As json.org says:
An object is an unordered set of name/value pairs.
So different JSON methods/libraries might order them in different ways. You shouldn't rely on the order of name/value pairs when working with JSON.
(If order is very important to you, you might try the solution suggested in this post.)
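In practice that means the check should compare the parsed structures rather than the pretty-printed strings. A minimal Groovy sketch (sourceText is an assumed variable holding the expected JSON; it is not in the original snippet):
def source = new JsonSlurper().parseText(sourceText)
// Map equality in Groovy ignores the order of name/value pairs
assert decoded == source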

How can I query documents where values match both a parent object and a child object?

I'm new to Mongoose and I've been trying for days to solve this issue, and I'm still having trouble.
My document object is below:
"person" : [
{
"title" : "front-end developer",
"skills" : [
{
"name" : "js",
"project" : "1",
},
{
"name" : "CSS",
"project" : "5",
}
]
},
{
"title" : "software engineer",
"skills" : [
{
"name" : "Java",
"project" : "1",
},
{
"name" : "c++",
"project" : "5",
}
]
}
]
What I would like to accomplish is to return all documents that have person.title = "software engineer" AND person.skills.name = "c++". The c++ skill has to belong to the software engineer person object, so returning documents where a front-end developer has c++ is not what I want.
Here's what I've tried so far. The query runs, but it returns documents which meet either one of the conditions rather than both.
var query = {
  _id: { $nin: [userID] },
  $and: [
    {
      person: {
        $elemMatch: {
          name: { $regex: `^${titleName}$`, $options: "i" }
        }
      }
    },
    {
      [`person.skills`]: {
        $elemMatch: {
          name: { $regex: `^${skillName}$`, $options: "i" }
        }
      }
    }
  ]
};
Any help would be greatly appreciated. Thanks!
You can try the query below. Move the AND condition inside the $elemMatch:
var query = {
  "_id": {
    "$nin": [userID]
  },
  "person": {
    "$elemMatch": {
      // the person-level field in the sample documents is "title", not "name"
      "title": { $regex: `^${titleName}$`, $options: "i" },
      "skills.name": { $regex: `^${skillName}$`, $options: "i" }
    }
  }
};
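As a minimal usage sketch (the Person model name is only an illustration, not something from the original question), the query object can then be passed straight to Mongoose:
// hypothetical model name; substitute whatever model holds these documents
Person.find(query, function(err, docs) {
  if (err) return console.error(err);
  console.log(docs); // documents where a single person entry satisfies both conditions
});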

Custom analyzer appearing in type mapping but not working in Elasticsearch

I'm trying to add a custom analyzer to my index while also mapping that analyzer to a property on a type. Here is my JSON object for doing this:
{ "settings" : {
"analysis" : {
"analyzer" : {
"test_analyzer" : {
"type" : "custom",
"tokenizer": "standard",
"filter" : ["lowercase", "asciifolding"],
"char_filter": ["html_strip"]
}
}
}
},
"mappings" : {
"test" : {
"properties" : {
"checkanalyzer" : {
"type" : "string",
"analyzer" : "test_analyzer"
}
}
}
}
}
I know this analyzer works because I've tested it using /wp2/_analyze?analyzer=test_analyzer -d '<p>Testing analyzer.</p>' and also it shows up as the analyzer for the checkanalyzer property when I check /wp2/test/_mapping. However, if I add a document like {"checkanalyzer": "<p>The tags should not show up</p>"}, the HTML tags don't get stripped out when I retrieve the document using the _search endpoint. Am I misunderstanding how the mapping works or is there something wrong with my JSON object? I'm dynamically creating the wp2 index and also the test type when I make this call to Elasticsearch, not sure if that matters.
The HTML doesn't get removed from the source; it gets removed from the terms generated from that source. You can see this if you use a terms aggregation:
POST /test_index/_search
{
  "aggs": {
    "checkanalyzer_field_terms": {
      "terms": {
        "field": "checkanalyzer"
      }
    }
  }
}
{
"took": 77,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 1,
"max_score": 1,
"hits": [
{
"_index": "test_index",
"_type": "test",
"_id": "1",
"_score": 1,
"_source": {
"checkanalyzer": "<p>The tags should not show up</p>"
}
}
]
},
"aggregations": {
"checkanalyzer_field_terms": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "not",
"doc_count": 1
},
{
"key": "should",
"doc_count": 1
},
{
"key": "show",
"doc_count": 1
},
{
"key": "tags",
"doc_count": 1
},
{
"key": "the",
"doc_count": 1
},
{
"key": "up",
"doc_count": 1
}
]
}
}
}
Here's some code I used to test it:
http://sense.qbox.io/gist/2971767aa0f5949510fa0669dad6729bbcdf8570
Now, if you want to completely strip out the HTML prior to indexing and store the content as-is, you can use the mapper attachment plugin: when you define the mapping, you can set the content_type to "html".
The mapper attachment is useful for many things, especially if you are handling multiple document types, but most notably I believe using it just for the purpose of stripping out the HTML tags is sufficient (which you cannot do with the html_strip char filter, since that only affects the indexed terms, not the stored source).
Just a forewarning though: NONE of the HTML tags will be stored. So if you do need those tags somehow, I would suggest defining another field to store the original content. Another note: you cannot specify multi-fields for mapper attachment documents, so you would need to store that outside of the mapper attachment document. See my working example below.
You'll need to end up with a mapping like this:
{
"html5-es" : {
"aliases" : { },
"mappings" : {
"document" : {
"properties" : {
"delete" : {
"type" : "boolean"
},
"file" : {
"type" : "attachment",
"fields" : {
"content" : {
"type" : "string",
"store" : true,
"term_vector" : "with_positions_offsets",
"analyzer" : "autocomplete"
},
"author" : {
"type" : "string",
"store" : true,
"term_vector" : "with_positions_offsets"
},
"title" : {
"type" : "string",
"store" : true,
"term_vector" : "with_positions_offsets",
"analyzer" : "autocomplete"
},
"name" : {
"type" : "string"
},
"date" : {
"type" : "date",
"format" : "strict_date_optional_time||epoch_millis"
},
"keywords" : {
"type" : "string"
},
"content_type" : {
"type" : "string"
},
"content_length" : {
"type" : "integer"
},
"language" : {
"type" : "string"
}
}
},
"hash_id" : {
"type" : "string"
},
"path" : {
"type" : "string"
},
"raw_content" : {
"type" : "string",
"store" : true,
"term_vector" : "with_positions_offsets",
"analyzer" : "raw"
},
"title" : {
"type" : "string"
}
}
}
},
"settings" : { //insert your own settings here },
"warmers" : { }
}
}
In NEST, I then assemble the content like this:
Attachment attachment = new Attachment();
attachment.Content = Convert.ToBase64String(File.ReadAllBytes("path/to/document"));
attachment.ContentType = "html";
Document document = new Document();
document.File = attachment;
document.RawContent = InsertRawContentFromString(originalText);
I have tested this in Sense - results are as follows:
"file": {
"_content": "PGh0bWwgeG1sbnM6TWFkQ2FwPSJodHRwOi8vd3d3Lm1hZGNhcHNvZnR3YXJlLmNvbS9TY2hlbWFzL01hZENhcC54c2QiPg0KICA8aGVhZCAvPg0KICA8Ym9keT4NCiAgICA8aDE+VG9waWMxMDwvaDE+DQogICAgPHA+RGVsZXRlIHRoaXMgdGV4dCBhbmQgcmVwbGFjZSBpdCB3aXRoIHlvdXIgb3duIGNvbnRlbnQuIENoZWNrIHlvdXIgbWFpbGJveC48L3A+DQogICAgPHA+wqA8L3A+DQogICAgPHA+YXNkZjwvcD4NCiAgICA8cD7CoDwvcD4NCiAgICA8cD4xMDwvcD4NCiAgICA8cD7CoDwvcD4NCiAgICA8cD5MYXZlbmRlci48L3A+DQogICAgPHA+wqA8L3A+DQogICAgPHA+MTAvNiAxMjowMzwvcD4NCiAgICA8cD7CoDwvcD4NCiAgICA8cD41IDA5PC9wPg0KICAgIDxwPsKgPC9wPg0KICAgIDxwPjExIDQ3PC9wPg0KICAgIDxwPsKgPC9wPg0KICAgIDxwPkhhbGxvd2VlbiBpcyBpbiBPY3RvYmVyLjwvcD4NCiAgICA8cD7CoDwvcD4NCiAgICA8cD5qb2c8L3A+DQogIDwvYm9keT4NCjwvaHRtbD4=",
"_content_length": 0,
"_content_type": "html",
"_date": "0001-01-01T00:00:00",
"_title": "Topic10"
},
"delete": false,
"raw_content": "<h1>Topic10</h1><p>Delete this text and replace it with your own content. Check your mailbox.</p><p> </p><p>asdf</p><p> </p><p>10</p><p> </p><p>Lavender.</p><p> </p><p>10/6 12:03</p><p> </p><p>5 09</p><p> </p><p>11 47</p><p> </p><p>Halloween is in October.</p><p> </p><p>jog</p>"
},
"highlight": {
"file.content": [
"\n <em>Topic10</em>\n\n Delete this text and replace it with your own content. Check your mailbox.\n\n  \n\n asdf\n\n  \n\n 10\n\n  \n\n Lavender.\n\n  \n\n 10/6 12:03\n\n  \n\n 5 09\n\n  \n\n 11 47\n\n  \n\n Halloween is in October.\n\n  \n\n jog\n\n "
]
}

Finding JSON objects in MongoDB

I'm trying to find objects using the built-in queries and it just doesn't work.
My JSON file is something like this:
{ "Text1":
{
"id":"2"
},
"Text2":
{
"id":"2,3"
},
"Text3":
{
"id":"1"
}
}
I run db.myCollection.find({"id":2}) and it doesn't find anything.
When I run db.myCollection.find() it shows all the data as it should.
Does anyone know how to do this correctly?
It's hard to change the data structure, but since you want just your matching sub-document and you don't know where your target sub-document is (for example, whether the query should be on Text1 or Text2, ...), there is a good data structure for this:
{
  "_id": ObjectId("548dd9261a01c68fab8d67d7"),
  "pair": [
    {
      "id": "2",
      "key": "Text1"
    },
    {
      "id": ["2", "3"],
      "key": "Text2"
    },
    {
      "id": "1",
      "key": "Text3"
    }
  ]
}
and your query is:
db.myCollection.findOne({'pair.id' : "2"} , {'pair.$':1, _id : -1}).pair // there is better ways (such as aggregation instead of above query)
As a result you will have:
{
"0" : {
"id" : "2",
"key" : "Text1"
}
}
Update 1 (newbie way)
If you want all the matching documents, not just one, use this:
var result = [];
db.myCollection.find({'pair.id': "2"}, {'pair.$': 1, _id: -1}).forEach(function(item) {
  result.push(item.pair);
});
// the output will be in result
Update 2
Use this query to get all sub-documents
db.myCollection.aggregate(
  { $unwind: '$pair' },
  { $match: { 'pair.id': "2" } }
).result
It produces output like this:
{
"0" : {
"_id" : ObjectId("548deb511a01c68fab8d67db"),
"pair" : {
"id" : "2",
"key" : "Text1"
}
},
"1" : {
"_id" : ObjectId("548deb511a01c68fab8d67db"),
"pair" : {
"id" : [
"2",
"3"
],
"key" : "Text2"
}
}
}
Since your query specifies a field in a subdocument, this is what will work; see the .find() documentation.
db.myCollection.find({"Text1.id" : "2"}, {"Text1.id": true})
{ "_id" : ObjectId("548dd798e2fa652e675af11d"), "Text1" : { "id" : "2" } }
If the query can be on "Text1" or "Text2", the best thing to do here, as mentioned in the accepted answer, is to change your document structure. This can easily be done using the "Bulk" API.
var bulk = db.myCollection.initializeOrderedBulkOp(),
    count = 0;

db.myCollection.find().forEach(function(doc) {
  var pair = [];
  for (var key in doc) {
    if (key !== "_id") {
      var id = doc[key]["id"].split(/[, ]/);
      pair.push({ "key": key, "id": id });
    }
  }
  bulk.find({ "_id": doc._id }).replaceOne({ "pair": pair });
  count++;
  if (count % 300 == 0) {
    // Execute per 300 operations and re-initialize
    bulk.execute();
    bulk = db.myCollection.initializeOrderedBulkOp();
  }
});

// Clean up remaining queued operations
if (count % 300 != 0)
  bulk.execute();
Your documents now look like this:
{
"_id" : ObjectId("55edddc6602d0b4fd53a48d8"),
"pair" : [
{
"key" : "Text1",
"id" : [
"2"
]
},
{
"key" : "Text2",
"id" : [
"2",
"3"
]
},
{
"key" : "Text3",
"id" : [
"1"
]
}
]
}
Running the following query:
db.myCollection.aggregate([
  { "$project": {
    "pair": {
      "$setDifference": [
        { "$map": {
          "input": "$pair",
          "as": "pr",
          "in": {
            "$cond": [
              { "$setIsSubset": [ ["2"], "$$pr.id" ] },
              "$$pr",
              false
            ]
          }
        }},
        [false]
      ]
    }
  }}
])
returns:
{
"_id" : ObjectId("55edddc6602d0b4fd53a48d8"),
"pair" : [
{
"key" : "Text1",
"id" : [
"2"
]
},
{
"key" : "Text2",
"id" : [
"2",
"3"
]
}
]
}

How to get JSON into a Freemarker Template (FTL)

I've got a MongoDB database which I query; I serialize the result and send that string to my FTL template. Below is the serialized result:
[
  {
    "id": "10",
    "title": "Test Title 1",
    "partner": {
      "id": "1",
      "name": "partner 1 ",
      "location": [
        {
          "locationname": "locationname 1a",
          "city": ""
        },
        {
          "locationname": "locationname 1b",
          "city": ""
        }
      ]
    }
  },
  {
    "id": "6",
    "title": "Test Title 2",
    "partner": {
      "id": "1",
      "name": "partner 2 ",
      "location": [
        {
          "locationname": "locationname 2b",
          "city": ""
        }
      ]
    }
  }
]
How would I use this in my FTL template?
Thanks for any help.
If you really have to serialize before giving the result to FreeMarker... The JSON syntax for maps and lists happens to be a subset of FTL, so assuming the serialized result is in res, res?eval will give you a list of maps.
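As a rough sketch of what the template side could look like (field names are taken from the sample above; res is assumed to hold that serialized string):
<#assign hotels = res?eval>
<#list hotels as hotel>
  ${hotel.title} (id ${hotel.id}) - partner: ${hotel.partner.name}
  <#list hotel.partner.location as loc>
    location: ${loc.locationname}
  </#list>
</#list>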