Google Apps Script web app POST body - JSON

I feel like there must be something obvious I'm missing, but I can't understand how Google Apps Script web apps handle POST requests.
My script has a doPost(e) function that receives the POST request, as I can see from what gets logged in my spreadsheet, but when I try to access the contents of the POST body, the log just shows postData as "FileUpload". How do I actually access the data within the POST body?
Below is my GS code:
function doPost(e) {
  var json = e.postData.contents()
  var data = JSON.parse(json)
  var test = e.contentlength
  var ss = SpreadsheetApp.openById("1GH1pT1BvJ-hMWrFNKUDD1KzBoEHd_TzKKk71Znk250g")
  SpreadsheetApp.setActiveSpreadsheet(ss)
  var sheet = SpreadsheetApp.getActiveSpreadsheet().getActiveSheet();
  var range = sheet.getRange("A1:A1");
  sheet.setActiveRange(range);
  range.setValue(e);
  range = sheet.getRange("A2:A2")
  range.setValue(json)
  range = sheet.getRange("A3:A3")
  range.setValue(test)
  range = sheet.getRange("A4:A4")
  range.setValue(data)
}
Below is the body of the POST request I am sending using hurl.it
{
"v": 1,
"matches_filters": {
"current": [
10
],
"previous": []
},
"meta": {
"v": 1,
"action": "added",
"bullshit":"sooootrue"
"object": "organization",
"id": 3906,
"company_id": 1146977,
"user_id": 1718250,
"timestamp": 1479142419,
"permitted_user_ids": [
1718250,
1558089,
1635656,
1638428,
1638433,
1638438,
1638443,
1661971,
1661996,
1662001,
1662006,
1662011,
1662016,
1662021,
1662026,
1662031,
1662036,
1662041,
1665942,
1665954,
1683795,
1718611,
1718619,
1751500,
1844875,
1871026
],
"trans_pending": false,
"is_bulk_update": false,
"matches_filters": {
"current": [
10
],
"previous": []
}
},
"retry": 0,
"current": {
"id": 3906,
"company_id": 1146977,
"owner_id": 1718250,
"name": "Test Corporation [Dallas, TX, United States]",
"open_deals_count": 0,
"related_open_deals_count": 0,
"closed_deals_count": 0,
"related_closed_deals_count": 0,
"email_messages_count": 0,
"people_count": 0,
"activities_count": 0,
"done_activities_count": 0,
"undone_activities_count": 0,
"reference_activities_count": 0,
"files_count": 0,
"notes_count": 0,
"followers_count": 0,
"won_deals_count": 0,
"related_won_deals_count": 0,
"lost_deals_count": 0,
"related_lost_deals_count": 0,
"active_flag": true,
"category_id": null,
"picture_id": null,
"country_code": null,
"first_char": "t",
"update_time": "2016-11-14 16:53:39",
"add_time": "2016-11-14 16:53:39",
"visible_to": "3",
"next_activity_date": null,
"next_activity_time": null,
"next_activity_id": null,
"last_activity_id": null,
"last_activity_date": null,
"address": "4 Main Street, Dallas, TX, United States",
"address_lat": 32.7856712,
"address_long": -96.773262,
"address_subpremise": "",
"address_street_number": "4",
"address_route": "Main Street",
"address_sublocality": "",
"address_locality": "Dallas",
"address_admin_area_level_1": "Texas",
"address_admin_area_level_2": "Dallas County",
"address_country": "United States",
"address_postal_code": "75226",
"address_formatted_address": "4 Main St, Dallas, TX 75226, USA",
"040fd7fe54821ee6658b1079bdc0191037dcabb7": null,
"ea3923782939325226d83a034fcfdcfd9bf14ac6": null,
"ef5d2ea2f8ca035a742f0b520b35236811d3f9ab": "19",
"7bd6a9cb46233100cdf6f46857394edcc128ed49": null,
"c84920debaadd9619b0f0d5c6f6a32a0b69945ab": null,
"4d83d5725c316f602420447b543b7cad93a4227e": "39",
"edit_name": true
},
"previous": null,
"event": "added.organization"
}
This is what is logged in the spreadsheet:
{parameter={}, contextPath=, contentLength=3080, queryString=null, parameters={}, postData=FileUpload}
I receive an error that says:
TypeError: Cannot call property contents in object FileUpload. It is not a function, it is "string". (line 9, file "Code", project "PipeDrive notifcations")
If anyone has any idea what I'm doing wrong I'd love to hear it. Thank you very much!

So I've fixed my issue.
I invoked .contents() on postData, but I should have just written .contents. Removing the parentheses fixed my issue.
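For anyone hitting the same thing, here is a minimal sketch of the corrected function (only the property accesses change; note that contentLength also needs a capital L - this is a sketch, not my full script):
function doPost(e) {
  // contents is a property, not a method, and so is contentLength.
  var json = e.postData.contents;
  var data = JSON.parse(json);
  var length = e.contentLength;
  // ...write json / length / data to the spreadsheet as before...
}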

How to parse nested JSON data using Cypher query language into a Neo4j database?

In the JSON file below I want to access "934934507945312256", "934934503604174848", ..., and then the keys inside them.
But after using the UNWIND clause I am unable to access the data under these keys (quote_count, reply_count, etc.), as the top-level keys ("934934507945312256", "934934503604174848", ...) are randomly generated.
{
"934934507945312256": {
"quote_count": 0,
"reply_count": 0,
"hashtags": null,
"datetime": "2017-11-26 23:58:51",
"date": "2017-11-26",
"like_count": 0,
"verified": "False",
"sentiment": 0,
"author": "Judyđź’ŻThe Resistance",
"location": "Hollywood, California USA🇺🇸",
"tid": "934934507945312256",
"retweet_count": 0,
"type": "retweet",
"media_list": null,
"quoted_source_id": null,
"url_list": null,
"tweet_text": "RT #kylegriffin1: Reminder: The Senate Judiciary Committee gave Jared Kushner a November 27 deadline to turn over the missing records… ",
"author_profile_image": "https://pbs.twimg.com/profi...",
"author_screen_name": "jgirl66",
"author_id": "23737528",
"lang": "en",
"keywords_processed_list": [
"reminder",
"senate judiciary committee",
"kushner november",
"deadline"
],
"retweet_source_id": "934872065471115264",
"mentions": [
"kylegriffin1"
],
"replyto_source_id": null
},
"934934503604174848": {
"quote_count": 0,
"reply_count": 2,
"hashtags": [
"MissUniverse",
"Thailand"
],
"datetime": "2017-11-26 23:58:50",
"date": "2017-11-26",
"like_count": 38,
"verified": "False",
"sentiment": 0,
"author": "P'Hmee7.5",
"location": "Bangkok, Thailand",
"tid": "934934503604174848",
"retweet_count": 105,
"type": "Tweet",
"media_list": null,
"quoted_source_id": null,
"url_list": null,
"tweet_text": "รอโหวต มรญ #MissUniverse #Thailand",
"author_profile_image": "
Thumbnail
",
"author_screen_name": "Peehmee75",
"author_id": "700720806972624897",
"lang": "th",
"keywords_processed_list": null,
"retweet_source_id": null,
"mentions": null,
"replyto_source_id": null
},
"934934336381636608": {
"quote_count": 0,
"reply_count": 0,
"hashtags": null,
"datetime": "2017-11-26 23:58:10",
"date": "2017-11-26",
"like_count": 0,
"verified": "False",
"sentiment": 0,
"author": "selfresqingprncess",
"location": "Maine, USA",
"tid": "934934336381636608",
"retweet_count": 0,
"type": "retweet",
"media_list": null,
"quoted_source_id": null,
"url_list": null,
"tweet_text": "RT #kylegriffin1: Reminder: The Senate Judiciary Committee gave Jared Kushner a November 27 deadline to turn over the missing records… ",
"author_profile_image": "https://pbs.twimg.com/profi...",
"author_screen_name": "slfresqngprncss",
"author_id": "100536014",
"lang": "en",
"keywords_processed_list": [
"reminder",
"keywords_processed_list": [
"reminder",
"senate judiciary committee",
"kushner november",
"deadline"
],
"retweet_source_id": "934872065471115264",
"mentions": [
"kylegriffin1"
],
"replyto_source_id": null
}
}
Here's what I have tried:
query = """
with {json} as data UNWIND data as doc FOREACH( l in doc| MERGE (label1:Label1 {author:l.author}))
"""
But I am getting this error: Cannot merge node using null property value for author.
Your JSON file is malformed.
A JSON string cannot contain control characters (like carriage returns or linefeeds). And your JSON contains an extraneous section (that is probably a copy/paste error):
"keywords_processed_list": [
"reminder",
Furthermore, UNWIND can only be used with lists. You cannot use it to get the properties from a map. Once you have fixed your JSON errors, try the following query (I assume that json is a parameter whose value is your JSON data):
UNWIND KEYS($json) AS k
MERGE(label1:Label1 {author: $json[k].author})
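If you are running this from Python (as your query = """...""" snippet suggests), a minimal sketch of passing the parsed JSON in as the $json parameter might look like the following; the URI, credentials, and file name are placeholders, and this assumes the official neo4j Python driver:
import json
from neo4j import GraphDatabase

# Placeholder connection details -- replace with your own.
driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

query = """
UNWIND KEYS($json) AS k
MERGE (label1:Label1 {author: $json[k].author})
"""

with open("tweets.json") as f:   # the corrected JSON file
    data = json.load(f)

with driver.session() as session:
    session.run(query, json=data)

driver.close()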

Convert JSON list to table where one record is a list

I have a JSON list with 253 entries, each with 7 fields, where the 7th field is itself a list with usually 2 entries.
I'm trying to convert this in Power Query to give me a table output. I've only really ever used the basic connection in Excel to do this automatically without problems.
My current error is:
Expression.Error: We cannot convert a value of type Record to type Text.
Details:
Value=Record
Type=Type
I'm not sure where to go and I can't see examples for my specific situation, although I'm sure there are plenty of examples - just that I'm not good enough with this to understand them.
[
{
"Id": "lorum-ipsum1",
"Description": "sitename 1",
"Latitude": 1.0,
"Longitude": -1.0,
"Postcode": "AB1 2CD",
"CountryCode": "GB",
"Connectors": [
{
"Id": "lorum-ipsum1-a",
"Number": 1,
"Status": 1,
"ErrorCode": "NoError",
"ChargepointName": "GP00000",
"Shape": 0,
"Mode": 0,
"Volts": 240,
"Amps": 0,
"Phase": 2,
"PricingInformationUrl": "hrefhere",
"UsageRestrictions": []
},
{
"Id": "lorum-ipsum1-b",
"Number": 2,
"Status": 1,
"ErrorCode": "NoError",
"ChargepointName": "GP0000",
"Shape": 0,
"Mode": 0,
"Volts": 240,
"Amps": 0,
"Phase": 2,
"PricingInformationUrl": "hrefhere",
"UsageRestrictions": []
}
]
},
{
"Id": "lorum-ipsum2",
"Description": "sitename 2",
"Latitude": 1.0,
"Longitude": -1.0,
"Postcode": "AB1 2CD",
"CountryCode": "GB",
"Connectors": [
{
"Id": "lorum-ipsum2-a",
"Number": 1,
"Status": 1,
"ErrorCode": "NoError",
"ChargepointName": "GP00000",
"Shape": 0,
"Mode": 0,
"Volts": 240,
"Amps": 0,
"Phase": 2,
"PricingInformationUrl": "hrefhere",
"UsageRestrictions": []
},
{
"Id": "lorum-ipsum2-b",
"Number": 2,
"Status": 1,
"ErrorCode": "NoError",
"ChargepointName": "GP0000",
"Shape": 0,
"Mode": 0,
"Volts": 240,
"Amps": 0,
"Phase": 2,
"PricingInformationUrl": "hrefhere",
"UsageRestrictions": []
}
]
},
I think you want to use the Table.FromRecords function to transform the JSON into a table. However, since the Connectors field of each record is itself a list of records, I think you will need to call it a second time.
To give you an example:
let
serialised = "[{""Id"":""lorum-ipsum1"",""Description"":""sitename 1"",""Latitude"":1,""Longitude"":-1,""Postcode"":""AB1 2CD"",""CountryCode"":""GB"",""Connectors"":[{""Id"":""lorum-ipsum1-a"",""Number"":1,""Status"":1,""ErrorCode"":""NoError"",""ChargepointName"":""GP00000"",""Shape"":0,""Mode"":0,""Volts"":240,""Amps"":0,""Phase"":2,""PricingInformationUrl"":""hrefhere"",""UsageRestrictions"":[]},{""Id"":""lorum-ipsum1-b"",""Number"":2,""Status"":1,""ErrorCode"":""NoError"",""ChargepointName"":""GP0000"",""Shape"":0,""Mode"":0,""Volts"":240,""Amps"":0,""Phase"":2,""PricingInformationUrl"":""hrefhere"",""UsageRestrictions"":[]}]},{""Id"":""lorum-ipsum2"",""Description"":""sitename 2"",""Latitude"":1,""Longitude"":-1,""Postcode"":""AB1 2CD"",""CountryCode"":""GB"",""Connectors"":[{""Id"":""lorum-ipsum2-a"",""Number"":1,""Status"":1,""ErrorCode"":""NoError"",""ChargepointName"":""GP00000"",""Shape"":0,""Mode"":0,""Volts"":240,""Amps"":0,""Phase"":2,""PricingInformationUrl"":""hrefhere"",""UsageRestrictions"":[]},{""Id"":""lorum-ipsum2-b"",""Number"":2,""Status"":1,""ErrorCode"":""NoError"",""ChargepointName"":""GP0000"",""Shape"":0,""Mode"":0,""Volts"":240,""Amps"":0,""Phase"":2,""PricingInformationUrl"":""hrefhere"",""UsageRestrictions"":[]}]}]",
deserialised = Json.Document(serialised),
toTable = Table.FromRecords(deserialised),
transformConnectors = Table.TransformColumns(toTable, {{"Connectors", Table.FromRecords}})
in
transformConnectors
which gives me a table in which each row's Connectors column holds a nested table.
From there, you can explore/continue the rest of the transformation yourself (since you haven't specified what you want).
You can expand some/all of the nested columns inside the Connectors column either by clicking the expand icon in the Connectors column header -- or by writing the necessary M code, as in the sketch below.
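For instance, appending a step like this after transformConnectors (the expanded column names come from your sample data and are only illustrative) pulls a few connector fields up to the top level:
expandConnectors = Table.ExpandTableColumn(
    transformConnectors,
    "Connectors",
    {"Id", "Number", "Status", "ChargepointName"},
    {"Connector.Id", "Connector.Number", "Connector.Status", "Connector.ChargepointName"}
)
You would then return expandConnectors from the in clause instead of transformConnectors.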

Ruby API call to get data from complex json

I'm making an API GET call using Ruby - the call is made to a Learning Management System and returns the following JSON:
{
"id": 12345,
"body": null,
"url": null,
"grade": "75",
"score": 75,
"submitted_at": "2020-05-02T11:30:53Z",
"assignment_id": 9876,
"user_id": 1111,
"submission_type": "online_upload",
"workflow_state": "graded",
"grade_matches_current_submission": true,
"graded_at": "2017-06-05T08:47:49Z",
"grader_id": 2222,
"attempt": 1,
"cached_due_date": "2020-05-03T15:00:00Z",
"excused": false,
"late_policy_status": null,
"points_deducted": null,
"grading_period_id": null,
"late": false,
"missing": false,
"seconds_late": 0,
"entered_grade": "75",
"entered_score": 75,
"preview_url": "https://etcetc",
"turnitin_data": {
"attachment_33333": {
"status": "scored",
"object_id": "44444444",
"similarity_score": 0,
"web_overlap": 0,
"publication_overlap": 0,
"student_overlap": 0,
"state": "none"
}
},
"attachments": [
{
"id": 33333,
"uuid": "kjsdkjhsdfkhsfd",
"folder_id": 55555,
"display_name": "Submission.pdf",
"filename": "Submission.pdf",
"content-type": "application/pdf",
"url": "https://etcetc",
"size": 2668226,
"created_at": "2020-05-02T11:30:51Z",
"updated_at": "2020-06-06T15:01:46Z",
"unlock_at": null,
"locked": false,
"hidden": false,
"lock_at": null,
"hidden_for_user": false,
"thumbnail_url": null,
"modified_at": "2020-05-02T11:30:51Z",
"mime_class": "pdf",
"media_entry_id": null,
"locked_for_user": false,
"preview_url": "api/etcetc"
}
],
"submission_comments": [
{
"id": 99999,
"comment": "here’s a comment",
"author_id": 1,
"author_name": "Mickey Mouse",
"created_at": "2020-05-15T12:54:08Z",
"edited_at": null,
"avatar_path": "/images/users/1",
"author": {
"id": 1,
"display_name": " Mickey Mouse ",
"avatar_image_url": "https://etcetc",
"html_url": "https://etcetc"
}
},
{
"id": 223344,
"comment": "another comment",
"author_id": 2,
"author_name": "Donald Duck",
"created_at": "2020-06-05T10:48:51Z",
"edited_at": null,
"avatar_path": "/images/users/2",
"author": {
"id": 2,
"display_name": "Donald Duck",
"avatar_image_url": "https://etcetc",
"html_url": "https://etcetc"
}
}
]
}
I need to be able to retrieve specific values from "submission_comments", namely the values for "comment", "author_id" and "author_name". At the moment the best I can do is retrieve "submission_comments" as one big entity. Here's how I'm getting that far:
require 'typhoeus'
require 'link_header'
require 'json'
require 'csv'

the_url = 'https://etctetc'
token = 'mytoken'
api_endpoint = '/api/etc'
output_csv = 'C:\Users\me\Desktop\Ruby Canvas course\assignment_comments.csv'

CSV.open(output_csv, 'wb') do |csv|
  csv << ["user_id", "TII", "marker"]
end

request_url = "#{the_url}#{api_endpoint}"
count = 0
more_data = true

while more_data
  get_comments = Typhoeus::Request.new(
    request_url,
    method: :get,
    headers: { authorization: "Bearer #{token}" }
  )

  get_comments.on_complete do |response|
    # get next link
    links = LinkHeader.parse(response.headers['link']).links
    next_link = links.find { |link| link['rel'] == 'next' }
    request_url = next_link.href if next_link

    if next_link && "#{response.body}" != "[]"
      more_data = true
    else
      more_data = false
    end

    if response.code == 200
      data = JSON.parse(response.body)
      data.each do |comments|
        CSV.open(output_csv, 'a') do |csv|
          csv << [comments['id'], comments['turnitin_data'], comments['submission_comments']]
        end
      end
    else
      puts "Something went wrong! Response code was #{response.code}"
    end
  end

  get_comments.run
end

puts "Script done running"
I'm new to this (the Ruby code is based on an exercise, so I may not fully understand it) - any help/advice would be really appreciated!
EDIT: I should also note that this isn't the total JSON response I'm dealing with - this is just one of ten items that are returned
"submission_comments": [
{
"id": 99999,
}
]
The [] means it is an array; the {} means it is an object.
So you probably need to do something like this:
json["submission_comments"].first["id"]
or, better, iterate through it:
ids = json["submission_comments"].map{|comment| comment["id"]}
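To pull out the specific fields you mentioned, a minimal sketch (assuming json is the already-parsed hash for a single submission, e.g. json = JSON.parse(response.body)) would be:
require 'json'

json = JSON.parse(response.body)   # one submission object

json["submission_comments"].each do |c|
  puts c["comment"]
  puts c["author_id"]
  puts c["author_name"]
end

# Or collect them as rows, e.g. for your CSV:
rows = json["submission_comments"].map do |c|
  [c["comment"], c["author_id"], c["author_name"]]
end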
I'm able to get the variables you need if you can read the JSON file in as text, then use Ruby's JSON.parse(...) method on it. I think the main problem is that JSON uses null but Ruby hashes use nil. You could do a string replace or try something like this (I did not modify your JSON, only put it into a single quoted string):
json_text = '{
"id": 12345,
"body": null,
"url": null,
"grade": "75",
"score": 75,
"submitted_at": "2020-05-02T11:30:53Z",
"assignment_id": 9876,
"user_id": 1111,
"submission_type": "online_upload",
"workflow_state": "graded",
"grade_matches_current_submission": true,
"graded_at": "2017-06-05T08:47:49Z",
"grader_id": 2222,
"attempt": 1,
"cached_due_date": "2020-05-03T15:00:00Z",
"excused": false,
"late_policy_status": null,
"points_deducted": null,
"grading_period_id": null,
"late": false,
"missing": false,
"seconds_late": 0,
"entered_grade": "75",
"entered_score": 75,
"preview_url": "https://etcetc",
"turnitin_data": {
"attachment_33333": {
"status": "scored",
"object_id": "44444444",
"similarity_score": 0,
"web_overlap": 0,
"publication_overlap": 0,
"student_overlap": 0,
"state": "none"
}
},
"attachments": [
{
"id": 33333,
"uuid": "kjsdkjhsdfkhsfd",
"folder_id": 55555,
"display_name": "Submission.pdf",
"filename": "Submission.pdf",
"content-type": "application/pdf",
"url": "https://etcetc",
"size": 2668226,
"created_at": "2020-05-02T11:30:51Z",
"updated_at": "2020-06-06T15:01:46Z",
"unlock_at": null,
"locked": false,
"hidden": false,
"lock_at": null,
"hidden_for_user": false,
"thumbnail_url": null,
"modified_at": "2020-05-02T11:30:51Z",
"mime_class": "pdf",
"media_entry_id": null,
"locked_for_user": false,
"preview_url": "api/etcetc"
}
],
"submission_comments": [
{
"id": 99999,
"comment": "here’s a comment",
"author_id": 1,
"author_name": "Mickey Mouse",
"created_at": "2020-05-15T12:54:08Z",
"edited_at": null,
"avatar_path": "/images/users/1",
"author": {
"id": 1,
"display_name": " Mickey Mouse ",
"avatar_image_url": "https://etcetc",
"html_url": "https://etcetc"
}
},
{
"id": 223344,
"comment": "another comment",
"author_id": 2,
"author_name": "Donald Duck",
"created_at": "2020-06-05T10:48:51Z",
"edited_at": null,
"avatar_path": "/images/users/2",
"author": {
"id": 2,
"display_name": "Donald Duck",
"avatar_image_url": "https://etcetc",
"html_url": "https://etcetc"
}
}
]
}'
Part I added:
ruby_hash = JSON.parse(json_text)
submission_comments = ruby_hash["submission_comments"]

submission_comments.each do |submission_comment|
  comment = submission_comment["comment"]
  author_id = submission_comment["author_id"]
  author_name = submission_comment["author_name"]
  puts "Comment: #{comment}, Author ID: #{author_id}, Author Name: #{author_name}\n\n"
end
Terminal Result:
=> Comment: here’s a comment, Author ID: 1, Author Name: Mickey Mouse
=> Comment: another comment, Author ID: 2, Author Name: Donald Duck
Edit: I added a jenky af one-liner version just for fun (presuming the json_text variable above is already initialized)
JSON.parse(json_text)["submission_comments"]
.map{|txt| puts(["comment","author_id","author_name"]
.map{|k| k.instance_eval{"#{upcase}: #{txt[to_s]}"}}.join(', '))}
COMMENT: here’s a comment, AUTHOR_ID: 1, AUTHOR_NAME: Mickey Mouse
COMMENT: another comment, AUTHOR_ID: 2, AUTHOR_NAME: Donald Duck

JSON data in web_custom_request - VuGen

My LoadRunner (VuGen) script throws warnings and the request fails, returning a JSON error response.
Below is the script:
char *request_json_base;
char *request_json;
web_add_header("Content-Type", "application/json; charset=utf-8");
request_json_base= "{\"GLDefaultId\":0,\"ProjectId\":{ContactProjectId},\"CoaMainId\":10,\"UserId\":{UserId},\"Notes\":\"\",\"PRO_GLDefaultValue\":[{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":16,\"Alias\":\"ACT\",\"SegmentType\":\"DETAIL\",\"FunctionTypeId\":0,\"Value\":\"&&&\",\"Mask\":\"&&&\",\"IsRequired\":true,\"LastModified\":null,\"IsCoaRequired\":true},{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":17,\"Alias\":\"ST\",\"SegmentType\":\"SET\",\"FunctionTypeId\":0,\"Value\":\"&&\",\"Mask\":\"&&\",\"IsRequired\":false,\"LastModified\":null,\"IsCoaRequired\":false}]}",
request_json = lr_eval_string(request_json_base);
lr_save_string(request_json, "REQUEST_JSON_PARAM");
web_custom_request("PRO_GLDefault",
"URL=https://{URL}/PayrollWebApi/V1/PRO_GLDefault?userId={UserId}",
"Method=POST",
"TargetFrame=",
"Resource=0",
"RecContentType=application/json",
"Referer=",
"Snapshot=t299.inf",
"Mode=HTML",
"EncType=application/json",
"Body={REQUEST_JSON_PARAM}",
LAST);
Below is the replay log:
Action3_SUNP_ProjectSetup_RatesandSetupInfo.c(81): Warning: The string '"GLDefaultValueId":0,"GLDefaultId":0,"SegmentId":16,"Alias":"ACT","SegmentType":"DETAIL","FunctionTypeId":0,"Value":"&&&","Mask":"&&&","IsRequired":true,"LastModified":null,"IsCoaRequired":true' with parameter delimiters is not a parameter.
Action3_SUNP_ProjectSetup_RatesandSetupInfo.c(81): Warning: The string '"GLDefaultValueId":0,"GLDefaultId":0,"SegmentId":17,"Alias":"ST","SegmentType":"SET","FunctionTypeId":0,"Value":"&&","Mask":"&&","IsRequired":false,"LastModified":null,"IsCoaRequired":false' with parameter delimiters is not a parameter.
Action3_SUNP_ProjectSetup_RatesandSetupInfo.c(83): Notify: Saving Parameter "REQUEST_JSON_PARAM = {"GLDefaultId":0,"ProjectId":1810,"CoaMainId":10,"UserId":67,"Notes":"","PRO_GLDefaultValue":[{"GLDefaultValueId":0,"GLDefaultId":0,"SegmentId":16,"Alias":"ACT","SegmentType":"DETAIL","FunctionTypeId":0,"Value":"&&&","Mask":"&&&","IsRequired":true,"LastModified":null,"IsCoaRequired":true},{"GLDefaultValueId":0,"GLDefaultId":0,"SegmentId":17,"Alias":"ST","SegmentType":"SET","FunctionTypeId":0,"Value":"&&","Mask":"&&","IsRequired":false,"LastModified":null,"IsCoaRequired":false}]}".
Action3_SUNP_ProjectSetup_RatesandSetupInfo.c(85): {"$id":"1","Message":"An error has occurred."}
Below is the body of the original recorded request call:
"Body={\"GLDefaultId\":0,\"ProjectId\":{ContactProjectId},\"CoaMainId\":10,\"UserId\":{UserId},\"Notes\":\"\",\"PRO_GLDefaultValue\":[{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":16,\"Alias\":\"ACT\",\"SegmentType\":\"DETAIL\",\"FunctionTypeId\":0,\"Value\":\"&&&\",\"Mask\":\"&&&\",\"IsRequired\":true,\"LastModified\":null,\"IsCoaRequired\":true},{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":17,\"Alias\":\"ST\",\"SegmentType\":\"SET\",\"FunctionTypeId\":0,\"Value\":\"&&\",\"Mask\":\"&&\",\"IsRequired\":false,\"LastModified\":null,\"IsCoaRequired\":false}]}",
LAST);
Below are the successful request and response captured during recording:
Request:
{
"GLDefaultId": 0,
"ProjectId": 1806,
"CoaMainId": 10,
"UserId": 2,
"Notes": "",
"PRO_GLDefaultValue": [
{
"GLDefaultValueId": 0,
"GLDefaultId": 0,
"SegmentId": 16,
"Alias": "ACT",
"SegmentType": "DETAIL",
"FunctionTypeId": 0,
"Value": "&&&",
"Mask": "&&&",
"IsRequired": true,
"LastModified": null,
"IsCoaRequired": true
},
{
"GLDefaultValueId": 0,
"GLDefaultId": 0,
"SegmentId": 17,
"Alias": "ST",
"SegmentType": "SET",
"FunctionTypeId": 0,
"Value": "&&",
"Mask": "&&",
"IsRequired": false,
"LastModified": null,
"IsCoaRequired": false
}
]
}
Response:
{
"$id": "1",
"GLDefaultId": 1,
"ProjectId": 1806,
"CoaMainId": 10,
"Notes": "",
"PRO_GLDefaultValue": [
{
"$id": "2",
"GLDefaultValueId": 1,
"GLDefaultId": 1,
"Alias": "ACT",
"FunctionTypeId": 0,
"Value": "&&&",
"SegmentId": 16,
"IsRequired": true,
"ModifiedOn": "7/1/2017 4:48:06 PM",
"ModifiedBy": 2,
"PRO_GLDefault": {
"$ref": "1"
},
"LastModified": null
},
{
"$id": "3",
"GLDefaultValueId": 2,
"GLDefaultId": 1,
"Alias": "ST",
"FunctionTypeId": 0,
"Value": "&&",
"SegmentId": 17,
"IsRequired": false,
"ModifiedOn": "7/1/2017 4:48:06 PM",
"ModifiedBy": 2,
"PRO_GLDefault": {
"$ref": "1"
},
"LastModified": null
}
]
}
Please advise me on how to overcome this.
Thanks,
Raj
The warning is just that, a warning, not an error, and it is expected if you have curly braces in your POST body.
What is line 85, where the error is generated?
See these two lines:
request_json_base= "{\"GLDefaultId\":0,\"ProjectId\":{ContactProjectId},\"CoaMainId\":10,\"UserId\":{UserId},\"Notes\":\"\",\"PRO_GLDefaultValue\":[{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":16,\"Alias\":\"ACT\",\"SegmentType\":\"DETAIL\",\"FunctionTypeId\":0,\"Value\":\"&&&\",\"Mask\":\"&&&\",\"IsRequired\":true,\"LastModified\":null,\"IsCoaRequired\":true},{\"GLDefaultValueId\":0,\"GLDefaultId\":0,\"SegmentId\":17,\"Alias\":\"ST\",\"SegmentType\":\"SET\",\"FunctionTypeId\":0,\"Value\":\"&&\",\"Mask\":\"&&\",\"IsRequired\":false,\"LastModified\":null,\"IsCoaRequired\":false}]}",
request_json = lr_eval_string(request_json_base);
They are not doing what you think they are doing. You are neither allocating space for this data (see malloc() in combination with a char * variable), nor are you assigning the value to the variable correctly for the C language (see strcpy()).
Another reference here on Stack Overflow: How to correctly assign a new string value?
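As a rough sketch of that pattern in a VuGen action (the JSON body is abbreviated here, so treat this as the shape to follow rather than a verified fix):
char *evaluated;
char *request_json;

// Let LoadRunner substitute {ContactProjectId} and {UserId} first...
evaluated = lr_eval_string(
    "{\"GLDefaultId\":0,\"ProjectId\":{ContactProjectId},\"UserId\":{UserId}, ... rest of the body ... }");

// ...then copy the result into memory the script owns before reusing it.
request_json = (char *)malloc(strlen(evaluated) + 1);
strcpy(request_json, evaluated);

lr_save_string(request_json, "REQUEST_JSON_PARAM");
free(request_json);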

Parse JSON output for primary and secondary hosts from replSetGetStatus

I've used pymongo to connect to a MongoDB replica set and printed the replica set status using a JSON dump. I want to parse this output and display "name" and "stateStr" in a list or array so the user can pick a particular host. Here is my JSON dump output:
{
"replSetGetStatus": {
"date": "2016-10-07T14:21:25",
"members": [
{
"_id": 0,
"health": 1.0,
"name": "xxxxxxxxxxx:27017",
"optime": null,
"optimeDate": "2016-10-07T13:50:11",
"self": true,
"state": 1,
"stateStr": "PRIMARY",
"uptime": 32521
},
{
"_id": 1,
"health": 1.0,
"lastHeartbeat": "2016-10-07T14:21:24",
"lastHeartbeatRecv": "2016-10-07T14:21:24",
"name": "xxxxxxxxxxxx:27017",
"optime": null,
"optimeDate": "2016-10-07T13:50:11",
"pingMs": 0,
"state": 2,
"stateStr": "SECONDARY",
"syncingTo": "xxxxxxxxxxxx:27017",
"uptime": 27297
},
{
"_id": 2,
"health": 1.0,
"lastHeartbeat": "2016-10-07T14:21:24",
"lastHeartbeatRecv": "2016-10-07T14:21:24",
"name": "xxxxxxxxxxxxx:27020",
"pingMs": 0,
"state": 7,
"stateStr": "ARBITER",
"uptime": 32517
}
],
"myState": 1,
"ok": 1.0,
"set": "replica1"
}
}
Please try the JavaScript code below (run in the mongo shell). It worked for me.
use admin;
var result = rs.status();
var length = result.members.length;
for (var i = 0; i < length; i++) {
    print("Server Name-" + result.members[i].name);
    print("Server State-" + result.members[i].stateStr);
}
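If you would rather stay in pymongo than drop into the shell, a minimal sketch along the same lines (the connection string is a placeholder; replSetGetStatus has to be run against the admin database) could be:
from pymongo import MongoClient

# Placeholder connection string -- point it at your replica set.
client = MongoClient("mongodb://host1:27017,host2:27017/?replicaSet=replica1")

status = client.admin.command("replSetGetStatus")

# Collect (name, stateStr) pairs so the user can pick a host.
members = [(m["name"], m["stateStr"]) for m in status["members"]]
for name, state in members:
    print(name, state)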