Why is the response to gapi.client.drive.realtime.get empty? - google-drive-api

Below is a page that successfully authenticates, then tries to use the drive.realtime.get method to get a JSON export of an existing realtime document in three ways. The results of the console.log calls are shown inline in comments.
The file with id 'EXISTING-FILE-ID' exists and has had content added using the realtime api. I am able to get the JSON exported data in a browser at
https://www.googleapis.com/drive/v2/files/EXISTING-FILE-ID/realtime?access_token=VALID-ACCESS-TOKEN which returns
{"appId":"CLIENT-ID","revision":10,"data":{"id":"root","type":"Map","value":{"blah":{"json":"anything"},"key":{"json":"val"},"key2":{"json":"val2"}}}}
However, in Chrome, Firefox, and Safari, the response to gapi.client.drive.realtime.get and gapi.client.rpcRequest is always empty: {"result":{}}.
In Chrome and Firefox, the body of the response to gapi.client.request is a string of characters that partially changes when the content of the document is changed with the realtime api. This may be some gzipped content (the response headers include content-encoding: "gzip"), but I haven't been able to gunzip it. The etag in the response header also changes when the document changes.
In Safari, the gapi.client.request response body contains the same string of characters as on Chrome and Firefox (eyJH...) but the correct contents of the exported document are shown in the console log, the same as when I use a browser window with the googleapis.com url.
<!DOCTYPE html>
<html>
<head>
<script type="text/javascript" src="https://apis.google.com/js/api.js"></script>
<script type="text/javascript">
var fileId = 'EXISTING-FILE-ID';

var start = function() {
  // load apis (then call authorize)
  gapi.load('auth:client,drive-realtime', function() {
    gapi.client.load('drive', 'v2', function() {
      authorize();
    });
  });
};

// authorize with drive scope
var authorize = function() {
  gapi.auth.authorize({
    'client_id': 'CLIENT-ID',
    'scope': ['https://www.googleapis.com/auth/drive', 'openid'],
    'immediate': true
  }, function() {
    realtimeget(fileId);
  });
};

// try to get realtime document export in 3 different ways
var realtimeget = function(id) {
  gapi.client.drive.realtime.get({
    'fileId': id
  }).execute(function() {
    console.log(JSON.stringify(arguments));
    // {"0":{"result":{}},"1":"[\n {\n \"id\": \"gapiRpc\",\n \"result\": {}\n }\n]\n"}
  });

  gapi.client.rpcRequest('drive.realtime.get', 'v2', {
    'fileId': id
  }).execute(function() {
    console.log(JSON.stringify(arguments));
    // {"0":{"result":{}},"1":"[\n {\n \"id\": \"gapiRpc\",\n \"result\": {}\n }\n]\n"}
  });

  gapi.client.request({
    'path': '/drive/v2/files/' + id + '/realtime',
    'method': 'GET'
  }).execute(function() {
    console.log('gapi.client.request:');
    console.log(arguments[0]);
    // false
    console.log(arguments[1]);
    // {"gapiRequest":{"data":{"body":"eyJhcHBJZCI6IjEwNjY4MTY3MjA5NzQiLCJyZXZpc2lvbiI6MTAsImRhdGEiOnsiaWQiOiJyb290IiwidHlwZSI6Ik1hcCIsInZhbHVlIjp7ImJsYWgiOnsianNvbiI6ImFueXRoaW5nIn0sImtleSI6eyJqc29uIjoidmFsIn0sImtleTIiOnsianNvbiI6InZhbDIifX19fQ==","headers":{"date":"Thu, 08 Aug 2013 19:17:19 GMT","content-encoding":"gzip","x-goog-safety-encoding":"base64","server":"GSE","etag":"\"Q5ElJByAJoL0etObruYVPRipH1k/fDOlc7uypufY3ROxh-RtfV86Kmg\"","content-type":"text/plain; charset=UTF-8","cache-control":"private, max-age=0, must-revalidate, no-transform","x-goog-safety-content-type":"application/json","content-length":"183","expires":"Thu, 08 Aug 2013 19:17:19 GMT"},"status":200,"statusText":"OK"}}}
  });
};
</script>
</head>
<body onload="start();"></body>
</html>

We're looking into the issues with the client library, but for now I would recommend just making an XHR GET to the export URL:
var id = '{DOCUMENT ID}';
var accessToken = gapi.auth.getToken()['access_token'];
var xhr = new XMLHttpRequest();
xhr.open('GET', 'https://www.googleapis.com/drive/v2/files/' + id + '/realtime?access_token=' + accessToken);
xhr.onload = function() {
  console.log(xhr.responseText);
};
xhr.onerror = function() {
  // Handle error
};
xhr.send();
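As a side note on the gapi.client.request response above: the x-goog-safety-encoding: base64 header suggests that the logged body (eyJh...) is just base64-encoded JSON rather than something you need to gunzip, so decoding it in the browser should also recover the export. A minimal sketch, where body is assumed to hold that string:
// Sketch: decode the base64-wrapped body from the gapi.client.request response.
// 'body' is assumed to be the "eyJh..." string shown in the logged response above.
var decoded = atob(body);           // base64 -> JSON text
var exported = JSON.parse(decoded); // e.g. {"appId": "...", "revision": 10, "data": {...}}
console.log(exported);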

If you are just running this inline as-is, I think the problem is simply that you need to wait for the contents to be saved before you do your get.
Add a DocumentSaveStateChangedEvent listener to your document after making the change, and trigger realtimeget when both isPending and isSaving are false (a sketch follows below).
Looking at this code, a separate page load wouldn't do anything, since it's creating a new document each time.
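A minimal sketch of that save-state listener, assuming doc is the realtime document returned by gapi.drive.realtime.load:
// Sketch: wait until the realtime document has no pending or in-flight saves,
// then fetch the JSON export. Assumes 'doc' came from gapi.drive.realtime.load.
doc.addEventListener(
  gapi.drive.realtime.EventType.DOCUMENT_SAVE_STATE_CHANGED,
  function(e) {
    if (!e.isPending && !e.isSaving) {
      realtimeget(fileId);
    }
  });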

Related

PhantomJS failing to load Google Maps

My end goal is to open a local html file with javascript embedded, creating a map with polygons, and take a screenshot of it using PhantomJS. I have written a simple JS file to do this:
var page = require('webpage').create();
page.open('https://www.google.com/maps', function(status) {
  console.log('State: ' + status);
  if (status === 'success') {
    page.render('example.pdf', {format: 'pdf', quality: '100'});
  }
  phantom.exit();
});
This returns the error:
ReferenceError: Can't find variable: google
I've tried this on a local html file and on other websites using Google Maps and I keep getting the same error. I have been successful in taking screenshots of other websites without Google Maps. Searching the internet, it doesn't seem like other people have had issues like this; they have been successful in taking screenshots of pages with Google Maps, so I'm wondering what could be wrong.
Another note: I installed PhantomJS as a gem in my Rails project and am running the JavaScript file through the Rails console using this gem. I have also tried it with the standard installation of PhantomJS (v2.0.0) and it still didn't work.
You'll have to wait for an element in the DOM.
For example, on maps.google.com you can wait for the watermark, which is loaded after all the tiles are loaded.
var page = require('webpage').create();

page.open('https://www.google.com/maps', function (status) {
  console.log('State: ' + status);
  if (status === 'success') {
    waitFor(function () {
      return page.evaluate(function () {
        var document_contains_watermark =
          document.body.contains(document.getElementById('watermark'));
        return document_contains_watermark;
      });
    }, function () {
      page.render('maps-google-com.pdf', {format: 'pdf', quality: '100'});
      phantom.exit();
    });
  }
});

function waitFor(testFn, onReady) {
  var loaded = false;
  var interval = setInterval(function () {
    loaded = testFn();
    if (loaded) {
      onReady();
      clearInterval(interval);
    }
  }, 1000);
}
If you want to take a screenshot of a page that you developed, use the same logic as above, but append an element yourself on the Google Maps idle event.
google.maps.event.addListenerOnce(map, 'idle', function () {
  var loadedElem = document.createElement('div');
  loadedElem.setAttribute("id", "idLoadedElem");
  document.body.appendChild(loadedElem);
});
You should give Puppeteer a go; it makes this easy:
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.goto('https://example.com');
  // page.screenshot() only writes png/jpeg, so use page.pdf() for a PDF
  await page.pdf({path: 'example.pdf'});
  await browser.close();
})();
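If you need the same wait-for-the-map behaviour as the PhantomJS answers above, here is a minimal Puppeteer sketch; the #watermark selector (or the #idLoadedElem you append on the idle event) is carried over from those answers as an assumption:
const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.goto('https://www.google.com/maps');
  // Wait for a marker element before rendering, mirroring the waitFor() helper above.
  await page.waitForSelector('#watermark', {timeout: 30000});
  await page.pdf({path: 'maps-google-com.pdf'});
  await browser.close();
})();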

update jqPlot with json data (flicker)

So I've put this together from a bunch of examples around the internet. I'm pulling data from a text file and plotting it with jqPlot. I'm then recalling a function to continually update the plot from the file:
<div id="chart1" style="height:300px; width:500px;"></div>
<script class="code" type="text/javascript">
// Our ajax data renderer which here retrieves a text file.
// it could contact any source and pull data, however.
// The options argument isn't used in this renderer.
var ajaxDataRenderer = function(url, plot, options) {
var ret = null;
$.ajax({
// have to use synchronous here, else the function
// will return before the data is fetched
async: false,
url: url,
dataType:"json",
success: function(data) {
ret = data;
}
});
return ret;
};
// The url for our json data
var jsonurl = "./jsondata.txt";
// passing in the url string as the jqPlot data argument is a handy
// shortcut for our renderer. You could also have used the
// "dataRendererOptions" option to pass in the url.
var plot1;
$(document).ready(function(){
plot1 = $.jqplot('chart1', jsonurl,{
title: "MY GRAPH",
dataRenderer: ajaxDataRenderer,
dataRendererOptions: {
unusedOptionalUrl: jsonurl
}
});
ConstantPlotter();
});
function ConstantPlotter() {
plot1.destroy();
plot1 = $.jqplot('chart1', jsonurl,{
title: "MY GRAPH",
dataRenderer: ajaxDataRenderer,
dataRendererOptions: {
unusedOptionalUrl: jsonurl
}
});
setTimeout(ConstantPlotter,100)
}
</script>
It works fine, but the plot flickers badly since it's being destroyed and recreated every time. The problem is that when I try to replace the 'destroy and plot' code with
plot1 = $.jqplot('chart1', jsonurl, {
  title: "MY GRAPH",
  dataRenderer: ajaxDataRenderer,
  dataRendererOptions: {
    unusedOptionalUrl: jsonurl
  }
}).replot();
it works beautifully but I get a terrible memory leak.
Does anyone know a way to constantly update this plot from the file without having the terrible flicker? Thanks!
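For what it's worth, a pattern that is often suggested for jqPlot is to create the plot once and then swap new data into the existing plot object before calling replot(), instead of constructing a new plot on every tick. A rough sketch, reusing ajaxDataRenderer and jsonurl from the question; whether it also avoids the leak would need to be verified:
function ConstantPlotter() {
  // Fetch fresh data and redraw the existing plot in place.
  var newData = ajaxDataRenderer(jsonurl, plot1, {});
  if (newData) {
    plot1.series[0].data = newData[0]; // assumes a single series; adjust for your data shape
    plot1.replot();
  }
  setTimeout(ConstantPlotter, 100);
}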

WebRTC SDP object (local description) by Firefox does not contain DataChannel info unlike Chrome?

I'm testing the WebRTC procedure step by step for my own understanding.
I wrote a test site for server-less WebRTC:
http://webrtcdevelop.appspot.com/
A STUN server by Google is used, but no signalling server is deployed.
The Session Description Protocol (SDP) data is exchanged manually by hand, that is, copy-pasted between browser windows.
So far, here is the result I've got with the code:
'use strict';

var peerCon;
var ch;

$(document).ready(function() {
  init();
  $('#remotebtn2').attr("disabled", "");
  $('#localbtn').click(function() {
    offerCreate();
    $('#localbtn').attr("disabled", "");
    $('#remotebtn').attr("disabled", "");
    $('#remotebtn2').removeAttr("disabled");
  });
  $('#remotebtn').click(function() {
    answerCreate(
      new RTCSessionDescription(JSON.parse($('#remote').val())));
    $('#localbtn').attr("disabled", "");
    $('#remotebtn').attr("disabled", "");
    $('#remotebtn').attr("disabled", "");
  });
  $('#remotebtn2').click(function() {
    answerGet(
      new RTCSessionDescription(JSON.parse($('#remote').val())));
    $('#remotebtn2').attr("disabled", "");
  });
  $('#msgbtn').click(function() {
    msgSend($('#msg').val());
  });
});

var init = function() {
  //offer------
  peerCon = new RTCPeerConnection(
    {
      "iceServers": [{
        "url": "stun:stun.l.google.com:19302"
      }]
    },
    {
      "optional": []
    });

  var localDescriptionOut = function() {
    console.log(JSON.stringify(peerCon.localDescription));
    $('#local').text(JSON.stringify(peerCon.localDescription));
  };

  peerCon.onicecandidate = function(e) {
    console.log(e);
    if (e.candidate === null) {
      console.log('candidate empty!');
      localDescriptionOut();
    }
  };

  ch = peerCon.createDataChannel('ch1', {
    reliable: true
  });
  ch.onopen = function() {
    dlog('ch.onopen');
  };
  ch.onmessage = function(e) {
    dlog(e.data);
  };
  ch.onclose = function(e) {
    dlog('closed');
  };
  ch.onerror = function(e) {
    dlog('error');
  };
};

var msgSend = function(msg) {
  ch.send(msg);
};

var offerCreate = function() {
  peerCon.createOffer(function(description) {
    peerCon.setLocalDescription(description, function() {
      //wait for complete of peerCon.onicecandidate
    }, error);
  }, error);
};

var answerCreate = function(description) {
  peerCon.setRemoteDescription(description, function() {
    peerCon.createAnswer(function(description) {
      peerCon.setLocalDescription(description, function() {
        //wait for complete of peerCon.onicecandidate
      }, error);
    }, error);
  }, error);
};

var answerGet = function(description) {
  peerCon.setRemoteDescription(description, function() {
    console.log(JSON.stringify(description));
    dlog('local-remote-setDescriptions complete!');
  }, error);
};

var error = function(e) {
  console.log(e);
};

var dlog = function(msg) {
  var content = $('#onmsg').html();
  $('#onmsg').html(content + msg + '<br>');
};
Firefox (26.0): RtpDataChannels; the onopen event is fired successfully, but send fails.
Chrome (31.0): RtpDataChannels; the onopen event is fired successfully, and send also succeeds.
The SDP object from Chrome is as follows:
{"sdp":".................. cname:L5dftYw3P3clhLve\r\na=ssrc:2410443476 msid:ch1 ch1\r\na=ssrc:2410443476 mslabel:ch1\r\na=ssrc:2410443476 label:ch1\r\n","type":"offer"}
where the ch1 information defined in the code,
ch = peerCon.createDataChannel('ch1', {
  reliable: false
});
is bundled properly.
However, the SDP object (local description) from Firefox does not contain the DataChannel information at all; moreover, the SDP is much shorter than Chrome's, with less information bundled.
What am I missing?
I guess the reason that send fails on the DataChannel is this lack of information in the SDP object produced by Firefox.
How can I fix this?
I have investigated the sources of various working libraries, such as PeerJS, easyRTC, and SimpleWebRTC, but I cannot figure out the reason.
Any suggestions and reading recommendations are appreciated.
[not an answer, yet]
I leave this here just trying to help you. I am not much of a WebRTC developer, but I am curious; this is quite new and very interesting for me.
Have you seen this?
DataChannels
Supported in Firefox today, you can use DataChannels to send peer-to-peer information during an audio/video call. There is currently a bug that requires developers to set up some sort of audio/video stream (even a “fake” one) in order to initiate a DataChannel, but we will soon be fixing that.
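Purely as an illustration of that workaround (not tested), one could attach a fake audio stream before creating the offer; the mozGetUserMedia fake constraint and the addStream call here are assumptions based on the Firefox APIs of that time, reusing peerCon and offerCreate from your code:
// Illustrative sketch only: give Firefox an (allegedly required) audio/video stream
// before negotiating. 'fake: true' is a Firefox-specific test constraint.
navigator.mozGetUserMedia({ audio: true, fake: true }, function(stream) {
  peerCon.addStream(stream);
  offerCreate();
}, error);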
Also, I found this bug report, which seems to be related.
One last point: your version of adapter.js is quite different from the one served on code.google.com; the webrtcDetectedVersion part is missing in yours.
https://code.google.com/p/webrtc/source/browse/stable/samples/js/base/adapter.js
Try that, and come back to me with good news.
After the last update, I get this line in the console after clicking 'get answer':
Object { name="INVALID_STATE", message="Cannot set remote offer in state HAVE_LOCAL_OFFER", exposedProps={...}, more...}
but this might be useless info, since I copy-pasted the same browser's offer into the answer field.
...which made me notice you are using jQuery v1.7.1.
Try updating jQuery (before I kill a kitten), and in the meantime make sure you use up-to-date versions of all the scripts.
Whoops: after a quick read of https://developer.mozilla.org/en-US/docs/Web/Guide/API/WebRTC/WebRTC_basics and a comparison with your JavaScript, I see no shim.
Shims
As you can imagine, with such an early API, you must use the browser prefixes and shim it to a common variable.
var PeerConnection = window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
var IceCandidate = window.mozRTCIceCandidate || window.RTCIceCandidate;
var SessionDescription = window.mozRTCSessionDescription || window.RTCSessionDescription;
navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;

Use GM_xmlhttpRequest to POST data on Chrome?

I'm writing a user script to take an image from a page, and upload it to a server.
The script works fine in FF (Greasemonkey and Scriptish), but when I use Chrome (using Tampermonkey or Ninjakit), it does not send the data; it sends the string [object Object] instead.
Here is my script:
// ==UserScript==
// @id          myid
// @name        myname
// @version     1.0
// @namespace   ohadcn
// @author      Ohad Cohen
// @description mydescription
// @include     https://*
// @grant       GM_xmlhttpRequest
// @require     https://code.jquery.com/jquery-2.0.3.min.js
// @run-at      document-end
// ==/UserScript==

function getBase64Image(img) {
  var canvas = document.createElement("canvas");
  canvas.width = img.width;
  canvas.height = img.height;
  var ctx = canvas.getContext("2d");
  ctx.drawImage(img, 0, 0);
  var dataURL = canvas.toDataURL("image/png");
  return dataURL.replace(/^data:image\/(png|jpg);base64,/, "");
}

var img = $("img[alt=myImage]").get(0);
img.onload = function() {
  var img64 = getBase64Image(img);
  var _data = new FormData();
  _data.append("image64", img64);
  GM_xmlhttpRequest({
    method: "POST",
    url: "http://myserver.org/mysscript.py",
    headers: {
      "Content-Type": "multipart/form-data"
    },
    data: _data,
    onload: function(response) {
      console.log("gut response");
      $("#input").get()[0].value = response.responseText;
    }
  });
};
Both Tampermonkey and Ninjakit do send the request. In Tampermonkey I get a response; in Ninjakit I don't (onload is never called).
But they do not send the actual base64-encoded image: the server gets [object Object] as the POST body instead of the data. (I can't get the devtools network panel to show requests made by GM_xmlhttpRequest, so I checked it on the server side.)
It might be that FormData and multipart/form-data are not well supported on those platforms; I need to look into it more (later).
Meanwhile, try the more typical approach: use application/x-www-form-urlencoded or JSON.
For example:
GM_xmlhttpRequest({
  method: "POST",
  url: "http://myserver.org/mysscript.py",
  data: "image64=" + encodeURIComponent(img64),
  headers: {
    "Content-Type": "application/x-www-form-urlencoded"
  },
  onload: function(response) {
    console.log("gut response");
    $("#input").get()[0].value = response.responseText;
  }
});
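And here is a rough sketch of the JSON variant mentioned above, assuming the server at that URL is prepared to parse a JSON body:
// Sketch: send the image as a JSON body instead of form-encoded data.
GM_xmlhttpRequest({
  method: "POST",
  url: "http://myserver.org/mysscript.py",
  data: JSON.stringify({image64: img64}),
  headers: {
    "Content-Type": "application/json"
  },
  onload: function(response) {
    console.log("gut response");
  }
});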

chrome.storage.sync does not store the data

I am trying to store the data a user enters inside a textarea in popup.html. Using jQuery, the data should be saved on window unload and restored on window ready. However, when opening popup.html the content of the textarea is undefined. This is the jQuery code which I am loading in popup.html:
$(window).unload(
  function save() {
    var textarea = document.querySelector("#contacts").value;
    // Old method of storing data locally
    //localStorage["contacts"] = textarea.value;
    // Save data using the Chrome extension storage API.
    chrome.storage.sync.set({contacts: textarea}, function() {
      console.log("Contacts saved");
    });
  });

$(window).ready(
  function restore() {
    var textarea = document.querySelector("#contacts");
    // Old method of retrieving data locally
    // var content = localStorage["contacts"];
    chrome.storage.sync.get('contacts', function(r) {
      console.log("Contacts retrieved");
      var content = r["contacts"];
      textarea.value = content;
    });
  });
From popup.js you can invoke a method in the background.js file to save the data:
popup.js:
addEventListener("unload", function() {
  var background = chrome.extension.getBackgroundPage();
  background.mySaveFunction(data); // 'data' is the object to store, e.g. {contacts: ...}
});
background.js:
function mySaveFunction(data) {
  chrome.storage.sync.set(data, function() {
    console.log("Data saved.");
  });
}
I found a solution. Instead of using $(window).unload() I now use a submit button which needs to be clicked before closing popup.html:
$("#save-button").click(function() {
var textarea = document.querySelector("#contacts").value;
var save = {};
save["contacts"] = textarea;
// Save data using the Chrome extension storage API.
chrome.storage.sync.set(save, function() {
console.log("Contacts saved");
});
$("#confirm").text("Contacts saved.").show().fadeOut(5000);
});
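A possible variant is to save on every change to the textarea instead of relying on a button or the unload event; a sketch, assuming the same #contacts element (debouncing omitted for brevity):
// Sketch: persist the contacts on every edit; no explicit save action needed.
$("#contacts").on("input", function() {
  chrome.storage.sync.set({contacts: this.value}, function() {
    console.log("Contacts saved");
  });
});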