Loading test image into canvas - html

I'm trying to test an AngularJS service that gets handed the ImageData from a canvas, does some pattern recognition, and returns the recognized patterns. I would like to write a unit test (not an E2E test) so I can build the service step by step, but I fail to properly load a test PNG image that I can draw onto a canvas and then extract the ImageData from.
I would like to learn the proper way to solve my problem: getting the ImageData object for a test image in a unit test. Here is what I am currently doing, using Karma, Jasmine 2, and PhantomJS:
it('simple pattern', function (done) {
    var canvas = angular.element('<canvas></canvas>')
    var context = canvas[0].getContext("2d")
    var src = 'test/images/3x3-bw-1star.png' // Not found by PhantomJS
    var src2 = "app/images/yeoman.png" // Found.
    var image = angular.element('<img src="' + src + '"/>')
    image.onerror = function() { // Not called.
        fail("Could not load image.")
        done()
    }
    image.onload = function() { // Not called.
        context.drawImage(image[0], 0, 0)
        var imageData = context.getImageData(0, 0, image.width, image.height)
        expect(starFinder.findStars(imageData)).toEqual(true)
        done()
    }
})
Problems with this code:
PhantomJS fails to load the test image at src (404). The production image at src2 is loaded correctly. The path is correct, and I don't see any restriction to the "app" directory anywhere.
onload is never called. Probably the event handler is attached too late, so by the time the angular.element call returns, the image has already loaded (see the sketch after this list).
If I call onload manually, the context is undefined.
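For reference, a minimal sketch of the handler-attachment fix alone, keeping the same Jasmine structure: the handlers go on a plain DOM Image (not the jqLite wrapper) and are attached before src is set, so a cached image cannot fire load before the handler exists. The 404 still needs the Karma configuration described further down; the findStars expectation is just illustrative.
it('simple pattern', function (done) {
    var canvas = angular.element('<canvas></canvas>')
    var context = canvas[0].getContext('2d')
    // Create the raw element first, attach the handlers, then set src.
    var image = new Image()
    image.onerror = function () {
        fail('Could not load image.')
        done()
    }
    image.onload = function () {
        context.drawImage(image, 0, 0)
        var imageData = context.getImageData(0, 0, image.width, image.height)
        expect(starFinder.findStars(imageData)).toEqual(true)
        done()
    }
    image.src = 'test/images/3x3-bw-1star.png'
})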

After @MarcoCI's (correct) suggestion, I realized I was working on the wrong abstraction layer. So I searched for a suitable image manipulation library and stumbled upon Caman. I then refactored the service to accept a Caman object. In the test, I create a Caman object from a canvas and the image URL, which feels right for what I want to do: get an image and have it analyzed.
The 404 error for the test images can be resolved by adding the image to the list of files as served-but-not-included:
{pattern: 'test/images/*.png', watched: true, served: true, included: false}
and then setting the urlRoot:
urlRoot: 'base'
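For context, a sketch of how those two settings fit into a typical karma.conf.js; the surrounding entries are placeholders for whatever the project already lists:
// karma.conf.js (sketch; the other entries are illustrative)
module.exports = function (config) {
    config.set({
        basePath: '',
        urlRoot: 'base',
        files: [
            'app/scripts/**/*.js',
            'test/spec/**/*.js',
            // served by the Karma web server but not loaded into the test page
            {pattern: 'test/images/*.png', watched: true, served: true, included: false}
        ]
    })
}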
The loading and event handling is done by Caman.
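The refactored test then looks roughly like this, assuming the service (starFinder) now accepts the Caman object directly; the exact image URL depends on the urlRoot/basePath setup:
it('simple pattern', function (done) {
    var canvas = document.createElement('canvas')
    // Caman loads the image, draws it onto the canvas and fires the callback when ready.
    Caman(canvas, '/base/test/images/3x3-bw-1star.png', function () {
        // "this" is the Caman object handed to the service under test.
        expect(starFinder.findStars(this)).toEqual(true)
        done()
    })
})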

Related

Ways to capture incoming WebRTC video streams (client side)

I am currently looking for the best way to store incoming WebRTC video streams. I am joining the video call using WebRTC (via Chrome) and I would like to record every incoming video stream from each participant to the browser.
The solutions I am researching are:
Intercept the network packets coming to the browser, e.g. using Wireshark, and then decode them, following this article: https://webrtchacks.com/video_replay/
Modifying a browser to store recording as a file e.g. by modifying Chromium itself
Screen recorders or solutions like xvfb & ffmpeg are not options due to resource constraints. Is there any other way to capture the packets or the encoded video as a file? The solution must work on Linux.
If the media stream is what you want, one method is to override the browser's RTCPeerConnection. Here is an example:
In an extension manifest add the following content script:
content_scripts": [
{
"matches": ["http://*/*", "https://*/*"],
"js": ["payload/inject.js"],
"all_frames": true,
"match_about_blank": true,
"run_at": "document_start"
}
]
inject.js
var inject = '('+function() {
    // Override the browser's default RTCPeerConnection.
    var origPeerConnection = window.RTCPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection;
    // Make sure it is supported.
    if (origPeerConnection) {
        // Our own RTCPeerConnection.
        var newPeerConnection = function(config, constraints) {
            console.log('PeerConnection created with config', config);
            // Proxy the original peer connection.
            var pc = new origPeerConnection(config, constraints);
            // Store the old addStream.
            var oldAddStream = pc.addStream;
            // addStream is called when a local stream is added.
            // arguments[0] is a local MediaStream.
            pc.addStream = function() {
                console.log("our add stream called!")
                // Our MediaStream object.
                console.dir(arguments[0])
                return oldAddStream.apply(this, arguments);
            }
            // ontrack is called when a remote track is added.
            // The media stream(s) are located in event.streams.
            pc.ontrack = function(event) {
                console.log("ontrack got a track")
                console.dir(event);
            }
            window.ourPC = pc;
            return pc;
        };
        ['RTCPeerConnection', 'webkitRTCPeerConnection', 'mozRTCPeerConnection'].forEach(function(obj) {
            // Override objects if they exist in the window object.
            if (window.hasOwnProperty(obj)) {
                window[obj] = newPeerConnection;
                // Copy the static methods.
                Object.keys(origPeerConnection).forEach(function(x){
                    window[obj][x] = origPeerConnection[x];
                })
                window[obj].prototype = origPeerConnection.prototype;
            }
        });
    }
}+')();';
var script = document.createElement('script');
script.textContent = inject;
(document.head||document.documentElement).appendChild(script);
script.parentNode.removeChild(script);
I tested this with a voice call in Google Hangouts and saw that two MediaStreams were added via pc.addStream and one track was added via pc.ontrack. addStream appears to carry the local media streams, and the event object in ontrack is an RTCTrackEvent which has a streams property, which I assume is what you are looking for.
To access these streams from your extension's content script you will need to create audio elements and set their "srcObject" property to the media stream, e.g.:
pc.ontrack = function(event) {
    // Check if our element exists.
    var elm = document.getElementById("remoteStream");
    if(elm == null) {
        // Create an audio element.
        elm = document.createElement("audio");
        elm.id = "remoteStream";
    }
    // Set the srcObject to our stream. Not sure if you need to clone it.
    elm.srcObject = event.streams[0].clone();
    // Write the element to the body.
    document.body.appendChild(elm);
    // Fire a custom event so our content script knows the stream is available.
    // You could pass the id in the "detail" object, for example:
    // new CustomEvent("remoteStreamAdded", {"detail":{"id":"audio_element_id"}})
    // then access it via e.detail.id in your event listener.
    var e = new CustomEvent("remoteStreamAdded");
    window.dispatchEvent(e);
}
Then in your content script you can listen for that event/access the mediastream like so:
window.addEventListener("remoteStreamAdded", function(e) {
elm = document.getElementById("remoteStream");
var stream = elm.captureStream();
})
With the capture stream available to your content script you can do pretty much anything you want with it. For example, MediaRecorder works really well for recording the stream(s) or you could use something like peer.js or maybe binary.js to stream to another source.
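As a rough illustration of the MediaRecorder route, building on the listener above (the element id matches the earlier snippet; the MIME type and filename are just examples):
window.addEventListener("remoteStreamAdded", function(e) {
    var elm = document.getElementById("remoteStream");
    var stream = elm.captureStream();
    var chunks = [];
    // Record the captured stream; audio/webm is an example MIME type.
    var recorder = new MediaRecorder(stream, {mimeType: "audio/webm"});
    recorder.ondataavailable = function(ev) {
        if (ev.data.size > 0) chunks.push(ev.data);
    };
    recorder.onstop = function() {
        // Turn the recorded chunks into a downloadable blob.
        var blob = new Blob(chunks, {type: "audio/webm"});
        var a = document.createElement("a");
        a.href = URL.createObjectURL(blob);
        a.download = "remote-stream.webm";
        a.click();
    };
    recorder.start(1000); // emit a chunk every second
    // Call recorder.stop() when the call ends to finalize the file.
});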
I haven't tested this, but it should also be possible to override the local streams. For example, in inject.js you could set up a blank MediaStream, override navigator.mediaDevices.getUserMedia, and instead of returning the local MediaStream return your own.
This method should work in Firefox and maybe other browsers as well, assuming you use an extension/app to load the inject.js script at the start of the document. Loading it before any of the target's libraries is key to making this work.
Capturing packets will only give you the network packets, which you would then need to turn into frames and put into a container. A server such as Janus can record videos.
Running headless Chrome and using the JavaScript MediaRecorder API is another option, but it is much heavier on resources.

Empty microphone data from getUserMedia

Using the following code I get all zeroes in the audio stream from my microphone (using Chrome):
navigator.mediaDevices.getUserMedia({audio: true}).then(
    function(stream) {
        var audioContext = new AudioContext();
        var source = audioContext.createMediaStreamSource(stream);
        var node = audioContext.createScriptProcessor(8192, 1, 1);
        source.connect(node);
        node.connect(audioContext.destination);
        node.onaudioprocess = function (e) {
            console.log("Audio:", e.inputBuffer.getChannelData(0));
        };
    }).catch(function(error) {console.error(error);})
I created a jsfiddle here: https://jsfiddle.net/g3dck4dr/
What's wrong here?
Umm, something in your hardware config is wrong? The fiddle works fine for me (that is, it shows non-zero values). Do other web audio input tests work, like https://webaudiodemos.appspot.com/input/index.html?
Test to make sure you've selected the right input, and you don't have a hardware mute switch on.
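If input selection is the suspect, here is a quick sketch you can paste into the console: it lists the audio inputs and opens a specific one by deviceId instead of relying on the default device.
// List the available audio inputs, then open a specific one by deviceId.
navigator.mediaDevices.enumerateDevices().then(function(devices) {
    var inputs = devices.filter(function(d) { return d.kind === "audioinput"; });
    inputs.forEach(function(d) { console.log(d.label, d.deviceId); });
    // Pick one explicitly; inputs[0] is just an example choice.
    return navigator.mediaDevices.getUserMedia({
        audio: {deviceId: {exact: inputs[0].deviceId}}
    });
}).then(function(stream) {
    console.log("Got stream from selected input:", stream.getAudioTracks()[0].label);
}).catch(function(error) { console.error(error); });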

"Tainted canvases may not be loaded" Cross domain issue with WebGL textures

I've learnt a lot in the last 48 hours about cross domain policies, but apparently not enough.
Following on from this question. My HTML5 game supports Facebook login. I'm trying to download profile pictures of people's friends. In the HTML5 version of my game I get the following error in Chrome.
detailMessage: "com.google.gwt.core.client.JavaScriptException:
(SecurityError) ↵ stack: Error: Failed to execute 'texImage2D' on
'WebGLRenderingContext': Tainted canvases may not be loaded.
As I understand it, this error occurs because I'm trying to load an image from a different domain, but this can be worked around with an Access-Control-Allow-Origin header, as detailed in this question.
The URL I'm trying to download from is
https://graph.facebook.com/1387819034852828/picture?width=150&height=150
Looking at the network tab in Chrome I can see this has the required access-control-allow-origin header and responds with a 302 redirect to a new URL. That URL varies, I guess depending on load balancing, but here's an example URL.
https://fbcdn-profile-a.akamaihd.net/hprofile-ak-xap1/v/t1.0-1/c0.0.160.160/p160x160/11046398_1413754142259317_606640341449680402_n.jpg?oh=6738b578bc134ff207679c832ecd5fe5&oe=562F72A4&gda=1445979187_2b0bf0ad3272047d64c7bfc2dbc09a29
This URL also has the access-control-allow-origin header. So I don't understand why this is failing.
Being Facebook, and given that thousands of apps, games and websites display users' profile pictures, I'm assuming this is possible. I'm aware that I can bounce through my own server, but I'm not sure why I should have to.
Answer
I eventually got cross domain image loading working in libgdx with the following code (which is pretty hacky and I'm sure can be improved). I've not managed to get it working with the AssetDownloader yet. I'll hopefully work that out eventually.
public void downloadPixmap(final String url, final DownloadPixmapResponse response) {
    final RootPanel root = RootPanel.get("embed-html");
    final Image img = new Image(url);
    img.getElement().setAttribute("crossOrigin", "anonymous");
    img.addLoadHandler(new LoadHandler() {
        @Override
        public void onLoad(LoadEvent event) {
            HtmlLauncher.application.getPreloader().images.put(url, ImageElement.as(img.getElement()));
            response.downloadComplete(new Pixmap(Gdx.files.internal(url)));
            root.remove(img);
        }
    });
    root.add(img);
}

interface DownloadPixmapResponse {
    void downloadComplete(Pixmap pixmap);
    void downloadFailed(Throwable e);
}
Are you setting the crossOrigin attribute on your img before requesting it?
var img = new Image();
img.crossOrigin = "anonymous";
img.src = "https://graph.facebook.com/1387819034852828/picture?width=150&height=150";
It was working for me when this question was asked. Unfortunately the URL above no longer points to anything, so I've changed it in the example below.
var img = new Image();
img.crossOrigin = "anonymous"; // COMMENT OUT TO SEE IT FAIL
img.onload = uploadTex;
img.src = "https://i.imgur.com/ZKMnXce.png";
function uploadTex() {
    var gl = document.createElement("canvas").getContext("webgl");
    var tex = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, tex);
    try {
        gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
        log("DONE: ", gl.getError());
    } catch (e) {
        log("FAILED to use image because of security:", e);
    }
}

function log() {
    var div = document.createElement("div");
    div.innerHTML = Array.prototype.join.call(arguments, " ");
    document.body.appendChild(div);
}
<body></body>
How to check you're receiving the headers
Open your devtools, pick the network tab, reload the page, select the image in question, look at both the REQUEST headers and the RESPONSE headers.
The request should show your browser sent an Origin: header
The response should show you received
Access-Control-Allow-Methods: GET, OPTIONS, ...
Access-Control-Allow-Origin: *
Note, both the response AND THE REQUEST must show the entries above. If the request is missing Origin: then you didn't set img.crossOrigin and the browser will not let you use the image even if the response said it was ok.
If your request has the Origin: header and the response does not have the other headers, then that server did not give permission to use the image beyond displaying it. In other words, it will work in an image tag and you can draw it to a canvas, but you can't use it in WebGL, and any 2D canvas you draw it into will become tainted so that toDataURL and getImageData stop working.
This is a classic cross-domain issue that happens when you're developing locally.
I use python's simple server as a quick fix for this.
navigate to your directory in the terminal, then type:
$ python -m SimpleHTTPServer
and you'll get
Serving HTTP on 0.0.0.0 port 8000 ...
so go to 0.0.0.0:8000/ and you should see the problem resolved.
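(With Python 3, the equivalent command is python3 -m http.server, which also serves on port 8000 by default.)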
You can base64 encode your texture.
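A minimal sketch of that approach: a data URI is same-origin, so it never taints the canvas. The yourBase64String below is a placeholder for the actual encoded PNG bytes, not real data.
var img = new Image();
img.onload = function() {
    var gl = document.createElement("canvas").getContext("webgl");
    var tex = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, tex);
    // No crossOrigin needed: data URIs do not trigger the CORS check.
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, img);
};
img.src = "data:image/png;base64," + yourBase64String; // placeholder for the encoded PNG bytes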

Bootstrap Modal with Google Maps with Canvas2HTML equals grey background

So I'm trying to use Google Maps by decoding GPS data from a mobile device and then generating a map route out of it. Using the Static Maps API, I can sort of approximate a trip, but the data gets generated every 1-5 seconds, which is far beyond the amount of information you can send over a GET request to Google Maps.
So my JavaScript map looks like this when running on a webpage:
Edited: DropBox apparently does not store photo links persistently, switching to OneDrive.
Should look like http://bit.ly/1tjxg6u
Which is glorious. I figured that if I used html2Canvas, I could generate a screenshot of the map, and transfer that file onto another portion of my server, or URI encode it. But when I tried that, I get this instead.
Does Look Like http://bit.ly/1jlrOvL
Not glorious at all. I then checked out rasterizeHTML, but it gave me a "cannot get innerHTML of undefined" error when I used rasterizeHTML.drawDocument and passed it modalMap (see the JavaScript code below). It does let me grab the same element's markup via .innerHTML and use drawHTML. No idea why. Using the HTML, that gets me this result:
All Three together http://bit.ly/1ssxIet
rasterizeHTML is in the lower right, html2canvas in the lower left. I'm sort of stuck halfway there with both.
Edited: After doing a lot of testing, the error is:
html2canvas: Error loading ":http://mt0.googleapis.com/vt?...
The URL does show the image when you click on it in the console, but html2canvas can't display it. I'm not sure if it's a URL encoding problem or what.
I enabled CORS in my headers, in Apache and in the .htaccess, just in case that was the issue. I then downloaded the .php proxy file and tried that as well. Neither worked.
The JavaScript code:
google.maps.event.addListenerOnce(map, 'tilesloaded', function(){
    // Try RasterizeHTML
    var modalMap = document.getElementById('mainPageCanvas');
    var modalMapHTML = modalMap.innerHTML;
    var canvasHolder = document.getElementById('rasterizeHTML');
    console.log(modalMap);
    console.log(canvasHolder);
    var options = {
        width : 1000,
        height : 400,
        executeJs : false,
        zoom: 2
    };
    rasterizeHTML.drawHTML(modalMapHTML, canvasHolder)
        .then(function success(renderResult) {
            console.log(renderResult);
        }, function error(e) {
            console.log(e);
        }.deb());
    html2canvas(document.getElementById('mainPageCanvas'), {
        proxy: "//localhost:85/ormc/consumer/build/ajax/php/html2canvasproxy.php",
        useCORS: true,
        allowTaint: true,
        logging: true,
        onrendered: function(canvas) {
            canvas.setAttribute('crossOrigin','anonymous');
            $('#secondaryPageCanvas').replaceWith(canvas);
            console.log("Canvas function called");
        }.deb()
    });
}.deb());
Now if I attempt to do a dataUrl conversion, I get this: "Uncaught SecurityError: Failed to execute 'toDataURL' on 'HTMLCanvasElement': Tainted canvases may not be exported."
The HTML is just:
<div id="mainPageCanvas" style="width:1000px;height:400px;"></div>
<div id='secondaryPageCanvas' style="width:1000px;height:400px;"></div>
<canvas id='rasterizeHTML' style='width:1000px;height:400px;'>
My headers being sent are:
Access-Control-Allow-Methods:OPTIONS, GET
Access-Control-Allow-Origin:*
Access-Control-Request-Method:*
Cache-Control:no-store, no-cache, must-revalidate, post-check=0, pre-check=0
There is also a CSP (report-only) just to see if anything is triggering it, but nothing is. So, what am I doing wrong?

Web Audio API: How to load another audio file?

I want to write a basic script with the HTML5 Web Audio API that can play some audio files, but I don't know how to unload a playing audio file and load another one. In my script, two audio files play at the same time, which is not what I wanted.
Here is my code:
var context,
    soundSource,
    soundBuffer;

// Step 1 - Initialise the Audio Context
context = new webkitAudioContext();

// Step 2: Load our Sound using XHR
function playSound(url) {
    // Note: this loads asynchronously
    var request = new XMLHttpRequest();
    request.open("GET", url, true);
    request.responseType = "arraybuffer";
    // Our asynchronous callback
    request.onload = function() {
        var audioData = request.response;
        audioGraph(audioData);
    };
    request.send();
}

// This is the code we are interested in
function audioGraph(audioData) {
    // create a sound source
    soundSource = context.createBufferSource();
    // The Audio Context handles creating source buffers from raw binary
    soundBuffer = context.createBuffer(audioData, true /* make mono */);
    // Add the buffered data to our object
    soundSource.buffer = soundBuffer;
    // Plug the cable from one thing to the other
    soundSource.connect(context.destination);
    // Finally
    soundSource.noteOn(context.currentTime);
}

// Stop all of the sounds
function stopSounds(){
    // How can I do this?
}

// Events for audio buttons
document.querySelector('.pre').addEventListener('click',
    function () {
        stopSounds();
        playSound('http://thelab.thingsinjars.com/web-audio-tutorial/hello.mp3');
    }
);
document.querySelector('.next').addEventListener('click',
    function() {
        stopSounds();
        playSound('http://thelab.thingsinjars.com/web-audio-tutorial/nokia.mp3');
    }
);
You should be pre-loading sounds into buffers once, at launch, and simply resetting the AudioBufferSourceNode whenever you want to play it back.
To play multiple sounds in sequence, you need to schedule them using noteOn(time), one after the other, based on the buffers' respective lengths.
To stop sounds, use noteOff.
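A rough sketch of that pattern, reusing the two URLs from the question (noteOn/noteOff match the old prefixed API used above; newer implementations call them start/stop):
var context = new webkitAudioContext();
var buffers = {};        // decoded AudioBuffers, keyed by URL
var currentSource = null;

// Pre-load and decode each file once, at launch.
function loadSound(url) {
    var request = new XMLHttpRequest();
    request.open("GET", url, true);
    request.responseType = "arraybuffer";
    request.onload = function() {
        context.decodeAudioData(request.response, function(buffer) {
            buffers[url] = buffer;
        });
    };
    request.send();
}

// Create a fresh source node for every playback; source nodes are one-shot.
function playSound(url) {
    stopSounds();
    currentSource = context.createBufferSource();
    currentSource.buffer = buffers[url];
    currentSource.connect(context.destination);
    currentSource.noteOn(context.currentTime);   // start(0) in newer browsers
}

function stopSounds() {
    if (currentSource) {
        currentSource.noteOff(context.currentTime); // stop(0) in newer browsers
        currentSource = null;
    }
}

loadSound('http://thelab.thingsinjars.com/web-audio-tutorial/hello.mp3');
loadSound('http://thelab.thingsinjars.com/web-audio-tutorial/nokia.mp3');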
Sounds like you are missing some fundamental web audio concepts. This (and more) is described in detail and shown with samples in this HTML5Rocks tutorial and the FAQ.