I'm porting my Firefox extension to Google Chrome. I make heavy use of nsIChannel to read HTTP headers and such, like so:
//initialize the channel in onStartRequest
onStartRequest: function (req /*, ctx*/) {
    var channel = req.QueryInterface(Components.interfaces.nsIChannel);
    //...more init stuff here
},
onDataAvailable: function (req, ctx, stream, offset, count) {
    //...store the data from the stream for later processing...
    stream_ctx.bstream.setInputStream(stream);
    stream_ctx.bytes += stream_ctx.bstream.readBytes(count);
},
Does Google's Chrome browser have equivalent functions? I've seen a bit of HTTP-listener-style stuff, but so far I haven't seen anything that has all of the features of nsIChannel. Still, Mozilla's docs on accessing low-level interfaces like this are a little better organized than what I've found for Chrome, so I might have just missed it.
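For reference, the HTTP-listener-style stuff I've seen is the chrome.webRequest API, which as far as I can tell only exposes the headers, never the response body. A minimal sketch, assuming a background page with the "webRequest" and host permissions declared in the manifest:
// background.js
chrome.webRequest.onHeadersReceived.addListener(
    function (details) {
        // details.responseHeaders is an array of {name, value} pairs
        details.responseHeaders.forEach(function (h) {
            console.log(h.name + ": " + h.value);
        });
    },
    { urls: ["<all_urls>"] },
    ["responseHeaders"]
);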
EDIT:
I'm using a stream listener, starting with this:
Components.classes["@mozilla.org/network/io-service;1"]
    .getService(Components.interfaces.nsIIOService)
    .newChannel(swf_url, null, null)
    .asyncOpen(stream_listener, null);
Here swf_url is the URL to a YouTube video.
The stream_listener is implemented to grab all the bytes from the incoming stream like so:
onDataAvailable: function (req, ctx, stream, offset, count) {
    stream_ctx.bstream.setInputStream(stream);
    stream_ctx.bytes += stream_ctx.bstream.readBytes(count);
},
When I get to onStopRequest I feed the bytes to a parser/decoder. What I don't know is how to replicate the onDataAvailable method to get the bytes into a stream I can feed into my parser.
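In case it helps frame the question: in Chrome I could apparently skip the streaming altogether and fetch the whole response as one ArrayBuffer with XHR, roughly like this (parse() is a hypothetical stand-in for my parser/decoder):
var xhr = new XMLHttpRequest();
xhr.open("GET", swf_url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
    var bytes = new Uint8Array(xhr.response);
    parse(bytes); // hypothetical entry point into my parser/decoder
};
xhr.send();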
I am currently looking for the best way to store incoming WebRTC video streams. I am joining a video call using WebRTC (via Chrome) and I would like to record every incoming video stream from each participant to the browser.
The solutions I am researching are:
Intercepting the network packets coming to the browser, e.g. using Wireshark, and then decoding them, following this article: https://webrtchacks.com/video_replay/
Modifying a browser to store the recording as a file, e.g. by modifying Chromium itself
Screen recorders or solutions like xvfb & ffmpeg are not options due to resource constraints. Is there any other way that could let me capture the packets or the encoded video as a file? The solution must work on Linux.
If the media stream is what you want, one method is to override the browser's RTCPeerConnection. Here is an example:
In an extension manifest add the following content script:
content_scripts": [
{
"matches": ["http://*/*", "https://*/*"],
"js": ["payload/inject.js"],
"all_frames": true,
"match_about_blank": true,
"run_at": "document_start"
}
]
inject.js
var inject = '('+function() {
    //override the browser's default RTCPeerConnection
    var origPeerConnection = window.RTCPeerConnection || window.webkitRTCPeerConnection || window.mozRTCPeerConnection;
    //make sure it is supported
    if (origPeerConnection) {
        //our own RTCPeerConnection
        var newPeerConnection = function(config, constraints) {
            console.log('PeerConnection created with config', config);
            //proxy the original peer connection
            var pc = new origPeerConnection(config, constraints);
            //store the old addStream
            var oldAddStream = pc.addStream;
            //addStream is called when a local stream is added;
            //arguments[0] is a local media stream
            pc.addStream = function() {
                console.log("our addStream called!");
                //our MediaStream object
                console.dir(arguments[0]);
                return oldAddStream.apply(this, arguments);
            };
            //ontrack is called when a remote track is added;
            //the media stream(s) are located in event.streams
            pc.ontrack = function(event) {
                console.log("ontrack got a track");
                console.dir(event);
            };
            window.ourPC = pc;
            return pc;
        };
        ['RTCPeerConnection', 'webkitRTCPeerConnection', 'mozRTCPeerConnection'].forEach(function(obj) {
            //override the objects if they exist on the window object
            if (window.hasOwnProperty(obj)) {
                window[obj] = newPeerConnection;
                //copy the static methods
                Object.keys(origPeerConnection).forEach(function(x) {
                    window[obj][x] = origPeerConnection[x];
                });
                window[obj].prototype = origPeerConnection.prototype;
            }
        });
    }
}+')();';
var script = document.createElement('script');
script.textContent = inject;
(document.head||document.documentElement).appendChild(script);
script.parentNode.removeChild(script);
I tested this with a voice call in Google Hangouts and saw that two MediaStreams were added via pc.addStream and one track was added via pc.ontrack. The streams passed to addStream appear to be the local media streams, while the event object in ontrack is an RTCTrackEvent with a streams property, which I assume is what you are looking for.
To access these streams from your extension's content script, you will need to create audio elements and set their "srcObject" property to the media stream, e.g.:
pc.ontrack = function(event) {
    //check if our element exists
    var elm = document.getElementById("remoteStream");
    if (elm == null) {
        //create an audio element
        elm = document.createElement("audio");
        elm.id = "remoteStream";
    }
    //set the srcObject to our stream (not sure if you need to clone it)
    elm.srcObject = event.streams[0].clone();
    //write the element to the body
    document.body.appendChild(elm);
    //fire a custom event so our content script knows the stream is available.
    //you could pass the id in the "detail" object, for example:
    //new CustomEvent("remoteStreamAdded", {"detail": {"id": "audio_element_id"}})
    //and then access it via e.detail.id in your event listener.
    var e = new CustomEvent("remoteStreamAdded");
    window.dispatchEvent(e);
}
Then in your content script you can listen for that event/access the mediastream like so:
window.addEventListener("remoteStreamAdded", function(e) {
    var elm = document.getElementById("remoteStream");
    var stream = elm.captureStream();
});
With the captured stream available to your content script you can do pretty much anything you want with it. For example, MediaRecorder works really well for recording the stream(s), or you could use something like peer.js or maybe binary.js to stream it to another source.
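For example, a minimal MediaRecorder sketch, assuming stream is the capture from the listener above and that audio/webm is an acceptable container:
var chunks = [];
var recorder = new MediaRecorder(stream, { mimeType: "audio/webm" });
recorder.ondataavailable = function (e) {
    //each chunk is a Blob of encoded audio
    if (e.data.size > 0) chunks.push(e.data);
};
recorder.onstop = function () {
    //assemble the chunks into a single playable/downloadable Blob
    var blob = new Blob(chunks, { type: "audio/webm" });
    console.log("recording available at", URL.createObjectURL(blob));
};
recorder.start(1000); //emit a chunk roughly every second
//...later, when the call ends: recorder.stop();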
I haven't tested this, but it should also be possible to override the local streams. For example, in inject.js you could establish some blank MediaStream, override navigator.mediaDevices.getUserMedia, and instead of returning the local media stream return your own.
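An untested sketch of that idea; window.ourStream is a hypothetical MediaStream you would have built elsewhere (e.g. from canvas.captureStream()):
var origGetUserMedia = navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices);
navigator.mediaDevices.getUserMedia = function (constraints) {
    return origGetUserMedia(constraints).then(function (realStream) {
        //substitute our own stream if we have one, otherwise fall through
        return window.ourStream || realStream;
    });
};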
This method should work in Firefox and maybe other browsers as well, assuming you use an extension/app to load the inject.js script at the start of the document. Having it load before any of the target page's libraries is key to making this work.
Capturing packets will only give you the network packets, which you would then need to decode into frames and put into a container. A server such as Janus can record videos.
Running headless Chrome and using the JavaScript MediaRecorder API is another option, but it is much heavier on resources.
I want to send streaming data (as sequences of ArrayBuffers) from a Chrome extension to a Chrome App. Since the Chrome message API (including chrome.runtime.sendMessage, postMessage, ...) does not support ArrayBuffer and JS arrays have poor performance, I have to try other methods. Eventually, I found that WebRTC over RTCDataChannel might be a good solution in my case.
I have succeeded to send string over a RTCDataChannel, but when I tried to send ArrayBuffer I got:
code: 19
message: "Failed to execute 'send' on 'RTCDataChannel': Could not send data"
name: "NetworkError"
It seems that it's not a bandwidth limit problem, since it failed even though I sent only one byte of data. Here is my code:
pc = new RTCPeerConnection(configuration, { optional: [ { RtpDataChannels: true } ]});
//...
var dataChannel = pc.createDataChannel("mydata", {reliable: true});
//...
var ab = new ArrayBuffer(8);
dataChannel.send(ab);
Tested on OS X 10.10.1 with Chrome M40 (stable) and M42 (Canary), and on a Chromebook with M40.
I have filed a bug for WebRTC here.
I modified my code, and now everything works amazingly:
Removed the RtpDataChannels option when creating the RTCPeerConnection. (Yes, remove the RtpDataChannels option if you want a working data channel; what a magical world!) A sender-side sketch follows the snippet below.
On the receiver side: there is no need to call createDataChannel; instead, handle onmessage and the other events using event.channel from the pc.ondatachannel callback:
pc.ondatachannel = function(event) {
    var receiveChannel = event.channel;
    receiveChannel.onmessage = function(event) {
        console.log("Got Data Channel Message:", event.data);
    };
};
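For completeness, a sketch of the corrected sender side based on the question's code (signaling and the offer/answer exchange are omitted, as in the original):
var pc = new RTCPeerConnection(configuration); //no RtpDataChannels option
var dataChannel = pc.createDataChannel("mydata", {reliable: true});
dataChannel.onopen = function() {
    //binary sends succeed once the SCTP-based channel is open
    var ab = new ArrayBuffer(8);
    dataChannel.send(ab);
};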
I am trying to include subtitles in a Chromecast application I'm building.
I am using the default receiver application.
I am writing a Chrome sender application using v1 of the Chrome sender API.
According to the Chromecast Sender API documentation, I should be passing an array of Track objects into the chrome.cast.media.MediaInfo object. My issue is that whenever I call chrome.cast.media.Track(trackId, trackType), it returns undefined. When I look through the public methods in chrome.cast.media through the console, I don't see anything related to Track. Link to the documentation here.
Below is my loadMedia method, where I try to include an array of Track objects along with my LoadRequest, as specified by the Cast API. The commented-out code is how I've seen closed captioning handled in one of the Cast GitHub repositories, but unfortunately I believe you have to handle that customData in your own custom receiver application.
Are subtitles through the Chrome sender SDK possible yet, or does one have to build their own receiver application and specifically handle text tracks through passed-in customData? Am I potentially using the wrong sender API?
function loadMedia() {
    mediaUrl = decodeURIComponent(_player.sources.mp4);
    var mediaInfo = new chrome.cast.media.MediaInfo(mediaUrl);
    mediaInfo.contentType = 'video/mp4';
    var track1 = new chrome.cast.media.Track(1, chrome.cast.media.TrackType.TEXT);
    track1.trackContentId = "https://dl.dropboxusercontent.com/u/35106650/test.vtt";
    mediaInfo.tracks = [track1];
    var request = new chrome.cast.media.LoadRequest(mediaInfo);
    // var json = {
    //     cc: {
    //         tracks: [{
    //             src: "https://dl.dropboxusercontent.com/u/35106650/test.vtt"
    //         }],
    //         active: 0
    //     }
    // };
    // request.customData = json;
    session.loadMedia(request, onMediaDiscovered.bind(this, 'loadMedia'), onMediaError);
}
Currently, neither the Default nor the Styled Receiver supports closed captions; you need to create your own receiver. We have a sample in our GitHub repo that can be used for doing exactly that.
Update: the Styled and Default Receivers now support tracks; see our documentation.
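As a sketch of what activating a text track then looks like on the sender side (assuming media is the chrome.cast.media.Media object delivered to onMediaDiscovered, and track ID 1 from the question's code):
var tracksRequest = new chrome.cast.media.EditTracksInfoRequest([1]); //IDs of the tracks to activate
media.editTracksInfo(tracksRequest,
    function () { console.log('text track activated'); },
    function (e) { console.error('failed to activate track', e); });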
I'm pretty new to the HTML5 Audio API: I've read some of the related articles at HTML5 Rocks, but it can be a little tricky flipping between JavaScript and Dart at times.
In any case, I've been experimenting with HTML5 audio in Dart. To produce sound effects for a simple game, I created a class as follows: it creates an AudioContext, loads sound data into AudioBuffers, and, when a sound needs to be played, creates an AudioBufferSourceNode through which to play the data stored in the buffers:
class Sfx {
  AudioContext audioContext;
  List<Map> soundList;
  int soundFiles;

  Sfx() {
    audioContext = new AudioContext();
    soundList = new List<Map>();
    var soundsToLoad = [
      {"name": "MISSILE", "url": "SFX/missile.wav"},
      {"name": "EXPLOSION", "url": "SFX/explosion.wav"}
    ];
    soundFiles = soundsToLoad.length;
    for (Map sound in soundsToLoad) {
      initSound(sound);
    }
  }

  bool allSoundsLoaded() => (soundFiles == 0);

  void initSound(Map soundMap) {
    HttpRequest req = new HttpRequest();
    req.open('GET', soundMap["url"], true);
    req.responseType = 'arraybuffer';
    req.on.load.add((Event e) {
      audioContext.decodeAudioData(
        req.response,
        (var buffer) {
          // successful decode
          print("...${soundMap["name"]} loaded...");
          soundList.add({"name": soundMap["name"], "buffer": buffer});
          soundFiles--;
        },
        (var error) {
          print("error loading ${soundMap["name"]}");
        }
      );
    });
    req.send();
  }

  void sfx(AudioBuffer buffer) {
    AudioBufferSourceNode source = audioContext.createBufferSource();
    source.connect(audioContext.destination, 0, 0);
    source.buffer = buffer;
    source.start(0);
  }

  void playSound(String sound) {
    for (Map m in soundList) {
      print(m);
      if (m["name"] == sound) {
        sfx(m["buffer"]);
        break;
      }
    }
  }
}
(The sound effects are in a folder "SFX". Now that I look at the code, there are probably a million better ways to organise the data, but that's beside the point right now.) I am able to play sound effects by creating an instance of Sfx and calling the method playSound,
e.g.
#import('dart:html');
#source('sfx.dart');

Sfx sfx;

void main() {
  sfx = new Sfx();
  window.on.keyUp.add((KeyboardEvent keX) {
    sfx.playSound("MISSILE");
  });
}
(Edit: added code to play sound when a key is hit.)
The problem is: although the sound effects play as expected in Safari (via the dart2js JavaScript), they are distorted when played in Dartium or in Chrome (again via the dart2js JavaScript). (In Firefox, there are even worse problems!)
Is there anything obvious that I have neglected to do or that I need to take into account? Otherwise, are there any references or tutorials, preferably in a Dart context, that might help?
thanks for trying Dart!
First off, Firefox doesn't support the Web Audio API (yet?). Chrome and Safari do support it. You can track adoption of the Web Audio API here: http://caniuse.com/#feat=audio-api
Second, please try this Web Audio API sample in Dartium: https://github.com/dart-lang/dart-html5-samples/tree/master/web/webaudio/intro You will need to clone the repo first and run it locally. This sample works for me locally.
This sounds more like a bug report. If the sample from dart-html5-samples works for you, but your above code continues to be distorted, please open a bug at http://dartbug.com/new so we can take a look.
One thing to consider is waiting until the specific MISSILE sound is loaded before hooking up the keyUp handler.
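A rough sketch of that, reusing the allSoundsLoaded() helper already in the Sfx class (untested):
void main() {
  sfx = new Sfx();
  window.on.keyUp.add((KeyboardEvent keX) {
    // only play once every buffer has finished decoding
    if (sfx.allSoundsLoaded()) {
      sfx.playSound("MISSILE");
    }
  });
}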
I want to write a basic script with the HTML5 Web Audio API that can play some audio files. But I don't know how to unload a playing audio file and load another one. In my script, two audio files play at the same time, which is not what I wanted.
Here is my code:
var context,
    soundSource,
    soundBuffer;

// Step 1 - Initialise the Audio Context
context = new webkitAudioContext();

// Step 2: Load our Sound using XHR
function playSound(url) {
    // Note: this loads asynchronously
    var request = new XMLHttpRequest();
    request.open("GET", url, true);
    request.responseType = "arraybuffer";
    // Our asynchronous callback
    request.onload = function() {
        var audioData = request.response;
        audioGraph(audioData);
    };
    request.send();
}

// This is the code we are interested in
function audioGraph(audioData) {
    // create a sound source
    soundSource = context.createBufferSource();
    // The Audio Context handles creating source buffers from raw binary
    soundBuffer = context.createBuffer(audioData, true/* make mono */);
    // Add the buffered data to our object
    soundSource.buffer = soundBuffer;
    // Plug the cable from one thing to the other
    soundSource.connect(context.destination);
    // Finally
    soundSource.noteOn(context.currentTime);
}

// Stop all of the sounds
function stopSounds() {
    // How can I do this?
}

// Events for audio buttons
document.querySelector('.pre').addEventListener('click',
    function () {
        stopSounds();
        playSound('http://thelab.thingsinjars.com/web-audio-tutorial/hello.mp3');
    }
);
document.querySelector('.next').addEventListener('click',
    function() {
        stopSounds();
        playSound('http://thelab.thingsinjars.com/web-audio-tutorial/nokia.mp3');
    }
);
You should be pre-loading the sounds into buffers once, at launch, and simply creating a fresh AudioBufferSourceNode whenever you want to play one back (source nodes are one-shot).
To play multiple sounds in sequence, you need to schedule them using noteOn(time), one after the other, based on the respective buffer lengths.
To stop sounds, use noteOff.
It sounds like you are missing some fundamental Web Audio concepts. These (and more) are described in detail and shown with samples in this HTML5Rocks tutorial and the FAQ.
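Putting those points together, a minimal sketch using the same noteOn/noteOff-era API as the question (newer implementations use start()/stop()):
var currentSource = null;

function playBuffer(buffer, when) {
    // a fresh source node is required for every playback
    currentSource = context.createBufferSource();
    currentSource.buffer = buffer;
    currentSource.connect(context.destination);
    currentSource.noteOn(when || context.currentTime);
}

function stopSounds() {
    if (currentSource) {
        currentSource.noteOff(0);
        currentSource = null;
    }
}

// play two buffers back to back by scheduling the second
// at the current time plus the first buffer's duration
function playSequence(bufferA, bufferB) {
    var t = context.currentTime;
    playBuffer(bufferA, t);
    playBuffer(bufferB, t + bufferA.duration);
}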