HTML5 web audio controls

I have a music playback example: http://www.smartjava.org/examples/webaudio/example3.html
I need to show an HTML5 audio player (with controls) for this song. How can I do it?
The JavaScript code from the example is below:
// create the audio context (Chrome only for now)
if (!window.AudioContext) {
    if (!window.webkitAudioContext) {
        alert('no audiocontext found');
    }
    window.AudioContext = window.webkitAudioContext;
}
var context = new AudioContext();
var audioBuffer;
var sourceNode;
var analyser;
var javascriptNode;
// get the context from the canvas to draw on
var ctx = $("#canvas").get()[0].getContext("2d");
// create a gradient for the fill. Note the strange
// offset, since the gradient is calculated based on
// the canvas, not the specific element we draw
var gradient = ctx.createLinearGradient(0, 0, 0, 300);
gradient.addColorStop(1, '#000000');
gradient.addColorStop(0.75, '#ff0000');
gradient.addColorStop(0.25, '#ffff00');
gradient.addColorStop(0, '#ffffff');
// load the sound
setupAudioNodes();
loadSound("http://www.audiotreasure.com/mp3/Bengali/04_john/04_john_04.mp3");
function setupAudioNodes() {
    // set up a script processor node
    javascriptNode = context.createScriptProcessor(2048, 1, 1);
    // connect to destination, else it isn't called
    javascriptNode.connect(context.destination);
    // set up an analyser
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;
    // create a buffer source node
    sourceNode = context.createBufferSource();
    sourceNode.connect(analyser);
    analyser.connect(javascriptNode);
    sourceNode.connect(context.destination);
}
// load the specified sound
function loadSound(url) {
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    // when loaded, decode the data
    request.onload = function() {
        context.decodeAudioData(request.response, function(buffer) {
            // when the audio is decoded, play the sound
            playSound(buffer);
        }, onError);
    };
    request.send();
}
function playSound(buffer) {
    sourceNode.buffer = buffer;
    sourceNode.start(0);
}
// log if an error occurs
function onError(e) {
    console.log(e);
}
// when the javascript node is called
// we use information from the analyser node
// to draw the volume
javascriptNode.onaudioprocess = function() {
    // get the frequency data for the first channel
    var array = new Uint8Array(analyser.frequencyBinCount);
    analyser.getByteFrequencyData(array);
    // clear the current state
    ctx.clearRect(0, 0, 1000, 325);
    // set the fill style
    ctx.fillStyle = gradient;
    drawSpectrum(array);
};
function drawSpectrum(array) {
    for (var i = 0; i < array.length; i++) {
        var value = array[i];
        ctx.fillRect(i * 5, 325 - value, 3, 325);
        // console.log([i, value])
    }
}

I think what you want is to use an audio tag for your source and use createMediaElementSource to pass the audio to Web Audio for visualization.
Beware that createMediaElementSource checks for CORS access, so you must have appropriate cross-origin access for this to work. (It looks like your audio source doesn't return the appropriate access headers for this to work.)
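A minimal sketch of that approach, assuming an audio element with id player (the element id and file URL are placeholders, and the server must send CORS headers for the analyser to see the data):
// <audio id="player" src="your-song.mp3" controls crossorigin="anonymous"></audio>
var context = new (window.AudioContext || window.webkitAudioContext)();
var audioElement = document.getElementById('player');
// route the element through an analyser instead of a buffer source node
var source = context.createMediaElementSource(audioElement);
var analyser = context.createAnalyser();
analyser.fftSize = 512;
source.connect(analyser);
analyser.connect(context.destination); // keep it audible
// the element's built-in controls now handle play/pause/seek;
// keep reading analyser data in an onaudioprocess handler as before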

Related

progressive load and play video from base64 pieces

I have many pieces of a video in base64.
I want to play the video progressively as I receive them.
var fileInput = document.querySelector('input#theInputFile'); // multiple
fileInput.addEventListener('change', function(e) {
    var files = fileInput.files;
    for (var i = 0; i < files.length; i++) {
        var file = fileInput.files[i];
        fileLoaded(file, 0, 102400, file.size);
    }
    e.preventDefault();
});
videoA = [];
function fileLoaded(file, ini, end, size) {
    if (end > size) { end = size; }
    var reader = new FileReader();
    var fr = new FileReader();
    fr.onloadend = function(e) {
        if (e.target.readyState == FileReader.DONE) {
            var piece = e.target.result;
            display(piece.replace('data:video/mp4;base64,', ''));
        }
    };
    var blob = file.slice(ini, end, file.type);
    fr.readAsDataURL(blob);
    var init = end;
    var endt = init + end;
    if (end < size) {
        fileLoaded(file, init, end, size);
    }
}
Trying to display the video by chunks:
var a = 0;
function display(vid, ini, end) {
    videoA.push(vid);
    $('#video').attr('src', 'data:video/mp4;base64,' + videoA[a]);
    a++;
}
I know this is not the right way, but I've been searching and no answer matches what I'm looking for.
I'm not even sure whether it is possible.
Thanks!
EDIT
I've tried to play the chunks one by one; the first one plays fine, but the rest of them give the error:
"Uncaught (in promise) DOMException: Failed to load because no supported source was found".
If I could just produce base64 chunks that play correctly, that would be enough for me.
OK, the solution is to create, in the browser, base64 pieces of the original uploaded file that an HTML5 player can actually play.
So I've posted another question asking for that:
Chunk video mp4 file into base64 pieces with javascript on browser
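For what it's worth, the standard way to feed a video to a player chunk by chunk is the Media Source Extensions API rather than swapping base64 src values. A minimal sketch, reusing fileInput and the #video element from the question; the codec string is an assumption and must match the actual file, and the file must be a fragmented MP4 (a plain MP4 is rejected by the SourceBuffer, which is essentially why per-chunk playback fails after the first piece):
var video = document.querySelector('#video');
var mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);
mediaSource.addEventListener('sourceopen', function() {
    // codec string is an assumption; it must match the actual file
    var sourceBuffer = mediaSource.addSourceBuffer('video/mp4; codecs="avc1.42E01E, mp4a.40.2"');
    var file = fileInput.files[0];
    var offset = 0, CHUNK = 102400;
    function appendNext() {
        if (offset >= file.size) {
            if (mediaSource.readyState === 'open') { mediaSource.endOfStream(); }
            return;
        }
        var fr = new FileReader();
        fr.onload = function(e) {
            sourceBuffer.appendBuffer(e.target.result); // raw bytes, no base64 round-trip
            offset += CHUNK;
        };
        fr.readAsArrayBuffer(file.slice(offset, offset + CHUNK));
    }
    // each successful append fires updateend, which pulls in the next chunk
    sourceBuffer.addEventListener('updateend', appendNext);
    appendNext();
});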

Adding static image to Lightswitch HTML 2013 Browse Screen

In my case, I have color coded some tiles in the HTML client and I want to add a simple color code key. I have the PNG file I want to use.
I do not require the ability to upload or change the image.
This link seems to achieve what I am looking for, but I am not sure where to implement it. Does all of this code go into the postRender of the Image control I created?
Add image to lightswitch using local property and file location
Here is the postRender of the simple Image data item I created as an Image local property and then dragged into the Solution Designer. It was basically copied from the link above, but I changed the name of the image file to match mine, and I have already added the file to the Content\Images folder structure; it shows in the file view:
myapp.BrowseOrderLines.ColorKey_postRender = function (element, contentItem) {
    // Write code here.
    function GetImageProperty(operation) {
        var image = new Image();
        var canvas = document.createElement("canvas");
        var ctx = canvas.getContext("2d");
        // XMLHttpRequest used to allow external cross-domain resources to be processed using the canvas.
        // unlike crossOrigin = "Anonymous", XMLHttpRequest works in IE10 (important for LightSwitch)
        // still requires the appropriate server-side ACAO header (see https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image)
        var xhr = new XMLHttpRequest();
        xhr.onload = function () {
            var url = URL.createObjectURL(this.response);
            image.onload = function () {
                URL.revokeObjectURL(url);
                canvas.width = image.width;
                canvas.height = image.height;
                ctx.drawImage(image, 0, 0);
                var dataURL = canvas.toDataURL("image/png");
                operation.complete(dataURL.substring(dataURL.indexOf(",") + 1));
            };
            image.src = url;
        };
        xhr.open('GET', this.imageSource, true);
        xhr.responseType = 'blob';
        xhr.send();
    };
    myapp.BrowseOrderLines.ColorKey_postRender = function (element, contentItem) {
        // Write code here.
        msls.promiseOperation(
            $.proxy(
                GetImageProperty,
                { imageSource: "content/images/Key.png" }
            )
        ).then(
            function (result) {
                contentItem.screen.ImageProperty = result;
            }
        );
    };
}
Currently, the Image control does show on the screen in the browser, and it is the custom size I chose, but it is just a light blue area that does not display my image file.
I am not sure whether I have embedded the image correctly, or whether that is a missing step.
Thank you!!
The easiest method of testing this approach would be to change your postRender to the following (which embeds the helper function within the postRender function):
myapp.BrowseOrderLines.ColorKey_postRender = function (element, contentItem) {
    function GetImageProperty(imageSource) {
        return msls.promiseOperation(
            function (operation) {
                var image = new Image();
                var canvas = document.createElement("canvas");
                var ctx = canvas.getContext("2d");
                // XMLHttpRequest used to allow external cross-domain resources to be processed using the canvas.
                // unlike crossOrigin = "Anonymous", XMLHttpRequest works in IE10 (important for LightSwitch)
                // still requires the appropriate server-side ACAO header (see https://developer.mozilla.org/en-US/docs/Web/HTML/CORS_enabled_image)
                var xhr = new XMLHttpRequest();
                xhr.onload = function () {
                    var url = URL.createObjectURL(this.response);
                    image.onload = function () {
                        URL.revokeObjectURL(url);
                        canvas.width = image.width;
                        canvas.height = image.height;
                        ctx.drawImage(image, 0, 0);
                        var dataURL = canvas.toDataURL("image/png");
                        operation.complete(dataURL.substring(dataURL.indexOf(",") + 1));
                    };
                    image.onerror = function () {
                        operation.error("Image load error");
                    };
                    image.src = url;
                };
                xhr.open('GET', imageSource, true);
                xhr.responseType = 'blob';
                xhr.send();
            }
        );
    };
    GetImageProperty("content/images/Key.png").then(function onComplete(result) {
        contentItem.screen.ImageProperty = result;
    }, function onError(error) {
        msls.showMessageBox(error);
    });
};
This assumes that you named the local property ImageProperty as per my original post and the Image control on your screen is named ColorKey.
In the above example, I've also taken the opportunity to slightly simplify and improve the code from my original post. It also includes a simple error handler which may flag up if there is a problem with loading the image.
If this still doesn't work, you could test the process by changing the image source file-name to content/images/user-splash-screen.png (this png file should have been added as a matter of course when you created your LightSwitch HTML project).
As the GetImageProperty function is a helper routine, rather than embedding it within the postRender you'd normally place it within a JavaScript helper module. This will allow it to be easily reused without repeating the function's code. If you don't already have such a library and you're interested in implementing one, the following post covers some of details involved in doing this:
Lightswitch HTML global JS file to pass variable
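As a rough illustration (the file name, script location, and namespace below are all hypothetical), such a helper module could expose the routine above on a global object:
// scripts/helpers.js -- hypothetical shared module, referenced from default.htm with
// <script type="text/javascript" src="scripts/helpers.js"></script>
(function (global) {
    "use strict";
    global.myHelpers = global.myHelpers || {};
    // same routine as in the answer above, exposed for reuse across screens
    global.myHelpers.getImageProperty = function (imageSource) {
        return msls.promiseOperation(function (operation) {
            var image = new Image();
            var canvas = document.createElement("canvas");
            var xhr = new XMLHttpRequest();
            xhr.onload = function () {
                var url = URL.createObjectURL(this.response);
                image.onload = function () {
                    URL.revokeObjectURL(url);
                    canvas.width = image.width;
                    canvas.height = image.height;
                    canvas.getContext("2d").drawImage(image, 0, 0);
                    var dataURL = canvas.toDataURL("image/png");
                    operation.complete(dataURL.substring(dataURL.indexOf(",") + 1));
                };
                image.onerror = function () { operation.error("Image load error"); };
                image.src = url;
            };
            xhr.open("GET", imageSource, true);
            xhr.responseType = "blob";
            xhr.send();
        });
    };
}(window));
// a screen's postRender can then simply call:
// myHelpers.getImageProperty("content/images/Key.png").then(function (result) { ... });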

In chrome App, video file is not saved with its original data/size

In appCtrl.js, for saving video file -
$('#save_file').click(function(e) {
    var config = {type: 'saveFile', suggestedName: chosenEntry.name};
    chrome.fileSystem.chooseEntry(config, function(writableEntry) {
        // blob content is the DataUrl
        var blob = new Blob([$scope.blobContent], {type: 'video/mp4'});
        $scope.writeFileEntry(writableEntry, blob, function(e) {
            console.log('Write complete :)');
        });
    });
});
$scope.writeFileEntry = function(writableEntry, opt_blob, callback) {
    if (!writableEntry) {
        console.log('Nothing selected.');
        return;
    }
    writableEntry.createWriter(function(writer) {
        writer.onerror = $scope.errorHandler;
        writer.onwriteend = callback;
        // If we have data, write it to the file. Otherwise, just use the file we loaded.
        if (opt_blob) {
            writer.truncate(opt_blob.size);
            $scope.waitForIO(writer, function() {
                writer.seek(0);
                writer.write(opt_blob);
            });
        } else {
            chosenEntry.file(function(file) {
                writer.truncate(file.fileSize);
                $scope.waitForIO(writer, function() {
                    writer.seek(0);
                    writer.write(file);
                });
            });
        }
    }, $scope.errorHandler);
}
$scope.waitForIO = function(writer, callback) {
    // set a watchdog to avoid eventual locking:
    var start = Date.now();
    // wait for a few seconds
    var reentrant = function() {
        if (writer.readyState === writer.WRITING && Date.now() - start < 4000) {
            setTimeout(reentrant, 100);
            return;
        }
        if (writer.readyState === writer.WRITING) {
            console.error("Write operation taking too long, aborting!" +
                " (current writer readyState is " + writer.readyState + ")");
            writer.abort();
        } else {
            callback();
        }
    };
    setTimeout(reentrant, 100);
};
With the above code the video file is saved, but when I try to play the saved file in Windows Media Player or VLC, it tells me: "Windows Media Player cannot play the file. The player might not support the file type or might not support the codec that was used to compress the file."
Can you please guide me on where I'm going wrong? This is my first Chrome app.
Thanks in advance.
The file is corrupted because you are writing the data-URL string itself to disk instead of the binary data it encodes. Decode the data URL into a Blob before writing, like this:
function dataURItoBlob(dataURI) {
    // convert base64 to raw binary data held in a string
    // doesn't handle URLEncoded DataURIs
    var byteString = atob(dataURI.split(',')[1]);
    // separate out the mime component
    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
    // write the bytes of the string to an ArrayBuffer
    var ab = new ArrayBuffer(byteString.length);
    var ia = new Uint8Array(ab);
    for (var i = 0; i < byteString.length; i++) {
        ia[i] = byteString.charCodeAt(i);
    }
    return new Blob([ab], {type: mimeString || 'video/mp4'});
}
And handle the click like this:
$('#save_file').click(function(e) {
    var config = {type: 'saveFile', suggestedName: chosenEntry.name};
    chrome.fileSystem.chooseEntry(config, function(writableEntry) {
        var blob = dataURItoBlob($scope.blobContent);
        $scope.writeFileEntry(writableEntry, blob, function(e) {
            console.log('Write complete :)');
        });
    });
});

WebRTC audio heard without <audio> element (RTCMultiConnection)

Audio is being heard even though no audio element seems to be inserted in the DOM.
Scenario:
Create PeerConnection without streams
Add a stream but disable the code that adds MediaElements (audio,video) to DOM
Issue:
After the stream gets across, audio can be heard from headphones (or speakers).
What should happen:
Since I'm not attaching anything to the dom I expect no audio to be heard.
Code for replicating the scenario
// <body>
// <script src="https://cdn.webrtc-experiment.com/RTCMultiConnection.js"></script>
// <button id="start">Start!</button>
// </body>
$('#start').click(function() {
    var NO_MEDIA_SESSION = {video: false, audio: false, oneway: true};
    var caller = new RTCMultiConnection('lets-try');
    caller.session = NO_MEDIA_SESSION;
    caller.dontAttachStream = true;
    caller.onstream = function() { console.log("Got stream but not attaching"); };
    var receiver = new RTCMultiConnection('lets-try');
    receiver.session = NO_MEDIA_SESSION;
    receiver.dontAttachStream = true;
    receiver.onstream = function() { console.log("Got stream but not attaching"); };
    caller.open();
    receiver.connect();
    receiver.onconnected = function() {
        console.log("Connected!");
        caller.addStream({audio: true});
    };
});
I'm interested in how it is possible to hear a MediaStream without an audio element in the DOM.
If any RTCMultiConnection specialists are answering, could you also point me to how to keep the audio stream from being made audible? (I want to get the stream and attach it later myself.)
RTCMultiConnection creates a media element on the fly to make sure the onstream event is fired only once the media stream has actually started flowing.
connection.onstream = function(event) {
    event.mediaElement.pause(); // or volume=0
    // or
    event.mediaElement = null;
    // or
    delete event.mediaElement;
};
Update: use the following snippet:
var connection = new RTCMultiConnection();
connection.session = {
    data: true
};
btnOpenRoom.onclick = function() {
    connection.open('roomid');
};
btnJoinRoom.onclick = function() {
    connection.join('roomid');
};
btnAddAudioStream.onclick = function() {
    connection.addStream({
        audio: true
    });
};
btnAddAudioVideoStream.onclick = function() {
    connection.addStream({
        audio: true,
        video: true
    });
};
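If the aim is to receive the stream silently and attach it yourself later, one option is to combine this with the onstream handler shown earlier, along these lines (pendingStream is just an illustrative name, and event.stream is assumed to carry the raw MediaStream):
connection.onstream = function(event) {
    // silence the auto-created element instead of removing it
    event.mediaElement.muted = true;
    event.mediaElement.volume = 0;
    // keep the raw MediaStream around to attach to your own element later
    window.pendingStream = event.stream;
};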

How to get media stream object from HTML5 video element in javascript

Hi all,
I'm doing peer-to-peer communication using WebRTC. We have a MediaStream object from getUserMedia, which is given as the input stream to the peer connection. Here I need the video stream of a video file selected from the local drive, which is playing in an HTML5 video element.
Is it possible to create a MediaStream object from the video tag?
Thanks,
suri
For now you can't add a media stream from a video tag, but it should be possible in the future, as explained on MDN:
MediaStream objects have a single input and a single output. A MediaStream object generated by getUserMedia() is called local, and has as its source input one of the user's cameras or microphones. A non-local MediaStream may be representing a media element, like <video> or <audio>, a stream originating over the network and obtained via the WebRTC PeerConnection API, or a stream created using the Web Audio API MediaStreamAudioSourceNode.
But you can use the Media Source Extensions API to do what you want: you have to put the local file into a stream and append it to a MediaSource object. You can learn more about MSE here: http://www.w3.org/TR/media-source/
And you can find a demo and the source of the method above here
2021 update: It is now possible using MediaRecorder interface: https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder
Example from same page:
if (navigator.mediaDevices) {
    console.log('getUserMedia supported.');
    var constraints = { audio: true };
    var chunks = [];
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function(stream) {
            var mediaRecorder = new MediaRecorder(stream);
            visualize(stream);
            record.onclick = function() {
                mediaRecorder.start();
                console.log(mediaRecorder.state);
                console.log("recorder started");
                record.style.background = "red";
                record.style.color = "black";
            }
            stop.onclick = function() {
                mediaRecorder.stop();
                console.log(mediaRecorder.state);
                console.log("recorder stopped");
                record.style.background = "";
                record.style.color = "";
            }
            mediaRecorder.onstop = function(e) {
                console.log("data available after MediaRecorder.stop() called.");
                var clipName = prompt('Enter a name for your sound clip');
                var clipContainer = document.createElement('article');
                var clipLabel = document.createElement('p');
                var audio = document.createElement('audio');
                var deleteButton = document.createElement('button');
                clipContainer.classList.add('clip');
                audio.setAttribute('controls', '');
                deleteButton.innerHTML = "Delete";
                clipLabel.innerHTML = clipName;
                clipContainer.appendChild(audio);
                clipContainer.appendChild(clipLabel);
                clipContainer.appendChild(deleteButton);
                soundClips.appendChild(clipContainer);
                audio.controls = true;
                var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
                chunks = [];
                var audioURL = URL.createObjectURL(blob);
                audio.src = audioURL;
                console.log("recorder stopped");
                deleteButton.onclick = function(e) {
                    evtTgt = e.target;
                    evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode);
                }
            }
            mediaRecorder.ondataavailable = function(e) {
                chunks.push(e.data);
            }
        })
        .catch(function(err) {
            console.log('The following error occurred: ' + err);
        })
}
MDN also has a detailed mini tutorial: https://developer.mozilla.org/en-US/docs/Web/API/MediaStream_Recording_API/Recording_a_media_element
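That mini tutorial is built around HTMLMediaElement.captureStream(), which is the piece that directly answers the original question; a minimal sketch (the peerConnection variable is assumed to already exist):
var video = document.querySelector('video');
// Chrome exposes captureStream(); Firefox ships it prefixed as mozCaptureStream()
var stream = video.captureStream ? video.captureStream() : video.mozCaptureStream();
// the resulting MediaStream can be handed to WebRTC like any getUserMedia stream
stream.getTracks().forEach(function(track) {
    peerConnection.addTrack(track, stream);
});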