Why is the Web Audio output from an oscillator not working as expected?

I want to create an audio program that can play audio from a very low frequency up to a high frequency. However, this code produces different output from what I expect (even on the same device):
The sound comes in suddenly, while the expected result is that it comes in gradually. I am sure my hearing is okay, because I have asked my friends to listen as well.
The audio sounds different at the same frequency.
WARNING: Please turn your volume down very low before running this script, to avoid any hearing damage.
Here is the code:
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();

var osc_arr = [];

// Stop and disconnect every oscillator that is still playing.
function purgeSound() {
  osc_arr.forEach(function(v) {
    try {
      v.stop();
      v.disconnect(audioCtx.destination);
    } catch (e) {}
  });
  osc_arr = [];
}

function playSoundAtFreq(fq) {
  purgeSound();
  var osc = audioCtx.createOscillator();
  osc_arr.push(osc);
  osc.type = 'square';
  osc.frequency.setValueAtTime(fq, audioCtx.currentTime); // value in hertz
  $('#fff').val(fq);
  osc.connect(audioCtx.destination);
  osc.start();
}
var _break = false;

$('#stop').click(function() {
  purgeSound();
  _break = true;
});

function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

var pointer = 0;
var go = appendAttemptAsync(10000);

async function appendAttemptAsync(range) {
  var target = pointer + range;
  for (; pointer < target; pointer++) {
    if (_break) return; // check on every step, not just on entry
    playSoundAtFreq(pointer);
    console.log(pointer);
    await sleep(100);
  }
  return 5221;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<button id='stop'>stop</button>
<input id="fff" type="text" />
Thanks for any suggestions to improve my code.

If you want an Oscillator to sweep like in the YouTube video that you mentioned, you can do something like:
let osc = new OscillatorNode(audioCtx);
osc.connect(audioCtx.destination);
osc.frequency.setValueAtTime(20, audioCtx.currentTime);
osc.frequency.linearRampToValueAtTime(audioCtx.sampleRate / 2, audioCtx.currentTime + 300);
osc.start();
Change the 300 to whatever time you want the sweep to take; I arbitrarily chose 300 seconds (5 minutes).
I do not know why your example doesn't work, but this snippet is the typical way to sweep a tone using WebAudio.
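As a side note, pitch perception is roughly logarithmic, so a linear ramp spends most of the sweep up in the high frequencies. If the sweep sounds uneven, an exponential ramp may be closer to what you expect; a minimal sketch of the same idea (note the start value must be greater than 0 for an exponential ramp):

let osc = new OscillatorNode(audioCtx);
osc.connect(audioCtx.destination);
osc.frequency.setValueAtTime(20, audioCtx.currentTime); // must be > 0 for the exponential ramp
osc.frequency.exponentialRampToValueAtTime(audioCtx.sampleRate / 2, audioCtx.currentTime + 300);
osc.start();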

Related

Implementing Three.js SSAOPass in AFrame

I was able to successfully integrate the three.js EffectComposer into A-Frame as a component by exporting everything as THREE.EffectComposer, THREE.SSAOPass, etc., adding the effect inside an A-Frame component, and tweaking the A-Frame renderer to update the effects in the scene. OutlinePass from three.js worked fine in this code, but SSAOPass is not working and I don't get any errors. Could someone please help me figure out the problem? The code for the SSAOPass looks like this:
AFRAME.registerComponent('ssao', {
  init: function () {
    this.el.addEventListener('that', evt => this.onEnter());
    this.el.addEventListener('mouseleave', evt => this.onLeave());
    setTimeout(() => this.el.emit("that"), 2000);
  },
  onEnter: function () {
    const scene = this.el.sceneEl.object3D;
    const camera = this.el.sceneEl.camera;
    const renderer = this.el.sceneEl.renderer;
    const render = renderer.render;
    const composer = new THREE.EffectComposer(renderer);
    //let renderPass = new THREE.RenderPass(scene, camera);
    //let outlinePass = new THREE.OutlinePass(new THREE.Vector2(window.innerWidth, window.innerHeight), scene, camera);
    const ssaoPass = new THREE.SSAOPass(scene, camera, window.innerWidth, window.innerHeight);
    //composer.addPass(renderPass);
    //composer.addPass(outlinePass);
    ssaoPass.kernelRadius = 16;
    composer.addPass(ssaoPass);
    // let objects = [];
    // this.el.object3D.traverse(node => {
    //   if (!node.isMesh) return;
    //   objects.push(node);
    // });
    // outlinePass.selectedObjects = objects;
    // outlinePass.renderToScreen = true;
    // outlinePass.edgeStrength = this.data.strength;
    // outlinePass.edgeGlow = this.data.glow;
    // outlinePass.visibleEdgeColor.set(this.data.color);
    // HACK the A-Frame render method (a bit ugly)
    const clock = new THREE.Clock();
    this.originalRenderMethod = render;
    let calledByComposer = false;
    renderer.render = function () {
      if (calledByComposer) {
        render.apply(renderer, arguments);
      } else {
        calledByComposer = true;
        composer.render(clock.getDelta());
        calledByComposer = false;
      }
    };
  },
  onLeave: function () {
    this.el.sceneEl.renderer.render = this.originalRenderMethod;
  },
  remove: function () {
    this.onLeave();
  }
});
I have also created a Glitch project, which I am sharing here. Please feel free to join and collaborate on my project.
Edit link: https://glitch.com/edit/#!/accessible-torpid-partridge
Site link: https://accessible-torpid-partridge.glitch.me
Thanks in advance.
The code is correct; all you need to do is tweak the exposed SSAOShader uniforms - SSAOPass.kernelRadius, SSAOPass.minDistance, SSAOPass.maxDistance - as in the three.js example.
Keep in mind that the scale in that example is huge, so the values will need to be quite different in a default A-Frame scene.
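For example, hypothetical starting values for a metre-scale A-Frame scene might look like this (these numbers are guesses to tune live, not values taken from the three.js example):

ssaoPass.kernelRadius = 0.5;  // sampling radius in world units
ssaoPass.minDistance = 0.001; // occlusion depth cutoffs
ssaoPass.maxDistance = 0.05;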
It's a good idea to be able to update the component dynamically (via setAttribute(), if you properly handle updates) so you can see what's going on in real time - something like I did here: SSAO in a-frame (also based on Don McCurdy's gist).
I've used some basic HTML elements; most three.js examples use dat.GUI, which is made for demo/debug tweaks.

Progressively load and play video from base64 pieces

I have many pieces of a video in base64.
What I want is to play the video progressively as I receive them.
var fileInput = document.querySelector('input#theInputFile'); // multiple
fileInput.addEventListener('change', function(e) {
  var files = fileInput.files;
  for (var i = 0; i < files.length; i++) {
    var file = fileInput.files[i];
    fileLoaded(file, 0, 102400, file.size);
  }
  e.preventDefault();
});
var videoA = [];

function fileLoaded(file, ini, end, size) {
  if (end > size) { end = size; }
  var fr = new FileReader();
  fr.onloadend = function(e) {
    if (e.target.readyState == FileReader.DONE) {
      var piece = e.target.result;
      display(piece.replace('data:video/mp4;base64,', ''));
    }
  };
  var blob = file.slice(ini, end, file.type);
  fr.readAsDataURL(blob);
  // Advance the window by one chunk size and keep reading until the end of the file.
  // Note: the reads run in parallel, so the onloadend callbacks are not guaranteed
  // to arrive in order.
  if (end < size) {
    fileLoaded(file, end, end + 102400, size);
  }
}
Trying to display the video by chunks:
var a = 0;
function display(vid, ini, end) {
  videoA.push(vid);
  $('#video').attr('src', 'data:video/mp4;base64,' + videoA[a]);
  a++;
}
I know this is not the right way, but I've been searching and no answer matches what I'm looking for. I'm not even sure whether it is possible.
Thanks!
EDIT
I've tried to play the chunks one by one: the first one plays well, but the rest of them give the error:
"Uncaught (in promise) DOMException: Failed to load because no supported source was found."
If I could just produce base64 chunks that play correctly, that would be enough for me.
OK, the solution is to fix how the base64 pieces are created from the original uploaded file in the browser, so that each piece can be played by an HTML5 player.
So I've posted another question asking for that:
Chunk video mp4 file into base64 pieces with javascript on browser
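For reference, the usual way to feed video to a player progressively is the Media Source Extensions API rather than swapping base64 data URLs on the src attribute. A minimal sketch, assuming the file is a fragmented MP4, the codec string matches the actual file, and each piece arrives as an ArrayBuffer (decode base64 pieces first if needed):

var video = document.querySelector('#video');
var mediaSource = new MediaSource();
video.src = URL.createObjectURL(mediaSource);

mediaSource.addEventListener('sourceopen', function () {
  // The codec string below is an assumption; it must match the real file.
  var sourceBuffer = mediaSource.addSourceBuffer('video/mp4; codecs="avc1.42E01E, mp4a.40.2"');
  var queue = [];
  sourceBuffer.addEventListener('updateend', function () {
    if (queue.length) sourceBuffer.appendBuffer(queue.shift());
  });
  // Call this with each ArrayBuffer chunk as it arrives.
  window.appendChunk = function (chunk) {
    if (sourceBuffer.updating || queue.length) queue.push(chunk);
    else sourceBuffer.appendBuffer(chunk);
  };
});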

Live audio via socket.io 1.0

As stated on the socket.io website:
Binary streaming
Starting in 1.0, it's possible to send any blob back and forth: image, audio, video.
I'm now wondering whether this could be the solution for something I've been trying to achieve recently.
I'm looking for a way to broadcast a live audio stream from a source (A - i.e., mic input) to all clients connected to a website of mine. Is something like this possible? I've been playing with WebRTC (https://www.webrtc-experiment.com/) examples, but I haven't managed to reach the goal for more than a few connected clients.
My idea is to take getUserMedia or any other audio source (PCM, whatever) on side A, chop it into chunks, deliver them to the clients, and play them with, for example, an HTML5 audio element. I need the stream to be as close to realtime as possible; shoutcast/icecast services weren't fast enough (and indeed they aren't the solution to my problem; they aren't meant to be used this way), and I don't really care about audio quality. Cross-platform compatibility would be awesome.
Is something like that possible, using socket.io to deliver that data to the clients?
I would be very grateful for any reference, hint or source that could help me achieve this.
Thanks a lot.
This example shows you how to use the MediaRecorder to upload audio and then forward it using socket.io. This code will only broadcast after you've called mediaRecorder.stop(). You can choose to broadcast inside ondataavailable instead; if you do that, you might want to pass a timeslice to mediaRecorder.start() so that ondataavailable doesn't fire too often.
This solution isn't truly live, but I think it will help people who come back and find this question.
Client Code
var constraints = { audio: true };
navigator.mediaDevices.getUserMedia(constraints).then(function(mediaStream) {
  var mediaRecorder = new MediaRecorder(mediaStream);
  mediaRecorder.onstart = function(e) {
    this.chunks = [];
  };
  mediaRecorder.ondataavailable = function(e) {
    this.chunks.push(e.data);
  };
  mediaRecorder.onstop = function(e) {
    var blob = new Blob(this.chunks, { 'type' : 'audio/ogg; codecs=opus' });
    socket.emit('radio', blob);
  };
  // Start recording
  mediaRecorder.start();
  // Stop recording after 5 seconds and broadcast it to the server
  setTimeout(function() {
    mediaRecorder.stop();
  }, 5000);
});

// When the client receives a voice message it will play the sound
socket.on('voice', function(arrayBuffer) {
  var blob = new Blob([arrayBuffer], { 'type' : 'audio/ogg; codecs=opus' });
  var audio = document.createElement('audio');
  audio.src = window.URL.createObjectURL(blob);
  audio.play();
});
Server Code
socket.on('radio', function(blob) {
  // can choose to broadcast it to whomever you want
  socket.broadcast.emit('voice', blob);
});
In the client code you can use setInterval() instead of setTimeout() and call mediaRecorder.stop() followed by mediaRecorder.start() on each tick, so that a blob is emitted continuously every 5 seconds:
setInterval(function() {
  mediaRecorder.stop();
  mediaRecorder.start();
}, 5000);
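Alternatively, as mentioned above, you can broadcast from inside ondataavailable by passing a timeslice to mediaRecorder.start(). A minimal sketch (note that, depending on the container, chunks after the first may lack headers and may not be playable on their own):

navigator.mediaDevices.getUserMedia({ audio: true }).then(function(mediaStream) {
  var mediaRecorder = new MediaRecorder(mediaStream);
  mediaRecorder.ondataavailable = function(e) {
    socket.emit('radio', e.data); // each Blob holds roughly one timeslice of audio
  };
  mediaRecorder.start(1000); // fire ondataavailable about every 1000 ms
});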

Getting the list of voices in speechSynthesis (Web Speech API)

The following HTML shows an empty array in the console on the first click:
<!DOCTYPE html>
<html>
<head>
  <script>
    function test() {
      console.log(window.speechSynthesis.getVoices());
    }
  </script>
</head>
<body>
  <button onclick="test()">Test</button>
</body>
</html>
On the second click you will get the expected list.
If you add an onload event to call this function (<body onload="test()">), then you get the correct result on the first click. Note that the call from onload itself still doesn't work properly: it returns an empty array on page load, but clicking works afterwards.
Questions:
Since it might be a bug in the beta version, I gave up on the "why" questions.
Now, the question is what to do if you want to access window.speechSynthesis on page load:
What is the best workaround for this issue?
How can you make sure speechSynthesis is loaded when the page loads?
Background and tests:
I was testing the new features in the Web Speech API when I ran into this problem in my code:
<script type="text/javascript">
$(document).ready(function(){
  // Browser support messages. (You might need Chrome 33.0 Beta)
  if (!('speechSynthesis' in window)) {
    alert("You don't have speechSynthesis");
  }
  var voices = window.speechSynthesis.getVoices();
  console.log(voices); // []
  $("#test").on('click', function(){
    var voices = window.speechSynthesis.getVoices();
    console.log(voices); // [SpeechSynthesisVoice, ...]
  });
});
</script>
<a id="test" href="#">click here if 'ready()' didn't work</a>
My question was: why does window.speechSynthesis.getVoices() return an empty array after the page has loaded and the ready handler has fired, while the same function returns an array of Chrome's available voices when triggered by onclick?
It seems Chrome loads window.speechSynthesis after the page load!
The problem is not in the ready event. If I remove the line var voices = ... from the ready handler, the first click still shows an empty list in the console, but the second click works fine.
It seems window.speechSynthesis needs more time to load after the first call. You need to call it twice, and you also need to wait and let it load between the first and second calls to window.speechSynthesis. For example, the following code shows two empty arrays in the console if you run it for the first time:
// First speechSynthesis call
var voices = window.speechSynthesis.getVoices();
console.log(voices);
// Second speechSynthesis call
voices = window.speechSynthesis.getVoices();
console.log(voices);
According to Web Speech API Errata (E11 2013-10-17), the voice list is loaded async to the page. An onvoiceschanged event is fired when they are loaded.
voiceschanged: Fired when the contents of the SpeechSynthesisVoiceList, that the getVoices method will return, have changed. Examples include: server-side synthesis where the list is determined asynchronously, or when client-side voices are installed/uninstalled.
So, the trick is to set your voice from the callback for that event listener:
// wait on voices to be loaded before fetching list
window.speechSynthesis.onvoiceschanged = function() {
  window.speechSynthesis.getVoices();
  ...
};
You can use setInterval to wait until the voices are loaded before using them however you need, and then clear the interval:
function speakWhenVoicesReady() {
  var timer = setInterval(function() {
    var voices = speechSynthesis.getVoices();
    console.log(voices);
    if (voices.length !== 0) {
      var msg = new SpeechSynthesisUtterance(/*some string here*/);
      msg.voice = voices[/*some number here to choose from array*/];
      speechSynthesis.speak(msg);
      clearInterval(timer);
    }
  }, 200);
}

$("#test").on('click', speakWhenVoicesReady); // bind a function, not an interval id
After studying the behavior on Google Chrome and Firefox, this is what gets all the voices.
Since it involves something asynchronous, it is best done with a promise:
const allVoicesObtained = new Promise(function(resolve, reject) {
  let voices = window.speechSynthesis.getVoices();
  if (voices.length !== 0) {
    resolve(voices);
  } else {
    window.speechSynthesis.addEventListener("voiceschanged", function() {
      voices = window.speechSynthesis.getVoices();
      resolve(voices);
    });
  }
});
allVoicesObtained.then(voices => console.log("All voices:", voices));
Note:
When the voiceschanged event fires, we need to call .getVoices() again; the original array won't be populated with content.
On Google Chrome, we don't have to call getVoices() initially; we only need to listen for the event. On Firefox, listening is not enough: you have to call getVoices() first, then listen for the voiceschanged event, and set the array using getVoices() once you get notified.
Using a promise makes the code cleaner: everything related to getting voices lives in this promise, whereas if you instead put this code in your speech routine, it gets quite messy.
You can write a voiceObtained promise that resolves to the voice you want, so your speaking function can just do voiceObtained.then(voice => { ... }) and call window.speechSynthesis.speak() inside that handler. Or you can even write a promise so that speechReady("hello world").then(speech => { window.speechSynthesis.speak(speech) }) says something.
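For example, a minimal sketch of that last idea, built on the allVoicesObtained promise above (the voice choice is an arbitrary placeholder):

function speechReady(text) {
  return allVoicesObtained.then(voices => {
    const utterance = new SpeechSynthesisUtterance(text);
    utterance.voice = voices.find(v => v.default) || voices[0]; // placeholder choice
    return utterance;
  });
}

speechReady("hello world").then(speech => window.speechSynthesis.speak(speech));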
Here's the answer:
function synthVoice(text) {
  const awaitVoices = new Promise(resolve =>
    window.speechSynthesis.onvoiceschanged = resolve)
    .then(() => {
      const synth = window.speechSynthesis;
      var voices = synth.getVoices();
      console.log(voices);
      const utterance = new SpeechSynthesisUtterance();
      utterance.voice = voices[3];
      utterance.text = text;
      synth.speak(utterance);
    });
}
At first I used onvoiceschanged, but it kept firing even after the voices were loaded, so my goal was to avoid onvoiceschanged at all costs.
This is what I came up with. It seems to work so far; I will update this answer if it breaks.
var synth = window.speechSynthesis;

loadVoicesWhenAvailable();

function loadVoicesWhenAvailable() {
  var voices = synth.getVoices();
  if (voices.length !== 0) {
    console.log("voices loaded");
    LoadVoices(); // LoadVoices() is the author's own callback, defined elsewhere
  } else {
    setTimeout(function () { loadVoicesWhenAvailable(); }, 10);
  }
}
The setInterval solution by Salman Oskooi was perfect.
Please see https://jsfiddle.net/exrx8e1y/
function myFunction() {
  dtlarea = document.getElementById("details");
  //dtlarea.style.display="none";
  dtltxt = "";
  var mytimer = setInterval(function() {
    var voices = speechSynthesis.getVoices();
    //console.log(voices);
    if (voices.length !== 0) {
      var msg = new SpeechSynthesisUtterance();
      msg.rate = document.getElementById("rate").value; // 0.1 to 10
      msg.pitch = document.getElementById("pitch").value; // 0 to 2
      msg.volume = document.getElementById("volume").value; // 0 to 1
      msg.text = document.getElementById("sampletext").value;
      msg.lang = document.getElementById("lang").value; // 'hi-IN'
      for (var i = 0; i < voices.length; i++) {
        dtltxt += voices[i].lang + ' ' + voices[i].name + '\n';
        if (voices[i].lang == msg.lang) {
          msg.voice = voices[i]; // Note: some voices don't support altering params
          msg.voiceURI = voices[i].voiceURI;
          // break;
        }
      }
      msg.onend = function(e) {
        console.log('Finished in ' + e.elapsedTime + ' seconds.');
        dtlarea.value = dtltxt;
      };
      speechSynthesis.speak(msg);
      clearInterval(mytimer);
    }
  }, 1000);
}
This works fine in Chrome on Mac, Linux (Ubuntu), Windows, and Android.
Android has the non-standard en_GB while the others have en-GB as the language code.
You will also see that the same language (lang) has multiple names.
On Mac, Chrome gives you en-GB Daniel in addition to en-GB Google UK English Female and en-GB Google UK English Male:
en-GB Daniel (Mac and iOS)
en-GB Google UK English Female
en-GB Google UK English Male
en_GB English United Kingdom
hi-IN Google हिन्दी
hi-IN Lekha (Mac and iOS)
hi_IN Hindi India
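Given the en_GB / en-GB inconsistency above, it can help to normalize the language code before matching; a small sketch (the helper name is just illustrative):

// Illustrative helper: normalize underscore-style codes (e.g. Android's en_GB)
// to hyphenated BCP 47 style (en-GB) before comparing.
function normalizeLang(code) {
  return code.replace('_', '-');
}

var ukVoices = speechSynthesis.getVoices().filter(function (v) {
  return normalizeLang(v.lang) === 'en-GB';
});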
Another way to ensure voices are loaded before you need them is to bind their loading state to a promise, and then dispatch your speech commands from a then:
const awaitVoices = new Promise(done => speechSynthesis.onvoiceschanged = done);

function listVoices() {
  awaitVoices.then(() => {
    let voices = speechSynthesis.getVoices();
    console.log(voices);
  });
}
When you call listVoices, it will either wait for the voices to load first, or dispatch your operation on the next tick.
I used this code to load voices successfully:
<select id="voices"></select>
...
function loadVoices() {
  populateVoiceList();
  if (speechSynthesis.onvoiceschanged !== undefined) {
    speechSynthesis.onvoiceschanged = populateVoiceList;
  }
}

function populateVoiceList() {
  var allVoices = speechSynthesis.getVoices();
  allVoices.forEach(function(voice, index) {
    var option = $('<option>').val(index).html(voice.name).prop("selected", voice.default);
    $('#voices').append(option);
  });
  if (allVoices.length > 0 && speechSynthesis.onvoiceschanged !== undefined) {
    // unregister event listener (it is fired multiple times)
    speechSynthesis.onvoiceschanged = null;
  }
}
I found the onvoiceschanged code in this article: https://hacks.mozilla.org/2016/01/firefox-and-the-web-speech-api/
Note: this requires jQuery.
It works in Firefox/Safari and Chrome (and in Google Apps Script too, but only in the HTML).
async function speak(txt) {
  await initVoices();
  const u = new SpeechSynthesisUtterance(txt);
  u.voice = speechSynthesis.getVoices()[3];
  speechSynthesis.speak(u);
}

function initVoices() {
  return new Promise(function (res, rej) {
    // If the voices are already available, resolve right away;
    // otherwise wait for the voiceschanged event.
    if (speechSynthesis.getVoices().length) {
      res();
    } else {
      window.speechSynthesis.onvoiceschanged = () => res();
    }
  });
}
While the accepted answer works great, if you're using an SPA and not loading full pages, the voices will not be available when navigating between links.
On a full-page load, this will run:
window.speechSynthesis.onvoiceschanged
In an SPA, it won't run.
You can check whether it's undefined and set the handler, or else get the voices from the window object directly.
An example that works:
let voices = [];
if (window.speechSynthesis.onvoiceschanged == undefined) {
  window.speechSynthesis.onvoiceschanged = () => {
    voices = window.speechSynthesis.getVoices();
  };
} else {
  voices = window.speechSynthesis.getVoices();
}
// console.log("voices", voices);
I had to do my own research on this to make sure I understood it properly, so I'm just sharing (feel free to edit).
My goal is to:
Get a list of the voices available on my device
Populate a select element with those voices (after a particular page loads)
Use easy-to-understand code
The basic functionality is demonstrated in MDN's official live demo:
https://github.com/mdn/web-speech-api/tree/master/speak-easy-synthesis
but I wanted to understand it better.
To break the topic down...
SpeechSynthesis
The SpeechSynthesis interface of the Web Speech API is the controller
interface for the speech service; this can be used to retrieve
information about the synthesis voices available on the device, start
and pause speech, and other commands besides.
Source
onvoiceschanged
The onvoiceschanged property of the SpeechSynthesis interface
represents an event handler that will run when the list of
SpeechSynthesisVoice objects that would be returned by the
SpeechSynthesis.getVoices() method has changed (when the voiceschanged
event fires.)
Source
Example A
If my application merely has:
var synth = window.speechSynthesis;
console.log(synth);
console.log(synth.onvoiceschanged);
the Chrome developer tools console shows the SpeechSynthesis object, with onvoiceschanged still null (screenshot omitted).
Example B
If I change the code to:
var synth = window.speechSynthesis;
console.log("BEFORE");
console.log(synth);
console.log(synth.onvoiceschanged);
console.log("AFTER");
var voices = synth.getVoices();
console.log(voices);
console.log(synth);
console.log(synth.onvoiceschanged);
The before and after states are the same, and voices is an empty array.
Solution
Although I'm not confident implementing Promises, the following worked for me:
Defining the function
var synth = window.speechSynthesis;
// declare so that values are accessible globally
var voices = [];

function set_up_speech() {
  return new Promise(function(resolve, reject) {
    // get the voices (assign to the global declared above; don't shadow it)
    voices = synth.getVoices();
    // get reference to select element
    var $select_topic_speaking_voice = $("#select_topic_speaking_voice");
    // for each voice, generate select option html and append to select
    for (var i = 0; i < voices.length; i++) {
      var option = $("<option></option>");
      var suffix = "";
      // if it is the default voice, add suffix text
      if (voices[i].default) {
        suffix = " -- DEFAULT";
      }
      // create the option text
      var option_text = voices[i].name + " (" + voices[i].lang + suffix + ")";
      // add the option text
      option.text(option_text);
      // add option attributes
      option.attr("data-lang", voices[i].lang);
      option.attr("data-name", voices[i].name);
      // append option to select element
      $select_topic_speaking_voice.append(option);
    }
    // resolve the voices value
    resolve(voices);
  });
}
Calling the function
// in your handler, populate the select element
if (page_title === "something") {
  set_up_speech();
}
On Android Chrome, turn off Data Saver; that was helpful for me. (Chrome 71.0.3578.99)
// wait until the voices load
window.speechSynthesis.onvoiceschanged = function() {
  window.speechSynthesis.getVoices();
};
let voices = speechSynthesis.getVoices();
let gotVoices = false;

if (voices.length) {
  resolve(voices, message);
} else {
  speechSynthesis.onvoiceschanged = () => {
    if (!gotVoices) {
      voices = speechSynthesis.getVoices();
      gotVoices = true;
      if (voices.length) resolve(voices, message);
    }
  };
}

function resolve(voices, message) {
  var synth = window.speechSynthesis;
  let utter = new SpeechSynthesisUtterance();
  utter.lang = 'en-US';
  utter.voice = voices[65]; // index is specific to the author's machine
  utter.text = message;
  utter.volume = 1.0; // volume ranges from 0 to 1
  synth.speak(utter);
}
Works for Edge, Chrome, and Safari; it doesn't repeat the sentences.

Choppy/inaudible playback with chunked audio through Web Audio API

I brought this up in my last post, but since it was off topic from the original question I'm posting it separately. I'm having trouble getting my transmitted audio to play back through Web Audio the same way it would sound in a media player. I have tried two different transmission protocols, binaryjs and socket.io, and neither makes a difference when playing through Web Audio. To rule out transportation of the audio data as the issue, I created an example that sends the data back to the server after it's received from the client and dumps the returned stream to stdout. Piping that into VLC results in the listening experience you would expect.
To hear the result played through VLC, which sounds the way it should, run the example at https://github.com/grkblood13/web-audio-stream/tree/master/vlc using the following command:
$ node webaudio_vlc_svr.js | vlc -
For whatever reason, though, when I try to play this same audio data through Web Audio, it fails miserably. The result is random noises with large gaps of silence in between.
What is wrong with the following code that makes the playback sound so bad?
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
var delayTime = 0;
var init = 0;
var audioStack = [];

client.on('stream', function(stream, meta) {
  stream.on('data', function(data) {
    context.decodeAudioData(data, function(buffer) {
      audioStack.push(buffer);
      if (audioStack.length > 10 && init == 0) { init++; playBuffer(); }
    }, function(err) {
      console.log("err(decodeAudioData): " + err);
    });
  });
});

function playBuffer() {
  var buffer = audioStack.shift();
  setTimeout(function() {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    source.start(context.currentTime);
    delayTime = source.buffer.duration * 1000; // make the next buffer wait the length of the last buffer before being played
    playBuffer();
  }, delayTime);
}
Full source: https://github.com/grkblood13/web-audio-stream/tree/master/binaryjs
You really can't just call source.start(audioContext.currentTime) like that.
setTimeout() has long and imprecise latency - other main-thread work can delay your setTimeout() calls by milliseconds, even tens of milliseconds (garbage collection, JS execution, layout...). Your code is trying to play audio immediately - which needs to be started with roughly 0.02 ms accuracy to avoid glitching - on a timer that has tens of milliseconds of imprecision.
The whole point of the Web Audio system is that the audio scheduler works in a separate high-priority thread, so you can pre-schedule audio (starts, stops, and AudioParam changes) with very high accuracy. You should rewrite your system to:
1) track when the first block was scheduled in AudioContext time - and DON'T schedule the first block immediately; allow some latency so your network can hopefully keep up;
2) schedule each successive block in the future, based on when the previous block ends.
e.g. (note I haven't tested this code, this is off the top of my head):
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
var init = 0;
var audioStack = [];
var nextTime = 0;

client.on('stream', function(stream, meta) {
  stream.on('data', function(data) {
    context.decodeAudioData(data, function(buffer) {
      audioStack.push(buffer);
      if ((init != 0) || (audioStack.length > 10)) { // make sure we put at least 10 chunks in the buffer before starting
        init++;
        scheduleBuffers();
      }
    }, function(err) {
      console.log("err(decodeAudioData): " + err);
    });
  });
});

function scheduleBuffers() {
  while (audioStack.length) {
    var buffer = audioStack.shift();
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    if (nextTime == 0)
      nextTime = context.currentTime + 0.05; // add 50ms latency to work well across systems - tune this if you like
    source.start(nextTime);
    nextTime += source.buffer.duration; // make the next buffer wait the length of the last buffer before being played
  }
}
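A possible refinement, not in the original snippet: if the network stalls long enough for playback to catch up, nextTime falls behind context.currentTime, and every queued chunk will then start immediately and overlap. A small guard inside the loop, just before source.start(nextTime), re-anchors the schedule:

// If we underran (nextTime is already in the past), re-anchor the schedule
// so the remaining chunks don't all start at once and overlap.
if (nextTime < context.currentTime) {
  nextTime = context.currentTime + 0.05;
}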
}