Is it possible to create a multichannel `MediaElementAudioSourceNode` from a multichannel `HTMLAudioElement`?

It's possible to load a multichannel WAV file (e.g. a first-order ambisonics file) using an HTML <audio> element, and to create a MediaElementAudioSourceNode from that element.
Regarding the channelCount property of that MediaElementAudioSourceNode, the documentation states that it will depend on the HTMLMediaElement the node was created from, but when I try this the channel count is always 2.
Is it possible to create a node whose channel count corresponds to the number of channels of the loaded file?

To answer my own question: it seems all channels are loaded and played back, you just need to adjust some values. I set the channel count to 4 and the channel interpretation to 'discrete', then used a channel splitter to route the channels individually to an analyser, et voilà, I can see all the different waveforms. Example below.
<html>
<body>
<h1> Multitrack Experiment </h1>
<p>This is a little experiment to see if all channels of a multichannel audio file are played back,
which they are.</p>
<p>The audio file contains four different waveforms on four channels. (0 -> sine, 1 -> saw, 2 -> square, 3 -> noise)</p>
<audio src="tracktest.wav"></audio>
<button>
<span>Play/Pause</span>
</button>
<br/>
<br/>
<canvas id="oscilloscope"></canvas>
</body>
<script>
// for legacy browsers
//const AudioContext = window.AudioContext || window.webkitAudioContext;
var playing = false;
const audioContext = new AudioContext();
// get the audio element
const audioElement = document.querySelector('audio');
// pass it into the audio context
// configure the media element source correctly
// (otherwise it still shows 2 channels);
// channelCount, channelCountMode and channelInterpretation
// are the standard AudioNode mixing controls
const track = audioContext.createMediaElementSource(audioElement);
track.channelCount = 4;
track.channelCountMode = 'explicit';
track.channelInterpretation = 'discrete';
// just for monitoring purposes
track.connect(audioContext.destination);
// split the channels so they can be routed individually
const splitter = audioContext.createChannelSplitter(4);
track.connect(splitter);
const analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;
// uncomment the different options to see the different results ...
splitter.connect(analyser, 0, 0);
//splitter.connect(analyser, 1, 0);
//splitter.connect(analyser, 2, 0);
//splitter.connect(analyser, 3, 0);
// select our play button
const playButton = document.querySelector('button');
playButton.addEventListener('click', function() {
  // check if context is in suspended state (autoplay policy)
  if (audioContext.state === 'suspended') {
    audioContext.resume();
  }
  console.log(track);
  // play or pause the track depending on state
  if (!playing) {
    console.log("play");
    audioElement.play();
    playing = true;
  } else {
    console.log("stop");
    audioElement.pause();
    playing = false;
  }
}, false);
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
analyser.getByteTimeDomainData(dataArray);
// Get a canvas defined with ID "oscilloscope"
const canvas = document.getElementById("oscilloscope");
const canvasCtx = canvas.getContext("2d");
// draw an oscilloscope of the current audio source
function draw() {
  requestAnimationFrame(draw);
  analyser.getByteTimeDomainData(dataArray);
  canvasCtx.fillStyle = "rgb(200, 200, 200)";
  canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
  canvasCtx.lineWidth = 2;
  canvasCtx.strokeStyle = "rgb(0, 0, 0)";
  canvasCtx.beginPath();
  const sliceWidth = canvas.width * 1.0 / bufferLength;
  let x = 0;
  for (let i = 0; i < bufferLength; i++) {
    const v = dataArray[i] / 128.0;
    const y = v * canvas.height / 2;
    if (i === 0) {
      canvasCtx.moveTo(x, y);
    } else {
      canvasCtx.lineTo(x, y);
    }
    x += sliceWidth;
  }
  canvasCtx.lineTo(canvas.width, canvas.height / 2);
  canvasCtx.stroke();
}
draw();
</script>
</html>
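As a sanity check, you can also verify how many channels the file itself contains by decoding it directly, independent of the <audio> element. A minimal sketch that could be added to the script above (it reuses the audioContext defined there; fetch and decodeAudioData are standard browser APIs):
// Decode the file directly and report its channel count.
fetch('tracktest.wav')
  .then(response => response.arrayBuffer())
  .then(data => audioContext.decodeAudioData(data))
  .then(audioBuffer => {
    console.log('channels in file:', audioBuffer.numberOfChannels); // expect 4
  })
  .catch(err => console.error('decoding failed:', err));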

Related

shadowBlur (HTML canvas) not working in JS loop

I was trying to improve my stars-in-the-sky animation, which I created using JS. This is when I discovered I could use the shadowBlur property to change the size of the shadow created around my stars, to make it look like they are flickering. The issue is that the shadowBlur goes up but never fades back to black. Here is the code I have used. Any help with this will be greatly appreciated :).
// ---- Vars for star animation
let randomStars = [];
let starCollection = [];
let numberofStars = 50;
let flickeringStars = 50;
class Star{
constructor(x,y,color,radius,shadowBlur){
this._canvas = document.querySelector('canvas');
this._canvas.width = window.innerWidth;
this._canvas.height = window.innerHeight;
this._c = this._canvas.getContext('2d');
this._radius = radius;
this._x = x;
this._y = y;
this._color = color;
this._shadowBlur = 10;
this._shadowColor = 'white';
}
//drawing individual stars
draw(){
this._c.beginPath();
this._c.arc(this._x,this._y,this._radius,0,Math.PI * 2,false);
this._c.fillStyle = this._color;
this._c.strokeStyle = 'black';
this._c.shadowColor = this._shadowColor;
this._c.shadowBlur = this._shadowBlur;
this._c.stroke();
this._c.fill();
this._c.closePath();
}
//Fade in and out for stars
flicker(){
setTimeout(()=>{this._shadowBlur=10;},200);
setTimeout(()=>{this._shadowBlur=8;},400);
setTimeout(()=>{this._shadowBlur=6;},600);
setTimeout(()=>{this._shadowBlur=4;},800);
setTimeout(()=>{this._shadowBlur=2;},1000);
setTimeout(()=>{this._shadowBlur=0;},1200);
setTimeout(()=>{this._shadowBlur=2;},1400);
setTimeout(()=>{this._shadowBlur=4;},1600);
setTimeout(()=>{this._shadowBlur=6;},1800);
setTimeout(()=>{this._shadowBlur=8;},2000);
setTimeout(()=>{this._shadowBlur=10;},2200);
setTimeout(()=>{this.draw();},200);
setTimeout(()=>{this.draw();},400);
setTimeout(()=>{this.draw();},600);
setTimeout(()=>{this.draw();},800);
setTimeout(()=>{this.draw();},1000);
setTimeout(()=>{this.draw();},1200);
setTimeout(()=>{this.draw();},1400);
setTimeout(()=>{this.draw();},1600);
setTimeout(()=>{this.draw();},1800);
setTimeout(()=>{this.draw();},2000);
setTimeout(()=>{this.draw();},2200);
}
//Clears the canvas
clearstars(){
this._c.clearRect(0,0,window.innerWidth, window.innerHeight);
}
}
// ---- Functions ----
//Typing animation
const typingAnimation = ()=>{
if(textProgress < text.length){
setTimeout(()=>{requestAnimationFrame(typingAnimation)}, speed);
if(text.charAt(textProgress) === '\n')document.getElementById('animation-text').innerHTML += '</br>';
document.getElementById('animation-text').innerHTML += text.charAt(textProgress);
textProgress ++;
}else{
let event = new CustomEvent('showStars');
dispatchEvent(event);
}
}
//Store stars
const storeStars = ()=>{
starCollection = [];
for(let i=0;i<numberofStars;i++){
let x = Math.floor(Math.random()*window.innerWidth);
let y = Math.floor(Math.random()*window.innerHeight);
starCollection.push(new Star(x,y,"white",(Math.random()+1)-0.7));
}
}
//Show stars to the screen
const showStars = ()=>{
for(let i=0;i<starCollection.length;i++){
starCollection[i].draw();
}
}
//Store random stars
const generateRandomStars = ()=>{
randomStars = [];
for(let i=0;i<flickeringStars;i++){
let x = Math.floor(Math.random()*window.innerWidth);
let y = Math.floor(Math.random()*window.innerHeight);
randomStars.push(new Star(x,y,"white",(Math.random()+1)-0.7));
}
}
//Show randoms stars after clearing previous set of flickering stars
const showRandomStars = ()=>{
let id = window.setTimeout(function () { }, 0);
while (id--) {
window.clearTimeout(id);
}
let starHandler = new Star(0,0,"white",0);
starHandler.clearstars();
showStars();
flickerStars();
}
//Flickers stars and changes set of stars randomly
const flickerStars = ()=>{
for(let i=0;i<flickeringStars;i++){
setInterval(()=>{
randomStars[i].flicker();
},2200);
setInterval(()=>{
console.log("changing stars pattern");
generateRandomStars();
showRandomStars();
},12200);
}
}
// ---- Event Listeners ----
//Typing animation on load
window.addEventListener("load", ()=>{
storeStars();
generateRandomStars();
showStars();
flickerStars();
});
//Handles star animation scaling on window resize
window.addEventListener("resize", ()=>{
let id = window.setTimeout(function () { }, 0);
while (id--) {
window.clearTimeout(id);
}
let starHandler = new Star(0,0,"white",0);
starHandler.clearstars();
generateRandomStars();
storeStars();
showStars();
flickerStars();
});
body{
background-color:black;
}
<html>
<body><canvas></canvas></body>
</html>
I cannot work out what effect you are trying to get. Below is a simple flickering-star animation that uses small rectangles that change size to simulate flicker.
The frame rate is reduced so that the flicker is more pronounced.
It is very efficient and does not require complex state changes or slow render methods (like blur).
var W = 0, H = 0; // hold canvas size. Set to zero so first frame sizes canvas
var count = 500; // number of stars
var frame = 0; // current frame number
const frameRate = 5; // render stars every 5 frames
const sizeRange = 1.5; // max size of star is minSize + sizeRange + flickerSize
const minSize = 1; // minimum size of star
const flickerSize = 1; // amount of random change to make a star brighter
const flickerRate = 0.1; // odds per rendered frame that a star flickers
const stars = [];
// Create a random set of numbers for star positions.
// Values must be larger than largest canvas you are going to use.
// This will work up to 8K display
while (count--) { stars.push(Math.random() * 1024 * 8) }
const ctx = canvas.getContext("2d");
requestAnimationFrame(mainLoop);
function mainLoop() {
  var len = stars.length, i = 0, x = stars[i++], starSize;
  // if the window has resized, change the canvas to fit
  if (W !== innerWidth || H !== innerHeight) {
    W = canvas.width = innerWidth;
    H = canvas.height = innerHeight;
    ctx.fillStyle = "#FFF";
    frame = 0;
  }
  if (frame++ % frameRate === 0) { // only render every frameRate frames
    ctx.clearRect(0, 0, W, H);
    ctx.beginPath(); // draw all stars with one path as it is several orders
                     // of magnitude quicker than creating a path for each
    while (i < len) {
      // draws small stars to large
      const starScale = (i / len); // set scale from 0 to 1
      starSize = sizeRange; // set the range of sizes
      if (Math.random() < flickerRate) { // on random odds of flicker
        starSize += flickerSize * Math.random(); // add random flicker size
      }
      starSize *= starScale; // scale the star
      starSize += minSize; // add min size of star
      const halfSize = starSize / 2; // offset to top left of star
      const y = stars[i++]; // get next random number as star y pos
      // add rect to path fitted to canvas width and height (W, H)
      ctx.rect((x % W) - halfSize, (y % H) - halfSize, starSize, starSize);
      x = y; // use y for the next star's x coordinate
    }
    ctx.fill(); // fill in all the stars
  }
  requestAnimationFrame(mainLoop);
}
canvas {
position: absolute;
top: 0px;
left: 0px;
background: #000;
}
<canvas id="canvas"></canvas>

Merged, instanced, and naive rendering make no difference with Chrome plus an integrated graphics card [duplicate]

I am testing the FPS on my laptop, which has an Intel(R) Iris(R) Plus Graphics 655 card, to compare the three.js example's instanced rendering against merged-drawcall rendering.
I used both the QRCode_buffergeometry.json model and the suzanne_buffergeometry.json model:
QRCode_buffergeometry.json: 12852 vertices, 4284 faces
suzanne_buffergeometry.json: 1515 vertices, 967 faces
Then the FPS for suzanne_buffergeometry with a count of 8000:
INSTANCED: 36
MERGED: 43
NAIVE: from 23 to 35, depending on rotation
And for the QRCode_buffergeometry model with a count of 8000:
INSTANCED: 9
MERGED: 15-17
NAIVE: 17-19
I am very confused by this performance.
1. As far as I understand, no matter whether I use instancing or merged draw calls, the number of draw calls is fixed at 1 and the total number of faces to draw is the same, so why are merged draw calls faster than instancing? Since the face and vertex counts are both the same, I assume the vertex-transform work in the vertex shader should be the same too, so why is merged faster?
2. For the QRCode_buffergeometry model, naive is almost the same as merged and better than instanced, so I guess the GPU is the bottleneck rather than the CPU. But the final drawing data should be the same; I mean, the number of faces to draw should eventually be the same, so why is naive faster? Isn't instancing supposed to be the best option? I am pretty sure the camera's near and far planes are big enough, so there should not be any culling issue.
3. When I am trying to optimize a big scene, when should I pick merging? When should I pick instancing? And when is doing nothing better?
Any help? Thanks a lot!
The code for the sample is attached here:
body { margin: 0; }
<div id="container"></div>
<script type="module">
import * as THREE from 'https://cdn.jsdelivr.net/npm/three@0.112.1/build/three.module.js';
import Stats from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/stats.module.js';
import { GUI } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/dat.gui.module.js';
import { OrbitControls } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/controls/OrbitControls.js';
import { BufferGeometryUtils } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/utils/BufferGeometryUtils.js';
var container, stats, gui, guiStatsEl;
var camera, controls, scene, renderer, material;
// gui
var Method = {
INSTANCED: 'INSTANCED',
MERGED: 'MERGED',
NAIVE: 'NAIVE'
};
var api = {
method: Method.INSTANCED,
mesh_number: 1,
count_per_mesh: 1000
};
var modelName = 'suzanne_buffergeometry.json';
var modelScale = (modelName === 'suzanne_buffergeometry.json' ? 1 : 0.01);
var modelVertex = (modelName === 'suzanne_buffergeometry.json' ? 1515 : 12852);
var modelFace = (modelName === 'suzanne_buffergeometry.json' ? 967 : 4284);
//
init();
initMesh();
animate();
//
function clean() {
var meshes = [];
scene.traverse(function(object) {
if (object.isMesh) meshes.push(object);
});
for (var i = 0; i < meshes.length; i++) {
var mesh = meshes[i];
mesh.material.dispose();
mesh.geometry.dispose();
scene.remove(mesh);
}
}
var randomizeMatrix = function() {
var position = new THREE.Vector3();
var rotation = new THREE.Euler();
var quaternion = new THREE.Quaternion();
var scale = new THREE.Vector3();
return function(matrix) {
position.x = Math.random() * 40 - 20;
position.y = Math.random() * 40 - 20;
position.z = Math.random() * 40 - 20;
rotation.x = Math.random() * 2 * Math.PI;
rotation.y = Math.random() * 2 * Math.PI;
rotation.z = Math.random() * 2 * Math.PI;
quaternion.setFromEuler(rotation);
scale.x = scale.y = scale.z = Math.random() * modelScale;
matrix.compose(position, quaternion, scale);
};
}();
function initMesh() {
clean();
console.time(api.method + ' (build)');
for (var i = 0; i < api.mesh_number; i++) {
// make instances
new THREE.BufferGeometryLoader()
.setPath('https://threejs.org/examples/models/json/')
.load(modelName, function(geometry) {
material = new THREE.MeshNormalMaterial();
geometry.computeVertexNormals();
switch (api.method) {
case Method.INSTANCED:
makeInstanced(geometry);
break;
case Method.MERGED:
makeMerged(geometry);
break;
case Method.NAIVE:
makeNaive(geometry);
break;
}
});
}
console.timeEnd(api.method + ' (build)');
var drawCalls = 0;
switch (api.method) {
case Method.INSTANCED:
case Method.MERGED:
drawCalls = api.mesh_number;
break;
case Method.NAIVE:
drawCalls = api.mesh_number * api.count_per_mesh;
break;
}
guiStatsEl.innerHTML = [
'<i>GPU draw calls</i>: ' + drawCalls,
'<i>Face Number</i>: ' + (modelFace * api.mesh_number * api.count_per_mesh),
'<i>Vertex Number</i>: ' + (modelVertex * api.mesh_number * api.count_per_mesh)
].join('<br/>');
}
function makeInstanced(geometry, idx) {
var matrix = new THREE.Matrix4();
var mesh = new THREE.InstancedMesh(geometry, material, api.count_per_mesh);
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
mesh.setMatrixAt(i, matrix);
}
scene.add(mesh);
}
function makeMerged(geometry, idx) {
var instanceGeometry;
var geometries = [];
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var instanceGeometry = geometry.clone();
instanceGeometry.applyMatrix(matrix);
geometries.push(instanceGeometry);
}
var mergedGeometry = BufferGeometryUtils.mergeBufferGeometries(geometries);
scene.add(new THREE.Mesh(mergedGeometry, material));
}
function makeNaive(geometry, idx) {
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var mesh = new THREE.Mesh(geometry, material);
mesh.applyMatrix(matrix);
scene.add(mesh);
}
}
function init() {
var width = window.innerWidth;
var height = window.innerHeight;
// camera
camera = new THREE.PerspectiveCamera(70, width / height, 1, 100);
camera.position.z = 30;
// renderer
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(width, height);
renderer.outputEncoding = THREE.sRGBEncoding;
container = document.getElementById('container');
container.appendChild(renderer.domElement);
// scene
scene = new THREE.Scene();
scene.background = new THREE.Color(0xffffff);
// controls
controls = new OrbitControls(camera, renderer.domElement);
controls.autoRotate = true;
// stats
stats = new Stats();
container.appendChild(stats.dom);
// gui
gui = new GUI();
gui.add(api, 'method', Method).onChange(initMesh);
gui.add(api, 'count_per_mesh', 1, 20000).step(1).onChange(initMesh);
gui.add(api, 'mesh_number', 1, 200).step(1).onChange(initMesh);
var perfFolder = gui.addFolder('Performance');
guiStatsEl = document.createElement('li');
guiStatsEl.classList.add('gui-stats');
perfFolder.__ul.appendChild(guiStatsEl);
perfFolder.open();
// listeners
window.addEventListener('resize', onWindowResize, false);
Object.assign(window, {
scene
});
}
//
function onWindowResize() {
var width = window.innerWidth;
var height = window.innerHeight;
camera.aspect = width / height;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
function animate() {
requestAnimationFrame(animate);
controls.update();
stats.update();
render();
}
function render() {
renderer.render(scene, camera);
}
//
function getGeometryByteLength(geometry) {
var total = 0;
if (geometry.index) total += geometry.index.array.byteLength;
for (var name in geometry.attributes) {
total += geometry.attributes[name].array.byteLength;
}
return total;
}
// Source: https://stackoverflow.com/a/18650828/1314762
function formatBytes(bytes, decimals) {
if (bytes === 0) return '0 bytes';
var k = 1024;
var dm = decimals < 0 ? 0 : decimals;
var sizes = ['bytes', 'KB', 'MB'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
</script>
These are only guesses.
Three.js by default culls objects that are outside the frustum.
We can turn this off with mesh.frustumCulled = false. I didn't notice a difference, and this should show up in the draw count anyway.
Three.js by default sorts opaque objects front to back.
This means that, everything else being equal, sorted will run faster
than unsorted because of the depth test. If I set the depth test
to always pass with
material.depthFunc = THREE.AlwaysDepth
then I seem to get slightly faster rendering with instanced vs naive. Of course,
everything else is not equal.
An issue in Chrome.
If I run in Firefox or Safari I get the expected results: Merged > Instanced > Naive.
It could be a bug, or it could be that they're working around a driver or
security issue that the other browsers are not. You'd have to ask.
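For anyone rerunning the test, here is a minimal sketch of applying the two tweaks mentioned above to every mesh in the scene (applying them scene-wide is my assumption; the property names are standard three.js API):
// Disable frustum culling and make the depth test always pass,
// so the two effects described above can be ruled out while profiling.
scene.traverse(function(object) {
  if (object.isMesh) {
    object.frustumCulled = false;                  // skip per-object culling
    object.material.depthFunc = THREE.AlwaysDepth; // depth test always passes
  }
});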

Why does this P5.js web-cam motion-detection code result in my browser crashing?

I can't see why this code doesn't work. There are no syntax errors displayed, and no errors in the Chrome development console.
No black image is displayed when there is movement, and the page seems to keep loading non-stop (as if caught in a loop); the light on the camera doesn't come on either.
I have commented out sections of code, and the problem occurs during the looping stage. (I can get the camera to display an image if the pixel checking doesn't take place.)
In the user messages in the console, it advises me that:
p5.js says: dist() was expecting at least 4 arguments, but received only 3.
As you can see from the code, it does pass the correct number of arguments.
I believe the browser 'crashes' because these messages appear in the console tens of thousands of times before it crashes. If this is the case: how do I stop that?
// Variable for capture device
var video;
// Previous Frame
var prevFrame;
// How different must a pixel be to be a "motion" pixel
var threshold = 50;
function setup() {
createCanvas(320, 240);
pixelDensity(1);
video = createCapture(VIDEO);
video.size(width, height);
video.hide();
// Create an empty image the same size as the video
prevFrame = createImage(video.width, video.height);
}
function draw() {
  image(prevFrame, 0, 0);
  loadPixels();
  video.loadPixels();
  prevFrame.loadPixels();
  // Begin loop to walk through every pixel
  for (var x = 0; x < video.width; x++) {
    for (var y = 0; y < video.height; y++) {
      // Step 1, what is the location into the array
      var loc = (x + y * video.width) * 4;
      // Step 2, what is the previous color
      var r1 = prevFrame.pixels[loc];
      var g1 = prevFrame.pixels[loc + 1];
      var b1 = prevFrame.pixels[loc + 2];
      // Step 3, what is the current color
      var r2 = video.pixels[loc];
      var g2 = video.pixels[loc + 1];
      var b2 = video.pixels[loc + 2];
      // Step 4, compare colors (previous vs. current)
      var diff = dist(r1, g1, b1, r2, g2, b2);
      // Step 5, how different are the colors?
      // If the color at that pixel has changed, then there is motion at that pixel.
      if (diff > threshold) {
        // If motion, display black
        pixels[loc] = 0;
        pixels[loc + 1] = 0;
        pixels[loc + 2] = 0;
        pixels[loc + 3] = 0;
      } else {
        // If not, display white
        pixels[loc] = 255;
        pixels[loc + 1] = 255;
        pixels[loc + 2] = 255;
        pixels[loc + 3] = 255;
      }
    }
  }
  updatePixels();
  // Save frame for the next cycle
  //if (video.canvas) {
  // Before we read the new frame, we always save the previous frame for comparison!
  prevFrame.copy(video, 0, 0, video.width, video.height, 0, 0, video.width, video.height);
  //}
}
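One likely cause, though this is only a guess: on the first few frames the capture has not delivered any pixel data yet, so video.pixels is empty, r2/g2/b2 come back undefined, and p5's argument check on dist() fires once per pixel per frame, flooding the console until the tab hangs. A minimal guard at the top of draw() would look like this (the rest of the function stays as above):
function draw() {
  video.loadPixels();
  // Bail out until the capture actually delivers pixel data;
  // otherwise every dist() call below receives undefined values.
  if (video.pixels.length === 0) {
    return;
  }
  // ... the original body of draw() continues here ...
}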

Mute/unmute individual WebRTC peer connections in Firefox

In my app I have multiple open peer connections, and I want to be able to mute the microphone at the peer-connection level, not globally (as is done here).
Chrome is straightforward:
Call removeStream when muting
Call addStream when unmuting
Negative: I understand that we are moving towards an addTrack/removeTrack world, so this solution is not compatible with other browsers or with the future.
Firefox does not work at all:
removeTrack/addTrack requires renegotiation, which is not acceptable, as it takes time
replaceTrack does not require renegotiation, and my idea would be to have an empty MediaStreamTrack for muting that I could use to replace the former MediaStreamTrack. Any idea how to do that?
Alternatively, any ideas for a viable Firefox solution / a cooler Chrome solution / a unified approach?
The way to do it in Firefox (and Chrome, and the future) is to clone the tracks, to give you independent track.enabled controls:
var track1, track2;
navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
var clone = stream.clone();
track1 = stream.getAudioTracks()[0];
track2 = clone.getAudioTracks()[0];
})
var toggle = track => track.enabled = !track.enabled;
Try it below (use https fiddle in Chrome):
var track1, track2;
navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
var clone = stream.clone();
track1 = stream.getAudioTracks()[0];
track2 = clone.getAudioTracks()[0];
return Promise.all([spectrum(stream), spectrum(clone)]);
}).catch(e => console.log(e));
var toggle = track => track && (track.enabled = !track.enabled);
var spectrum = stream => {
var audioCtx = new AudioContext();
var analyser = audioCtx.createAnalyser();
var source = audioCtx.createMediaStreamSource(stream);
source.connect(analyser);
var canvas = document.createElement("canvas");
var canvasCtx = canvas.getContext("2d");
canvas.width = window.innerWidth/2 - 20;
canvas.height = window.innerHeight/2 - 20;
document.body.appendChild(canvas);
var data = new Uint8Array(canvas.width);
canvasCtx.strokeStyle = 'rgb(0, 125, 0)';
setInterval(() => {
canvasCtx.fillStyle = "#a0a0a0";
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
analyser.getByteFrequencyData(data);
canvasCtx.lineWidth = 2;
data.forEach((y, x) => {
y = canvas.height - (y / 128) * canvas.height / 4;
var c = Math.floor((x*255)/canvas.width);
canvasCtx.fillStyle = "rgb("+c+",0,"+(255-x)+")";
canvasCtx.fillRect(x, y, 2, canvas.height - y)
});
analyser.getByteTimeDomainData(data);
canvasCtx.lineWidth = 5;
canvasCtx.beginPath();
data.forEach((y, x) => {
y = canvas.height - (y / 128) * canvas.height / 2;
x ? canvasCtx.lineTo(x, y) : canvasCtx.moveTo(x, y);
});
canvasCtx.stroke();
var bogus = source; // avoid GC or the whole thing stops
}, 1000 * canvas.width / audioCtx.sampleRate);
};
<button onclick="toggle(track1)">Mute A</button>
<button onclick="toggle(track2)">Mute B</button><br>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
Then feed the two tracks to different peer connections. This works with video mute too.
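To make that last step concrete, here is a minimal sketch of feeding the two tracks to two peer connections (pc1 and pc2 and their signaling are assumed to exist; they are not part of the answer above):
// Send the original track to one peer and the clone to another, so each
// can be muted independently via track.enabled, without renegotiation.
navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
  var clone = stream.clone();
  var trackA = stream.getAudioTracks()[0];
  var trackB = clone.getAudioTracks()[0];
  pc1.addTrack(trackA, stream); // pc1, pc2: RTCPeerConnections set up elsewhere
  pc2.addTrack(trackB, clone);
  // trackA.enabled = false; // mutes only what pc1 sends
  // trackB.enabled = false; // mutes only what pc2 sends
});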

Does Canvas redraw itself every time anything changes?

I have done some research on how canvas works. It is supposed to be "immediate mode" means that it does not remember what its drawing looks like, only the bitmap remains everytime anything changes.
This seems to suggest that canvas does not redraw itself on change.
However, when I tested canvas on iPad (basically I keep drawing parallel lines on the canvas), the frame rate degrades rapidly when there are more lines on the canvas. Lines are drawn more slowly and in a more jumpy way.
Does this mean canvas actually have to draw the whole thing on change? Or there is other reason for this change in performance?
The HTML canvas remembers the final state of the pixels after each stroke/fill call is made. It never redraws itself. (The web browser may need to re-blit portions of the final image to the screen, for example if another HTML object is moved over the canvas and then away again, but this is not the same as re-issuing the drawing commands.)
The context always remembers its current state, though, including any path that you have been accumulating. It is probable that you are (accidentally) not clearing your path between 'refreshes', so on the first frame you are drawing one line, on the second frame two lines, on the third frame three lines, and so forth. (Are you calling ctx.closePath() and ctx.beginPath()? Are you clearing the canvas between drawings?)
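To see how that mistake plays out, here is a deliberately broken sketch (it reuses the same ctx, w and h globals as the demo below); each frame re-strokes every line accumulated so far, so drawing gets slower and slower:
// BROKEN on purpose: without beginPath(), the path keeps growing and
// stroke() re-strokes ALL previous lines on every frame.
function leakyFrame() {
  // ctx.beginPath();  // <-- uncommenting this one line fixes the slowdown
  ctx.moveTo(Math.random() * w, Math.random() * h);
  ctx.lineTo(Math.random() * w, Math.random() * h);
  ctx.stroke();
  requestAnimationFrame(leakyFrame);
}
leakyFrame();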
Here's an example showing that the canvas does not redraw itself. Even at tens of thousands of lines I see the same frame rate as with hundreds of lines (capped at 200fps on Chrome, ~240fps on Firefox 8.0, when drawing 10 lines per frame).
// Setup for the demo: ctx, w and h refer to a canvas element on the page,
// and "fps"/"l" are element ids used for the stats readout.
var canvas = document.querySelector("canvas");
var ctx = canvas.getContext("2d");
var w = canvas.width, h = canvas.height;
var lastFrame = new Date, avgFrameMS = 5, lines = 0;
function drawLine() {
  ctx.beginPath();
  ctx.moveTo(Math.random() * w, Math.random() * h);
  ctx.lineTo(Math.random() * w, Math.random() * h);
  ctx.closePath();
  ctx.stroke();
  var now = new Date;
  var frameTime = now - lastFrame;
  avgFrameMS += (frameTime - avgFrameMS) / 20;
  lastFrame = now;
  setTimeout(drawLine, 1);
  lines++;
}
drawLine();
// Show the stats infrequently
setInterval(function() {
  fps.innerHTML = (1000 / avgFrameMS).toFixed(1);
  l.innerHTML = lines;
}, 1000);
Seen in action: http://phrogz.net/tmp/canvas_refresh_rate.html
For more feedback on what your code is actually doing versus what you suspect it is doing, share your test case with us.
Adding this answer to be more general.
It really depends on what the change is. If the change simply adds another path to the previously drawn context, then the canvas does not have to be redrawn: simply add the new path to the present context state. The previously selected answer reflects this with an excellent demo found here.
However, if the change translates or 'moves' an already-drawn path to another part of the canvas, then yes, the whole canvas has to be redrawn. Imagine the demo linked above accumulating lines while also rotating about the center of the canvas: for every rotation, the canvas would have to be redrawn, with all previously drawn lines redrawn at the new angle. This concept of redrawing on translation is fairly self-evident, as the canvas has no method of deleting from the present context. For simple translations, like a dot moving across the canvas, one could draw over the dot's present location and redraw the dot at the new, translated location, all on the same context, as sketched below. Depending on how complex the previously drawn objects are, this may or may not be simpler than just redrawing the whole canvas with the translated dot.
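A tiny sketch of that dot example, assuming a canvas element is available as canvas and has a plain white background (the coordinates and sizes are arbitrary):
// Move a dot without redrawing the whole canvas: paint over its old
// position with the background color, then draw it at the new one.
const dotCtx = canvas.getContext("2d");
let dotX = 0;
function stepDot() {
  dotCtx.fillStyle = "white";
  dotCtx.fillRect(dotX - 6, 44, 12, 12); // erase the previous dot
  dotX += 2;                             // translate
  dotCtx.fillStyle = "black";
  dotCtx.beginPath();
  dotCtx.arc(dotX, 50, 5, 0, Math.PI * 2);
  dotCtx.fill();                         // draw at the new position
  requestAnimationFrame(stepDot);
}
stepDot();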
Another demo that demonstrates this concept is rendering an oscilloscope trace on the canvas. The code below implements a FIFO data structure for the oscilloscope's data and plots it on the canvas. Like a typical oscilloscope, once the trace spans the width of the canvas, the trace must translate left to make room for new data points on the right. To do this, the canvas must be redrawn every time a new data point is added.
function rand_int(min, max) {
min = Math.ceil(min);
max = Math.floor(max);
return Math.floor(Math.random() * (max - min + 1) + min); //The maximum is inclusive and the minimum is inclusive
}
function Deque(max_len) {
this.max_len = max_len;
this.length = 0;
this.first = null;
this.last = null;
}
Deque.prototype.Node = function(val, next, prev) {
this.val = val;
this.next = next;
this.prev = prev;
};
Deque.prototype.push = function(val) {
if (this.length == this.max_len) {
this.pop();
}
const node_to_push = new this.Node(val, null, this.last);
if (this.last) {
this.last.next = node_to_push;
} else {
this.first = node_to_push;
}
this.last = node_to_push;
this.length++;
};
Deque.prototype.pop = function() {
if (this.length) {
let val = this.first.val;
this.first = this.first.next;
if (this.first) {
this.first.prev = null;
} else {
this.last = null;
}
this.length--;
return val;
} else {
return null;
}
};
Deque.prototype.to_string = function() {
if (this.length) {
var str = "[";
var present_node = this.first;
while (present_node) {
if (present_node.next) {
str += `${present_node.val}, `;
} else {
str += `${present_node.val}`
}
present_node = present_node.next;
}
str += "]";
return str
} else {
return "[]";
}
};
Deque.prototype.plot = function(canvas) {
const w = canvas.width;
const h = canvas.height;
const ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, w, h);
//Draw vertical gridlines
ctx.beginPath();
ctx.setLineDash([2]);
ctx.strokeStyle = "rgb(124, 124, 124)";
for (var i = 1; i < 9; i++) {
ctx.moveTo(i * w / 9, 0);
ctx.lineTo(i * w / 9, h);
}
//Draw horizontal gridlines
for (var i = 1; i < 10; i++) {
ctx.moveTo(0, i * h / 10);
ctx.lineTo(w, i * h / 10);
}
ctx.stroke();
ctx.closePath();
if (this.length) {
var present_node = this.first;
var x = 0;
ctx.setLineDash([]);
ctx.strokeStyle = "rgb(255, 51, 51)";
ctx.beginPath();
ctx.moveTo(x, h - present_node.val * (h / 10));
while (present_node) {
ctx.lineTo(x * w / 9, h - present_node.val * (h / 10));
x++;
present_node = present_node.next;
}
ctx.stroke();
ctx.closePath();
}
};
const canvas = document.getElementById("canvas");
const deque_contents = document.getElementById("deque_contents");
const button = document.getElementById("push_to_deque");
const min = 0;
const max = 9;
const max_len = 10;
var deque = new Deque(max_len);
deque.plot(canvas);
button.addEventListener("click", function() {
push_to_deque();
});
function push_to_deque() {
deque.push(rand_int(0, 9));
deque_contents.innerHTML = deque.to_string();
deque.plot(canvas);
}
body {
font-family: Arial;
}
.centered {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
text-align: center;
}
<div class="centered">
<p>Implementation of a FIFO deque data structure in JavaScript to mimic oscilloscope functionality. Push the button to push random values to the deque object. After the maximum length is reached, the first item pushed in is popped out to make room for the next value. The values are plotted in the canvas. The canvas must be redrawn to translate the data, making room for the new data.
</p>
<div>
<button type="button" id="push_to_deque">push</button>
</div>
<div>
<h1 id="deque_contents">[]</h1>
</div>
<div>
<canvas id="canvas" width="800" height="500" style="border:2px solid #D3D3D3; margin: 10px;">
</canvas>
</div>
</div>