Convert HTML5 Canvas Sequence to a Video File

I'd like to convert an animation in HTML5 canvas to a video file that could be uploaded to YouTube. Is there any sort of screen capture API or something that would allow me to do this programmatically?

Update for 2020
I solved it using the MediaRecorder API, which is built to do exactly that, among other things.
Here is a solution that records X ms of canvas video;
you can extend it with a button UI to start, pause, resume, stop, and generate a URL.
function record(canvas, time) {
  var recordedChunks = [];
  return new Promise(function (res, rej) {
    var stream = canvas.captureStream(25 /* fps */);
    var mediaRecorder = new MediaRecorder(stream, {
      mimeType: "video/webm; codecs=vp9"
    });
    // ondataavailable fires every `time || 4000` ms
    mediaRecorder.start(time || 4000);
    mediaRecorder.ondataavailable = function (event) {
      recordedChunks.push(event.data);
      // after stop() the dataavailable event fires one more time
      if (mediaRecorder.state === 'recording') {
        mediaRecorder.stop();
      }
    };
    mediaRecorder.onstop = function (event) {
      var blob = new Blob(recordedChunks, { type: "video/webm" });
      var url = URL.createObjectURL(blob);
      res(url);
    };
  });
}
How to use:
const recording = record(canvas, 10000)

// play it on another video element
var video$ = document.createElement('video')
document.body.appendChild(video$)
recording.then(url => video$.setAttribute('src', url))

// download it
var link$ = document.createElement('a')
link$.setAttribute('download', 'recordingVideo')
recording.then(url => {
  link$.setAttribute('href', url)
  link$.click()
})
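Note that not every browser supports "video/webm; codecs=vp9". A small sketch (the candidate list here is my own assumption, not part of the original answer) that probes for a supported type with the standard MediaRecorder.isTypeSupported() before recording:

// Pick the first MIME type this browser's MediaRecorder supports.
// The candidate list is an assumption; extend it to taste.
function pickMimeType() {
  var candidates = [
    "video/webm; codecs=vp9",
    "video/webm; codecs=vp8",
    "video/webm"
  ];
  for (var i = 0; i < candidates.length; i++) {
    if (MediaRecorder.isTypeSupported(candidates[i])) {
      return candidates[i];
    }
  }
  return ""; // empty string lets the browser pick its default
}

You could then pass { mimeType: pickMimeType() } when constructing the MediaRecorder.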

Firefox has an experimental feature (disabled by default) called HTMLCanvasElement.captureStream().
Essentially, it captures the canvas element as a video stream, which can then be sent to another computer using RTCPeerConnection(), or you could perhaps use the YouTube Live Streaming API to stream directly.
See: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
Also: https://developers.google.com/youtube/v3/live/getting-started
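For illustration, a minimal sketch of the capture-and-send idea (the element lookup is an assumption, and the signaling needed to actually establish the RTCPeerConnection is omitted):

// Capture the canvas as a MediaStream and hand its tracks to a peer connection.
const canvas = document.querySelector('canvas'); // assumed element
const stream = canvas.captureStream(25); // 25 fps
const pc = new RTCPeerConnection();
stream.getTracks().forEach(track => pc.addTrack(track, stream));
// ... offer/answer and ICE candidate signaling would go here ...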

There is also the whammy library, which claims to produce WebM videos from stills using JavaScript:
http://antimatter15.com/wp/2012/08/whammy-a-real-time-javascript-webm-encoder/
Note that there are limitations (as is to be expected). This encoder relies on the WebP image format, which is currently only supported in Chrome (perhaps the new Opera too, but I haven't checked). This means you can't encode in other browsers unless you find a way to encode the images you want to use as WebP first (see this link for a possible solution).
Beyond that, there is no way to create a video file from images using JavaScript and canvas with native browser APIs alone.
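For reference, whammy's basic usage looks roughly like this (sketched from its README; the exact API may differ between versions):

// Sketch of whammy usage; assumes the canvas produces WebP (Chrome).
var encoder = new Whammy.Video(15); // 15 fps
encoder.add(canvas);                // call once per rendered frame
var blob = encoder.compile();       // returns a WebM Blob
var url = URL.createObjectURL(blob);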

FileSaver.js + ffmpeg on the command line
With FileSaver.js we can download each canvas frame as a PNG: Save to Local File from Blob
Then we just convert the PNGs to any video format with ffmpeg from the command line: How to create a video from images with FFmpeg?
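For example, a typical invocation along those lines might be (assuming frames named 0.png, 1.png, ... as produced below):

ffmpeg -framerate 25 -i %d.png -c:v libx264 -pix_fmt yuv420p output.mp4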
Chromium 75 asks if you want to allow it to save multiple images. Once you say yes, it downloads the images automatically, one by one, into your download folder, named 0.png, 1.png, etc.
It also worked in Firefox 68, but less well, because the browser opens a bunch of "Do you want to save this file?" dialogs. They do have a "do the same for similar downloads" option, but you have to be quick to select it and hit Enter, or else a new popup comes along!
To stop it, you have to close the tab, or add a stop button and some JavaScript logic.
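A stop button can be as simple as this sketch (the button id and the flag check inside draw() are my assumptions):

var stopped = false;
// assumes a <button id="stop-btn">Stop</button> somewhere in the page
document.getElementById("stop-btn").onclick = function() { stopped = true; };
// then start draw() below with: if (stopped) return;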
var canvas = document.getElementById("my-canvas");
var ctx = canvas.getContext("2d");
var pixel_size = 1;
var t = 0;

/* We need this to fix t, because toBlob calls are asynchronous. */
function createBlobFunc(t) {
  return function(blob) {
    saveAs(blob, t.toString() + '.png');
  };
}

function draw() {
  console.log("draw");
  for (var x = 0; x < canvas.width; x += pixel_size) {
    for (var y = 0; y < canvas.height; y += pixel_size) {
      var b = (1.0 + Math.sin(t * Math.PI / 16)) / 2.0;
      ctx.fillStyle =
        "rgba(" +
        (x / canvas.width) * 255 + "," +
        (y / canvas.height) * 255 + "," +
        b * 255 +
        ",255)"
      ;
      ctx.fillRect(x, y, pixel_size, pixel_size);
    }
  }
  canvas.toBlob(createBlobFunc(t));
  t++;
  window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
<canvas id="my-canvas" width="512" height="512" style="border:1px solid black;"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.min.js"></script>
GitHub upstream.
Here's an image to GIF output using this instead: https://askubuntu.com/questions/648244/how-do-i-create-an-animated-gif-from-still-images-preferably-with-the-command-l
Frames get skipped if the FPS is too high
This can be observed by reducing the size of the canvas in the above demo to speed things up. At 32x32, my Chromium 77 downloads in chunks of about 10 files and skips about 50 files in between...
Unfortunately, there is no way to wait for the downloads to finish... close window after file save in FileSaver.js
So the only solution I can see if you have a high frame rate is frame rate limiting... Controlling fps with requestAnimationFrame? Here is a live demo: https://cirosantilli.com/#html-canvas
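A frame-rate-limiting loop along those lines could look like this sketch (the 10 fps target is arbitrary; draw() is the function from the demo above, with its own requestAnimationFrame call removed):

var targetFps = 10;
var frameInterval = 1000 / targetFps;
var lastDraw = 0;
function limitedLoop(now) {
  // `now` is the DOMHighResTimeStamp passed by requestAnimationFrame
  if (now - lastDraw >= frameInterval) {
    lastDraw = now;
    draw();
  }
  window.requestAnimationFrame(limitedLoop);
}
window.requestAnimationFrame(limitedLoop);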
Maybe one day someone will answer:
H.264 video encoder in javascript
Running ffmpeg in browser - options?
and then we will be able to download the video directly!
Here is an OpenGL version if you decide that the browser is not for you :-) How to use GLUT/OpenGL to render to a file?
Tested in Ubuntu 19.04.

This should help. It lets you drop in images, which are drawn onto an HTML5 canvas and then converted into a WebM video: http://techslides.com/demos/image-video/create.html

Pure JavaScript, no third-party packages.
If you have a video and want to take some frames from it, you can try the following:
class Video2Canvas {
  /**
   * @description Create a canvas and save the frames of the video that you give it.
   * @param {HTMLVideoElement} video
   * @param {Number} fps
   * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
   */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... you can modify the frame here; for example, create a grayscale frame:
        // https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 The part below is optional: it saves each frame as a link that you can click to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}
const v2c = new Video2Canvas(document.querySelector("video"), 1)
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>
If you want to edit the video (for example, take 5~8 sec + 12~15 sec and then create a new one out of them), you can try:
class CanvasRecord {
  /**
   * @param {HTMLCanvasElement} canvas
   * @param {Number} fps
   * @param {string} mediaType video/webm, video/mp4 (not supported yet), ...
   */
  constructor(canvas, fps, mediaType) {
    this.canvas = canvas
    const stream = canvas.captureStream(fps) // https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
    this.mediaRecorder = new MediaRecorder(stream, { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/MediaRecorder
      mimeType: mediaType
    })
    this.initControlBtn()
    this.chunks = []
    this.mediaRecorder.ondataavailable = (event) => {
      this.chunks.push(event.data)
    }
    this.mediaRecorder.onstop = (event) => {
      const blob = new Blob(this.chunks, {
        type: mediaType
      })
      const url = URL.createObjectURL(blob)
      // 👇 Test code below, so you can verify the recording succeeded. You can also download it if you wish.
      const video = document.createElement('video')
      video.src = url
      video.onended = (e) => {
        URL.revokeObjectURL(url);
      }
      document.querySelector("body").append(video)
      video.controls = true
    }
  }

  initControlBtn() {
    const range = document.createRange()
    const frag = range.createContextualFragment(`<div>
      <button id="btn-start">Start</button>
      <button id="btn-pause">Pause</button>
      <button id="btn-resume">Resume</button>
      <button id="btn-end">End</button>
    </div>
    `)
    const btnStart = frag.querySelector(`button[id="btn-start"]`)
    const btnPause = frag.querySelector(`button[id="btn-pause"]`)
    const btnResume = frag.querySelector(`button[id="btn-resume"]`)
    const btnEnd = frag.querySelector(`button[id="btn-end"]`)
    document.querySelector('body').append(frag)
    btnStart.onclick = (event) => {
      this.chunks = [] // clear
      this.mediaRecorder.start() // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/start
      console.log(this.mediaRecorder.state) // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/state
    }
    btnPause.onclick = (event) => { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/pause
      this.mediaRecorder.pause()
      console.log(this.mediaRecorder.state)
    }
    btnResume.onclick = (event) => {
      this.mediaRecorder.resume()
      console.log(this.mediaRecorder.state)
    }
    btnEnd.onclick = (event) => {
      this.mediaRecorder.requestData() // trigger `ondataavailable` // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/requestData
      this.mediaRecorder.stop()
      console.log(this.mediaRecorder.state)
    }
  }
}
class Video2Canvas {
  /**
   * @description Create a canvas and save the frames of the video that you give it.
   * @param {HTMLVideoElement} video
   * @param {Number} fps
   * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
   */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        /*
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... you can modify the frame here; for example, create a grayscale frame:
        // https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 The part below is optional: it saves each frame as a link that you can click to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        */
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}
(() => {
  const v2c = new Video2Canvas(document.querySelector("video"), 60)
  const canvasRecord = new CanvasRecord(v2c.canvas, 25, 'video/webm')
  v2c.video.addEventListener("play", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-resume").click()
  })
  v2c.video.addEventListener("pause", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-pause").click()
  })
})()
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>

Related

Taking camera input, changing it in a canvas element, and returning it back as a MediaStream is failing

I have a simple application that takes camera input, converts it to a canvas (where the stream can be manipulated), and then returns the manipulated stream back via captureStream. However, it seems like the stream isn't returning anything, as the output video is black.
Can someone point out where I went wrong here?
The code below can be copied/pasted and run.
<html>
<head></head>
<body>
  <video id="video-id" playsinline autoplay></video>
</body>
<script type="application/javascript">
  const video = document.getElementById('video-id');

  function manipulatedVideoStream(stream) {
    const temp_video = document.createElement('video');
    const temp_canvas = document.createElement('canvas');
    temp_video.srcObject = stream;
    const framerate = 1000 / 30; // ~30 fps
    setInterval(() => {
      temp_canvas.width = temp_video.videoWidth;
      temp_canvas.height = temp_video.videoHeight;
      const context = temp_canvas.getContext('2d');
      context.drawImage(temp_video, 0, 0, temp_video.width, temp_video.height);
      // draw some stuff in here
    }, framerate);
    return temp_canvas.captureStream(framerate);
  }

  const constraints = {
    audio: false,
    video: true
  };

  function handleSuccess(stream) {
    video.srcObject = manipulatedVideoStream(stream);
  }

  function handleError(error) {
    console.log('navigator.MediaDevices.getUserMedia error: ', error.message, error.name);
  }

  navigator.mediaDevices.getUserMedia(constraints).then(handleSuccess).catch(handleError);
</script>
</html>
The temp_video needs to have autoplay set:
temp_video.autoplay = true;

HTML/Javascript Webcam Video and Picture with different resolution

I am developing an app in HTML and TypeScript/JavaScript (Chrome support only, as it is embedded in an Electron app) in which a webcam (Logitech B910) video is streamed at a fairly low resolution (640 x 480), in order to do some post-processing (mainly TensorFlow & MediaPipe) without consuming too many resources on the computer.
From time to time, I also need to take some image captures at the webcam's highest resolution (1280 x 720 in my case).
What is the fastest way to take a capture at a higher resolution than the currently streamed video resolution?
The code below works, but the process (mainly the double resolution switch) is very slow (roughly 6 seconds on my computer).
Here are the code details:
1/ HTML code :
<div #containerRef class="camera-container" *ngIf="count">
  <canvas #canvasref class="camera-canvas"></canvas>
  <video #videoref playsinline class="camera-video" width="auto" height="auto">
  </video>
</div>
2/ Typescript Webcam video setup code :
static async startCamera(deviceId, containerSize, container: HTMLElement, targetFPS: number, _video, _canvas) {
  const videoConfig = {
    'audio': false,
    'video': {
      width: { min: 320, ideal: 640, max: 1280 },
      height: { min: 240, ideal: 480, max: 720 },
      frameRate: { ideal: targetFPS },
      'deviceId': { exact: deviceId }
    }
  };
  const stream = await navigator.mediaDevices.getUserMedia(videoConfig);
  const camera = new Camera(_video, _canvas);
  camera.video.srcObject = stream;
  await new Promise((resolve) => {
    camera.video.onloadedmetadata = () => { resolve(_video); };
  });
  camera.video.play();
  /// Video tag size
  camera.video.width = containerSize.width;
  camera.video.height = containerSize.height;
  /// Canvas tag size
  camera.canvas.width = containerSize.width
  camera.canvas.height = containerSize.height
  //// Container tag size
  container.style.width = containerSize.width.toString() + "px"
  container.style.height = containerSize.height.toString() + "px"
  return camera;
}
3/ The code I use to do the image capture :
async takePhoto(required_width, required_height) {
  const track = this.video.srcObject.getVideoTracks()[0];
  let constraints: MediaTrackConstraints = track.getConstraints();
  /// Save the current values to load them back at the end
  const currentWidth = constraints.width;
  const currentHeight = constraints.height;
  /// Apply the new resolution
  constraints.width = 1920;
  constraints.height = 780;
  await track.applyConstraints(constraints);
  /// Do the image capture
  const capture = new ImageCapture(track);
  const { imageWidth, imageHeight } = await capture.getPhotoCapabilities();
  const width = this.setInRange(required_width, imageWidth);
  const height = this.setInRange(required_height, imageHeight);
  const photoSettings = (width && height) ? {
    imageWidth: width,
    imageHeight: height
  } : null;
  const pic = await capture.takePhoto(photoSettings);
  /// Load back the previously saved resolution
  constraints.width = currentWidth;
  constraints.height = currentHeight;
  await track.applyConstraints(constraints);
  return pic;
}

setInRange(value, range) {
  if (!range) return NaN;
  let x = Math.min(range.max, Math.max(range.min, value));
  x = Math.round(x / range.step) * range.step;
  return x;
}
Note: I thought that takePhoto() from the Image Capture API was supposed to always use the highest webcam resolution, but in my case (and as reported by the getPhotoCapabilities() result too) it always uses the resolution I used to set up the camera (640 x 480 here), which is why I need this 'dirty'(?) and slow process.
Is there any faster way to do it?

Event for every frame of HTML Video?

I'd like to build an event handler to deal with each new frame of an HTML5 Video element. Unfortunately, there's no built-in event that fires for each new video frame (the timeupdate event is the closest, but fires for each time change rather than for each video frame).
Has anyone else run into this same issue? Is there a good way around it?
There is an HTMLVideoElement.requestVideoFrameCallback() method that is still being drafted, and is thus neither stable nor widely implemented (it is only in Chromium-based browsers), but it does what you want, along with giving many other details about that frame.
For your Firefox users, this browser has a non-standard seekToNextFrame() method which, depending on what you want to do, you could use. It won't exactly work as an event though; it is more a way to, well... seek to the next frame. So it will greatly affect the playback of the video, since it won't respect the duration of each frame.
And for Safari users, the closest is indeed the timeupdate event, but as you know, it doesn't really match the displayed frame.
(async () => {
  const log = document.querySelector("pre");
  const vid = document.querySelector("video");
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  if (vid.requestVideoFrameCallback) {
    await vid.play();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp, frame) => {
      log.textContent = `timestamp: ${timestamp}
frame: ${JSON.stringify(frame, null, 4)}`;
      ctx.drawImage(vid, 0, 0);
      vid.requestVideoFrameCallback(drawingLoop);
    };
    vid.requestVideoFrameCallback(drawingLoop);
  } else if (vid.seekToNextFrame) {
    const requestNextFrame = (callback) => {
      vid.addEventListener("seeked", () => callback(vid.currentTime), { once: true });
      vid.seekToNextFrame();
    };
    await vid.play();
    await vid.pause();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp) => {
      log.textContent = "timestamp: " + timestamp;
      ctx.drawImage(vid, 0, 0);
      requestNextFrame(drawingLoop);
    };
    requestNextFrame(drawingLoop);
  } else {
    console.error("Your browser doesn't support any of these methods, we should fall back to timeupdate");
  }
})();
video, canvas {
  width: 260px;
}
<pre></pre>
<video src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm" muted controls></video>
<canvas></canvas>
Note that the encoded frames and the displayed ones are not necessarily the same thing anyway, and browsers may not respect the encoded frame rate at all. So depending on what you are trying to do, a simple requestAnimationFrame loop, which fires at every update of the monitor, might be better.
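Such a fallback loop could be as simple as this sketch (reusing the vid and ctx variables from the snippet above):

// Redraw on every monitor refresh instead of on every decoded video frame.
const rafLoop = () => {
  ctx.drawImage(vid, 0, 0);
  requestAnimationFrame(rafLoop);
};
requestAnimationFrame(rafLoop);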

Scaling video from MediaRecorder

Setup
I have a simple setup like so:
<video src="/myvideo.mp4" id="my-vid" preload></video>
<script>
var chunks = [];
var vid = document.getElementById("my-vid");
vid.onloadeddata = function() {
var mr = new MediaRecorder(vid.captureStream(), {
mimeType: "video/webm; codecs=opus,vp8"
});
mr.ondataavailable = function(e) {
chunks.push(e.data);
}
mr.start();
vid.play();
vid.muted = false;
}
// ----
// everything below here just downloads the file so I can
// run it though ffprobe
// ----
vid.onended = function() {
setTimeout(function(){
var blob = new Blob(chunks, {type: "video/webm; codecs=opus,vp8"});
var link = document.createElement("a");
document.body.appendChild(link);
link.style = "display: none;";
var url = URL.createObjectURL(blob);
link.href = url;
link.download = "content.webm";
link.click();
URL.revokeObjectURL(url);
document.body.removeChild(link);
}, 1000);
}
</script>
My video is 640 pixels by 360 pixels in size.
What I want
What I want is to scale the output video so that it is blown up to 1920 by 1080 (yes, I realize it will be blocky and ugly; that's okay. I just need the resolution to match a second video so the two can be stitched together more easily).
What I've tried
Simply changing the <video> tag has no effect:
<video src="/myvideo.mp4" id="my-vid" preload height="1080" width="1920"></video>
This changes the display size, but not the decoded size (and therefore the MediaRecorder still returns a 640x360 video).
I can scale the video by using a <canvas> element like so:
<script>
  // ...
  vid.onplay = function() {
    var canvas = document.createElement("canvas");
    var ctx = canvas.getContext("2d");
    (function loop() {
      ctx.drawImage(vid, 0, 0, vid.videoWidth, vid.videoHeight, 0, 0, 1920, 1080);
      setTimeout(loop, 1000 / 30); // 30 fps
    })();
  };
</script>
If I then record the canvas stream, using mr = new MediaRecorder(canvas.captureStream(), ...) instead of mr = new MediaRecorder(vid.captureStream(), ...), then I get a scaled video, but there's no audio!
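One way to get the audio back (an untested sketch on my part, not something from the question) is to record a combined stream that takes the video track from the canvas and the audio track from the video element's own captured stream:

// Combine the scaled canvas video with the original audio into one stream.
var canvasStream = canvas.captureStream(30); // 30 fps
var audioTracks = vid.captureStream().getAudioTracks();
var combined = new MediaStream(
  canvasStream.getVideoTracks().concat(audioTracks)
);
var mr = new MediaRecorder(combined, {
  mimeType: "video/webm; codecs=opus,vp8"
});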

HTML5 web audio controls

I have a music playing example: http://www.smartjava.org/examples/webaudio/example3.html
I need to show an HTML5 audio player (with controls) for this song. How can I do that?
The JavaScript code from the example is below:
// create the audio context (chrome only for now)
if (!window.AudioContext) {
  if (!window.webkitAudioContext) {
    alert('no audiocontext found');
  }
  window.AudioContext = window.webkitAudioContext;
}
var context = new AudioContext();
var audioBuffer;
var sourceNode;
var analyser;
var javascriptNode;

// get the context from the canvas to draw on
var ctx = $("#canvas").get()[0].getContext("2d");

// create a gradient for the fill. Note the strange
// offset, since the gradient is calculated based on
// the canvas, not the specific element we draw
var gradient = ctx.createLinearGradient(0, 0, 0, 300);
gradient.addColorStop(1, '#000000');
gradient.addColorStop(0.75, '#ff0000');
gradient.addColorStop(0.25, '#ffff00');
gradient.addColorStop(0, '#ffffff');

// load the sound
setupAudioNodes();
loadSound("http://www.audiotreasure.com/mp3/Bengali/04_john/04_john_04.mp3");

function setupAudioNodes() {
  // setup a javascript node
  javascriptNode = context.createScriptProcessor(2048, 1, 1);
  // connect to destination, else it isn't called
  javascriptNode.connect(context.destination);
  // setup an analyzer
  analyser = context.createAnalyser();
  analyser.smoothingTimeConstant = 0.3;
  analyser.fftSize = 512;
  // create a buffer source node
  sourceNode = context.createBufferSource();
  sourceNode.connect(analyser);
  analyser.connect(javascriptNode);
  sourceNode.connect(context.destination);
}

// load the specified sound
function loadSound(url) {
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  // when loaded, decode the data
  request.onload = function() {
    context.decodeAudioData(request.response, function(buffer) {
      // when the audio is decoded, play the sound
      playSound(buffer);
    }, onError);
  };
  request.send();
}

function playSound(buffer) {
  sourceNode.buffer = buffer;
  sourceNode.start(0);
}

// log if an error occurs
function onError(e) {
  console.log(e);
}

// when the javascript node is called
// we use information from the analyzer node
// to draw the volume
javascriptNode.onaudioprocess = function() {
  // get the average for the first channel
  var array = new Uint8Array(analyser.frequencyBinCount);
  analyser.getByteFrequencyData(array);
  // clear the current state
  ctx.clearRect(0, 0, 1000, 325);
  // set the fill style
  ctx.fillStyle = gradient;
  drawSpectrum(array);
};

function drawSpectrum(array) {
  for (var i = 0; i < array.length; i++) {
    var value = array[i];
    ctx.fillRect(i * 5, 325 - value, 3, 325);
    // console.log([i, value])
  }
}
I think what you want is to use an audio tag for your source and use createMediaElementSource to pass the audio to Web Audio for visualization.
Beware that createMediaElementSource checks for CORS access, so you must have appropriate cross-origin access for this to work. (It looks like your audio source doesn't return the appropriate access headers.)
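A rough sketch of that approach, replacing the XHR + decodeAudioData loading above (the audio element and its id are assumptions for illustration):

<audio id="player" controls crossorigin="anonymous"
       src="http://www.audiotreasure.com/mp3/Bengali/04_john/04_john_04.mp3"></audio>

// Hook the element into the existing node graph instead of a buffer source.
var audioElement = document.getElementById("player"); // assumed id
var elementSource = context.createMediaElementSource(audioElement);
elementSource.connect(analyser);
analyser.connect(javascriptNode);
elementSource.connect(context.destination);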