I have been trying to debug something for a week, and I now suspect the problem is that the drawImage function does not have time to finish. I have a for loop that composes a canvas element by stitching together two other canvas elements, and then adds that composedCanvas as a frame to a GIF.js object. The problem I keep running into is that the bottom stitched canvas does not appear, or only partially appears, in my output GIF file (the picture below started to draw but did not finish). My question is: how do I ensure synchronous execution of drawImage in the context of a Vue SPA method? I have experimented with Promise, but I have not gotten it to work. Can anyone explain and help me with this, please?
EDIT: I have tried wrapping my drawImage call in a promise with await, but it raised type errors.
I managed to get it working by moving the drawImage step into a separate method and wrapping it in a promise properly. See the code below for the two methods that were the culprits, now fixed.
async composeCanvas(gif, timeStep, visibleLayers, delayInput) {
    const mapCnv = this.getMapCanvas();
    await this.updateInfoCanvas(timeStep);
    const numberVisibleLayers = visibleLayers.length;
    const composedCnv = await this.stitchCanvases(mapCnv, numberVisibleLayers);
    gif.addFrame(composedCnv, { copy: false, delay: delayInput });
},
async stitchCanvases(mapCanvas, numberVisibleLayers) {
    return new Promise((resolve) => {
        const composedCnv = document.createElement('canvas');
        const ctx = composedCnv.getContext('2d');
        const ctx_w = mapCanvas.width;
        const ctx_h = mapCanvas.height + ((numberVisibleLayers - 1) * 30) + 40;
        composedCnv.width = ctx_w;
        composedCnv.height = ctx_h;
        // draw the map canvas on top and the info canvas directly below it
        [
            { cnv: mapCanvas, y: 0 },
            { cnv: this.infoCanvas, y: mapCanvas.height }
        ].forEach((n) => {
            ctx.drawImage(n.cnv, 0, n.y, ctx_w, n.cnv.height);
        });
        resolve(composedCnv);
    });
}
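For context, here is a rough sketch (not the asker's actual code) of how these methods might be driven from the for loop described in the question; the timeSteps list and the GIF options are hypothetical, while gif.on('finished', ...) and gif.render() are the documented gif.js API:

async exportGif() {
    const gif = new GIF({ workers: 2, quality: 10 });
    for (const timeStep of this.timeSteps) {   // hypothetical list of frames
        await this.composeCanvas(gif, timeStep, this.visibleLayers, 500);
    }
    gif.on('finished', (blob) => window.open(URL.createObjectURL(blob)));
    gif.render();
}

Awaiting composeCanvas on every iteration guarantees each frame is fully stitched before the next one is added.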
I'd like to build an event handler to deal with each new frame of an HTML5 video element. Unfortunately, there's no built-in event that fires for each new video frame (the timeupdate event is the closest, but it fires on time changes rather than on each video frame).
Has anyone else run into this same issue? Is there a good way around it?
There is an HTMLVideoElement.requestVideoFrameCallback() method that is still being drafted, and thus neither stable nor widely implemented (it is only in Chromium-based browsers), but it does what you want, along with giving many other details about that frame.
For your Firefox users, this browser has a non-standard seekToNextFrame() method which, depending on what you want to do, you could use. It won't exactly work as an event though; it's more of a way to, well... seek to the next frame. So it will greatly affect the playback of the video, since it won't respect the duration of each frame.
And for Safari users, the closest is indeed the timeupdate event, but as you know, it doesn't really match the displayed frame.
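For completeness, that timeupdate fallback is a one-liner (it will miss frames between events; vid and ctx are the same variables as in the demo below):

vid.addEventListener("timeupdate", () => ctx.drawImage(vid, 0, 0));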
(async () => {
    const log = document.querySelector("pre");
    const vid = document.querySelector("video");
    const canvas = document.querySelector("canvas");
    const ctx = canvas.getContext("2d");
    if (vid.requestVideoFrameCallback) {
        await vid.play();
        canvas.width = vid.videoWidth;
        canvas.height = vid.videoHeight;
        ctx.filter = "invert(1)";
        const drawingLoop = (timestamp, frame) => {
            log.textContent = `timestamp: ${timestamp}
frame: ${JSON.stringify(frame, null, 4)}`;
            ctx.drawImage(vid, 0, 0);
            vid.requestVideoFrameCallback(drawingLoop);
        };
        vid.requestVideoFrameCallback(drawingLoop);
    }
    else if (vid.seekToNextFrame) {
        const requestNextFrame = (callback) => {
            vid.addEventListener("seeked", () => callback(vid.currentTime), { once: true });
            vid.seekToNextFrame();
        };
        await vid.play();
        await vid.pause();
        canvas.width = vid.videoWidth;
        canvas.height = vid.videoHeight;
        ctx.filter = "invert(1)";
        const drawingLoop = (timestamp) => {
            log.textContent = "timestamp: " + timestamp;
            ctx.drawImage(vid, 0, 0);
            requestNextFrame(drawingLoop);
        };
        requestNextFrame(drawingLoop);
    }
    else {
        console.error("Your browser doesn't support any of these methods, we should fall back to timeupdate");
    }
})();
video, canvas {
    width: 260px;
}
<pre></pre>
<video src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm" muted controls></video>
<canvas></canvas>
Note that the encoded frames and the displayed ones are not necessarily the same thing anyway, and that browsers may not respect the encoded frame rate at all. So depending on what you are trying to do, maybe a simple requestAnimationFrame loop, which fires at every repaint of the monitor, would be better, as sketched below.
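A minimal sketch of that loop, assuming the same vid and ctx variables as in the snippet above:

const rafLoop = () => {
    ctx.drawImage(vid, 0, 0);       // repaint at the monitor's refresh rate
    requestAnimationFrame(rafLoop);
};
requestAnimationFrame(rafLoop);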
I was able to successfully integrate the Three.js EffectComposer into A-Frame as a component by exporting everything as THREE.EffectComposer, THREE.SSAOPass, etc., adding the effect inside an A-Frame component, and tweaking the A-Frame renderer to update the effects in the scene. The OutlinePass from Three.js worked fine in this code, but SSAO is not working and I don't get any errors. Could someone please help me figure out the problem? The code for the SSAOPass looks like this:
AFRAME.registerComponent('ssao', {
    init: function () {
        this.el.addEventListener('that', evt => this.onEnter());
        this.el.addEventListener('mouseleave', evt => this.onLeave());
        setTimeout(() => this.el.emit("that"), 2000);
    },
    onEnter: function () {
        const scene = this.el.sceneEl.object3D;
        const camera = this.el.sceneEl.camera;
        const renderer = this.el.sceneEl.renderer;
        const render = renderer.render;
        const composer = new THREE.EffectComposer(renderer);
        //let renderPass = new THREE.RenderPass(scene, camera);
        //let outlinePass = new THREE.OutlinePass(new THREE.Vector2(window.innerWidth, window.innerHeight), scene, camera);
        const ssaoPass = new THREE.SSAOPass(scene, camera, window.innerWidth, window.innerHeight);
        //composer.addPass(renderPass);
        //composer.addPass(outlinePass);
        ssaoPass.kernelRadius = 16;
        composer.addPass(ssaoPass);
        // let objects = [];
        // this.el.object3D.traverse(node => {
        //     if (!node.isMesh) return;
        //     objects.push(node);
        // });
        // outlinePass.selectedObjects = objects;
        // outlinePass.renderToScreen = true;
        // outlinePass.edgeStrength = this.data.strength;
        // outlinePass.edgeGlow = this.data.glow;
        // outlinePass.visibleEdgeColor.set(this.data.color);
        // HACK the AFRAME render method (a bit ugly)
        const clock = new THREE.Clock();
        this.originalRenderMethod = render;
        let calledByComposer = false;
        renderer.render = function () {
            if (calledByComposer) {
                render.apply(renderer, arguments);
            } else {
                calledByComposer = true;
                composer.render(clock.getDelta());
                calledByComposer = false;
            }
        };
    },
    onLeave: function () {
        this.el.sceneEl.renderer.render = this.originalRenderMethod;
    },
    remove: function () {
        this.onLeave();
    }
});
I have also created a Glitch project, which I am sharing here. Please feel free to join and collaborate on my project.
Edit link: https://glitch.com/edit/#!/accessible-torpid-partridge
Site link: https://accessible-torpid-partridge.glitch.me
Thanks in advance.
The code is correct; all you need to do is tweak the exposed SSAOShader uniforms: SSAOPass.kernelRadius, SSAOPass.minDistance, SSAOPass.maxDistance - like in the Three.js example.
Keep in mind that the scale in that example is huge, so the values will need to be different in a default A-Frame scene.
It's a good idea to make the component dynamically updatable (via setAttribute(), if you properly handle updates), so you can see what's going on in real time. Something like I did here: SSAO in a-frame (also based on Don McCurdy's gist).
I've used some basic HTML elements; most Three.js examples use dat.GUI, which is made for demo/debug tweaks.
Here is the code, sketched minimally below; the full version is in the linked example.
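The sketch assumes the SSAO pass is created as in the question and stored on the scene element (sceneEl.ssaoPass is an assumption, not an A-Frame API); the schema lets you retune the uniforms at runtime with setAttribute():

AFRAME.registerComponent('ssao-tweak', {
    schema: {
        kernelRadius: { default: 16 },
        minDistance: { default: 0.005 },
        maxDistance: { default: 0.1 }
    },
    // update() runs once on init and again whenever setAttribute() changes the data
    update: function () {
        const ssaoPass = this.el.sceneEl.ssaoPass; // assumed to be stored there by the 'ssao' component
        if (!ssaoPass) { return; }
        ssaoPass.kernelRadius = this.data.kernelRadius;
        ssaoPass.minDistance = this.data.minDistance;
        ssaoPass.maxDistance = this.data.maxDistance;
    }
});
// e.g. sceneEl.setAttribute('ssao-tweak', 'kernelRadius: 8; minDistance: 0.002');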
I want to create an audio program that can play audio from very low frequency to high frequency.
However, this code produces different results (even on the same device):
1. The sound comes out suddenly, while the expected result is that it comes out gradually. I am sure my hearing is okay, because I've asked my friends to listen too;
2. The audio sounds different at the same frequency.
WARNING: Please adjust your volume to very low in case of any hurting before running this script.
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
// create Oscillator node
var oscillator = audioCtx.createOscillator();
var osc_arr = [];

function purgeSound() {
    osc_arr.forEach(function (v) {
        try {
            v.stop();
            v.disconnect(audioCtx.destination);
        } catch (e) {}
    });
}

function playSoundAtFreq(fq) {
    purgeSound();
    var osc = audioCtx.createOscillator();
    osc_arr.push(osc);
    osc.type = 'square';
    osc.frequency.setValueAtTime(fq, audioCtx.currentTime); // value in hertz
    $('#fff').val(fq);
    osc.connect(audioCtx.destination);
    osc.start();
}

$('#stop').click(function () {
    purgeSound();
    _break = true;
});

var _break = false;

function sleep(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
}

var pointer = 0;
var go = appendAttemptAsync(10000);

async function appendAttemptAsync(range) {
    if (_break) return;
    var target = pointer + range;
    for (pointer; pointer < target; pointer++) { // sweep up to the target frequency
        playSoundAtFreq(pointer);
        console.log(pointer);
        await sleep(100);
    }
    return 5221;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
<button id='stop'>stop</button>
<input id="fff" type="text" />
Thanks for any suggestions to improve my code.
If you want an Oscillator to sweep like in the YouTube video that you mentioned, you can do something like:
let osc = new OscillatorNode(audioCtx);
osc.connect(audioCtx.destination);
// start at 20 Hz and ramp linearly up to the Nyquist frequency over 300 s
osc.frequency.setValueAtTime(20, audioCtx.currentTime);
osc.frequency.linearRampToValueAtTime(audioCtx.sampleRate / 2, audioCtx.currentTime + 300);
osc.start();
Change the 300 to some appropriate time over which the tone sweeps. I arbitrarily chose 5 minutes.
I do not know why your example doesn't work, but this snippet is the typical way to sweep a tone using WebAudio.
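If the sweep still doesn't feel gradual, a variation worth trying (my suggestion, not part of the snippet above) is an exponential ramp, since pitch perception is roughly logarithmic; note that the start value must be above 0 Hz:

const sweepOsc = new OscillatorNode(audioCtx);
sweepOsc.connect(audioCtx.destination);
sweepOsc.frequency.setValueAtTime(20, audioCtx.currentTime); // exponential ramps cannot start at 0
sweepOsc.frequency.exponentialRampToValueAtTime(audioCtx.sampleRate / 2, audioCtx.currentTime + 300);
sweepOsc.start();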
I'd like to convert an animation in HTML5 canvas to a video file that could be uploaded to YouTube. Is there any sort of screen capture API, or something else, that could allow me to do this programmatically?
Back to 2020
I solved it by using the MediaRecorder API. It is built exactly to do that, among other things.
Here is a solution that records X ms of canvas video.
You can extend it with a button UI to start, pause, resume, stop, and generate the URL.
function record(canvas, time) {
    var recordedChunks = [];
    return new Promise(function (res, rej) {
        var stream = canvas.captureStream(25 /*fps*/);
        var mediaRecorder = new MediaRecorder(stream, {
            mimeType: "video/webm; codecs=vp9"
        });
        // ondataavailable will fire at intervals of `time || 4000` ms
        mediaRecorder.start(time || 4000);
        mediaRecorder.ondataavailable = function (event) {
            recordedChunks.push(event.data);
            // after stop() the `dataavailable` event fires one more time
            if (mediaRecorder.state === 'recording') {
                mediaRecorder.stop();
            }
        };
        mediaRecorder.onstop = function (event) {
            var blob = new Blob(recordedChunks, { type: "video/webm" });
            var url = URL.createObjectURL(blob);
            res(url);
        };
    });
}
How to use:
const recording = record(canvas, 10000)
// play it on another video element
var video$ = document.createElement('video')
document.body.appendChild(video$)
recording.then(url => video$.setAttribute('src', url) )
// download it
var link$ = document.createElement('a')
link$.setAttribute('download','recordingVideo')
recording.then(url => {
link$.setAttribute('href', url)
link$.click()
})
Firefox has an experimental feature (disabled by default) that is called HTMLCanvasElement.captureStream()
Essentially it captures the canvas element as a video stream, which can then be sent to another computer using RTCPeerConnection(), or perhaps you can use the YouTube Live Streaming API to stream directly.
See: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
Also: https://developers.google.com/youtube/v3/live/getting-started
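A minimal sketch of the capture-and-send part (signaling and the YouTube side are omitted; the 30 fps value is just an example):

const stream = canvas.captureStream(30); // 30 fps
const pc = new RTCPeerConnection();
stream.getTracks().forEach((track) => pc.addTrack(track, stream));
// ...then create an offer and exchange SDP with the remote peer as usual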
There exists the whammy library, which claims to produce WebM videos from stills using JavaScript:
http://antimatter15.com/wp/2012/08/whammy-a-real-time-javascript-webm-encoder/
Note that there are limitations (as is to be expected). This encoder bases itself on the WebP image format, which is currently only supported in Chrome (perhaps the new Opera too, but I haven't checked). This means you can't encode in other browsers unless you find a way to encode the image you want to use as a WebP image first (see this link for a possible solution).
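A rough usage sketch based on the library's documented API (verify the method names against the version you use):

var encoder = new Whammy.Video(15);   // 15 fps
encoder.add(canvas);                  // call once per rendered frame
var webmBlob = encoder.compile();     // assemble the frames into a WebM blob
var url = URL.createObjectURL(webmBlob);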
Beyond that there is no way to create a video file from images using JavaScript and canvas using native browser APIs.
FileSaver.js + ffmpeg on the command line
With FileSaver.js we can download each canvas frame as a PNG: Save to Local File from Blob
Then we just convert the PNGs to any video format with ffmpeg from the command line: How to create a video from images with FFmpeg?
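For example, with the 0.png, 1.png, ... naming produced by the demo below, something along these lines should work (the frame rate and codec flags are just an example):

ffmpeg -framerate 60 -i %d.png -c:v libx264 -pix_fmt yuv420p out.mp4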
Chromium 75 asks if you want to allow it to save multiple images. Once you say yes, it downloads the images automatically, one by one, into your download folder, named 0.png, 1.png, etc.
It also worked in Firefox 68, but less well, because the browser opens a bunch of "Do you want to save this file" windows. They do have a "do the same for similar downloads" popup, but you have to be quick to select it and hit Enter, or else a new popup comes along!
To stop it, you have to close the tab, or add a stop button and some JavaScript logic, as sketched below.
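A minimal sketch of such a stop button (the stop-button id is hypothetical, and the demo's draw() would need to check the flag before doing anything):

var stopped = false;
document.getElementById("stop-button").addEventListener("click", function () {
    stopped = true;
});
// then add at the top of draw(): if (stopped) return;

The full demo follows: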
var canvas = document.getElementById("my-canvas");
var ctx = canvas.getContext("2d");
var pixel_size = 1;
var t = 0;

/* We need this to fix t, because toBlob calls are asynchronous. */
function createBlobFunc(t) {
    return function (blob) {
        saveAs(blob, t.toString() + '.png');
    };
}

function draw() {
    console.log("draw");
    for (var x = 0; x < canvas.width; x += pixel_size) {
        for (var y = 0; y < canvas.height; y += pixel_size) {
            var b = (1.0 + Math.sin(t * Math.PI / 16)) / 2.0;
            ctx.fillStyle =
                "rgba(" +
                (x / canvas.width) * 255 + "," +
                (y / canvas.height) * 255 + "," +
                b * 255 +
                ",255)";
            ctx.fillRect(x, y, pixel_size, pixel_size);
        }
    }
    canvas.toBlob(createBlobFunc(t));
    t++;
    window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
<canvas id="my-canvas" width="512" height="512" style="border:1px solid black;"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.min.js"></script>
GitHub upstream.
Here's an image-to-GIF output using this instead: https://askubuntu.com/questions/648244/how-do-i-create-an-animated-gif-from-still-images-preferably-with-the-command-l
Frames get skipped if the FPS is too high
This can be observed by reducing the size of the canvas in the above demo to speed things up. At 32x32, my Chromium 77 downloads in chunks of about 10 files and skips about 50 files in between...
Unfortunately, there is no way to wait for the downloads to finish... close window after file save in FileSaver.js
So the only solution I can see if you have a high frame rate is frame rate limiting... Controlling fps with requestAnimationFrame? Here is a live demo: https://cirosantilli.com/#html-canvas
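A minimal sketch of that limiting idea (assuming draw() from the demo above no longer schedules itself):

var fps = 10;
var lastTime = 0;
function limitedLoop(now) {
    window.requestAnimationFrame(limitedLoop);
    if (now - lastTime < 1000 / fps) return; // too soon, skip this repaint
    lastTime = now;
    draw();
}
window.requestAnimationFrame(limitedLoop);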
Maybe one day someone will answer:
H.264 video encoder in javascript
Running ffmpeg in browser - options?
and then we will be able to download the video directly!
Here is an OpenGL version if you decide that the browser is not for you :-) How to use GLUT/OpenGL to render to a file?
Tested in Ubuntu 19.04.
This should help; it allows you to drop some images that get converted into an HTML5 canvas and then into a WebM video: http://techslides.com/demos/image-video/create.html
Pure JavaScript, no third-party packages.
If you have a video and want to take some frames, you can try the following:
class Video2Canvas {
    /**
     * @description Create a canvas and save the frames of the video that you give it.
     * @param {HTMLVideoElement} video
     * @param {Number} fps
     * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
     * */
    constructor(video, fps) {
        this.video = video
        this.fps = fps
        this.canvas = document.createElement("canvas");
        [this.canvas.width, this.canvas.height] = [video.width, video.height]
        document.querySelector("body").append(this.canvas)
        this.ctx = this.canvas.getContext('2d')
        this.initEventListener()
    }

    initEventListener() {
        this.video.addEventListener("play", () => {
            const timeout = Math.round(1000 / this.fps)
            const width = this.video.width
            const height = this.video.height
            const recordFunc = () => {
                if (this.video.paused || this.video.ended) {
                    return
                }
                this.ctx.drawImage(this.video, 0, 0, width, height)
                const frame = this.ctx.getImageData(0, 0, width, height)
                // ... you can modify the frame here, for example create a grayscale frame:
                // https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
                // 👇 Below is optional: it saves each frame as a link that you can click to download the picture.
                const range = document.createRange()
                const frag = range.createContextualFragment('<div><a></a></div>')
                const tmpCanvas = document.createElement('canvas')
                tmpCanvas.width = this.canvas.width
                tmpCanvas.height = this.canvas.height
                tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
                const a = frag.querySelector('a')
                a.innerText = "my.png"
                a.download = "my.png"
                const quality = 1.0
                a.href = tmpCanvas.toDataURL("image/png", quality)
                a.append(tmpCanvas)
                document.querySelector('body').append(frag)
                setTimeout(recordFunc, timeout)
            }
            setTimeout(recordFunc, timeout)
        })
    }
}
const v2c = new Video2Canvas(document.querySelector("video"), 1)
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>
If you want to edit the video (for example, take 5~8 sec plus 12~15 sec and then create a new one), you can try:
class CanvasRecord {
    /**
     * @param {HTMLCanvasElement} canvas
     * @param {Number} fps
     * @param {string} mediaType: video/webm, video/mp4 (not supported yet) ...
     * */
    constructor(canvas, fps, mediaType) {
        this.canvas = canvas
        const stream = canvas.captureStream(fps) // https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
        this.mediaRecorder = new MediaRecorder(stream, { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/MediaRecorder
            mimeType: mediaType
        })
        this.initControlBtn()
        this.chunks = []
        this.mediaRecorder.ondataavailable = (event) => {
            this.chunks.push(event.data)
        }
        this.mediaRecorder.onstop = (event) => {
            const blob = new Blob(this.chunks, {
                type: mediaType
            })
            const url = URL.createObjectURL(blob)
            // 👇 Below is test code to show that the recording succeeded. You can also download it if you wish.
            const video = document.createElement('video')
            video.src = url
            video.onended = (e) => {
                URL.revokeObjectURL(this.src);
            }
            document.querySelector("body").append(video)
            video.controls = true
        }
    }

    initControlBtn() {
        const range = document.createRange()
        const frag = range.createContextualFragment(`<div>
            <button id="btn-start">Start</button>
            <button id="btn-pause">Pause</button>
            <button id="btn-resume">Resume</button>
            <button id="btn-end">End</button>
        </div>
        `)
        const btnStart = frag.querySelector(`button[id="btn-start"]`)
        const btnPause = frag.querySelector(`button[id="btn-pause"]`)
        const btnResume = frag.querySelector(`button[id="btn-resume"]`)
        const btnEnd = frag.querySelector(`button[id="btn-end"]`)
        document.querySelector('body').append(frag)
        btnStart.onclick = (event) => {
            this.chunks = [] // clear
            this.mediaRecorder.start() // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/start
            console.log(this.mediaRecorder.state) // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/state
        }
        btnPause.onclick = (event) => { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/pause
            this.mediaRecorder.pause()
            console.log(this.mediaRecorder.state)
        }
        btnResume.onclick = (event) => {
            this.mediaRecorder.resume()
            console.log(this.mediaRecorder.state)
        }
        btnEnd.onclick = (event) => {
            this.mediaRecorder.requestData() // trigger `ondataavailable` // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/requestData
            this.mediaRecorder.stop()
            console.log(this.mediaRecorder.state)
        }
    }
}
class Video2Canvas {
    /**
     * @description Create a canvas and save the frames of the video that you give it.
     * @param {HTMLVideoElement} video
     * @param {Number} fps
     * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
     * */
    constructor(video, fps) {
        this.video = video
        this.fps = fps
        this.canvas = document.createElement("canvas");
        [this.canvas.width, this.canvas.height] = [video.width, video.height]
        document.querySelector("body").append(this.canvas)
        this.ctx = this.canvas.getContext('2d')
        this.initEventListener()
    }

    initEventListener() {
        this.video.addEventListener("play", () => {
            const timeout = Math.round(1000 / this.fps)
            const width = this.video.width
            const height = this.video.height
            const recordFunc = () => {
                if (this.video.paused || this.video.ended) {
                    return
                }
                this.ctx.drawImage(this.video, 0, 0, width, height)
                /*
                const frame = this.ctx.getImageData(0, 0, width, height)
                // ... you can modify the frame here, for example create a grayscale frame:
                // https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
                // 👇 Below is optional: it saves each frame as a link that you can click to download the picture.
                const range = document.createRange()
                const frag = range.createContextualFragment('<div><a></a></div>')
                const tmpCanvas = document.createElement('canvas')
                tmpCanvas.width = this.canvas.width
                tmpCanvas.height = this.canvas.height
                tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
                const a = frag.querySelector('a')
                a.innerText = "my.png"
                a.download = "my.png"
                const quality = 1.0
                a.href = tmpCanvas.toDataURL("image/png", quality)
                a.append(tmpCanvas)
                document.querySelector('body').append(frag)
                */
                setTimeout(recordFunc, timeout)
            }
            setTimeout(recordFunc, timeout)
        })
    }
}

(() => {
    const v2c = new Video2Canvas(document.querySelector("video"), 60)
    const canvasRecord = new CanvasRecord(v2c.canvas, 25, 'video/webm')
    v2c.video.addEventListener("play", (event) => {
        if (canvasRecord.mediaRecorder.state === "inactive") {
            return
        }
        document.getElementById("btn-resume").click()
    })
    v2c.video.addEventListener("pause", (event) => {
        if (canvasRecord.mediaRecorder.state === "inactive") {
            return
        }
        document.getElementById("btn-pause").click()
    })
})()
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>