HTML5: wait for drawImage to finish in a Vue SPA

I have been trying to debug something for a week, and I now suspect the problem is that the drawImage call does not have time to finish. I have a for loop that composes a canvas element by stitching together two other canvas elements, and then adds that composed canvas as a frame to a GIF.js object. The problem I keep running into is that the bottom stitched canvas does not appear, or only partially appears, in my output GIF file (in the picture below it started to draw but did not finish). My question is: how do I ensure synchronous execution of drawImage inside a Vue SPA method? I have experimented with Promise, but I have not gotten it to work. Can anyone explain and help me with this, please?
EDIT: I have tried wrapping my drawImage call in a promise and awaiting it, but that raised type errors.

I managed to get it working by moving the drawImage step into a separate method and properly wrapping it in a Promise. See the code below for the two methods that were the culprits, now fixed.
async composeCanvas( gif, timeStep, visibleLayers, delayInput ) {
    const mapCnv = this.getMapCanvas();
    // Wait for the info canvas to be redrawn for this time step first.
    await this.updateInfoCanvas( timeStep );
    const numberVisibleLayers = visibleLayers.length;
    // Only add the frame once the stitched canvas has been fully drawn.
    const composedCnv = await this.stitchCanvases( mapCnv, numberVisibleLayers );
    gif.addFrame( composedCnv, { copy: false, delay: delayInput } );
},
async stitchCanvases( mapCanvas, numberVisibleLayers ) {
    return new Promise(( resolve ) => {
        const composedCnv = document.createElement('canvas');
        const ctx = composedCnv.getContext('2d');
        const ctx_w = mapCanvas.width;
        const ctx_h = mapCanvas.height + ((numberVisibleLayers - 1) * 30) + 40;
        composedCnv.width = ctx_w;
        composedCnv.height = ctx_h;
        // Draw the map canvas on top and the info canvas below it.
        [
            { cnv: mapCanvas, y: 0 },
            { cnv: this.infoCanvas, y: mapCanvas.height }
        ].forEach(( n ) => {
            ctx.beginPath();
            ctx.drawImage( n.cnv, 0, n.y, ctx_w, n.cnv.height );
        });
        // Resolve only after both drawImage calls have run.
        resolve( composedCnv );
    });
}
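For completeness, a minimal usage sketch (my own addition; the loop variable names and the timeSteps array are assumptions, not from the original post). Because composeCanvas is async, the frame loop has to await each iteration before rendering the GIF:

// Hypothetical calling loop: await each frame so drawImage has finished
// before the next frame is composed.
async createGif( gif, timeSteps, visibleLayers, delayInput ) {
    for (const timeStep of timeSteps) {
        await this.composeCanvas( gif, timeStep, visibleLayers, delayInput );
    }
    gif.render(); // gif.js starts encoding once all frames have been added
}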

Related

Event for every frame of HTML Video?

I'd like to build an event handler to deal with each new frame of an HTML5 Video element. Unfortunately, there's no built-in event that fires for each new video frame (the timeupdate event is the closest, but it fires for each time change rather than for each video frame).
Has anyone else run into this same issue? Is there a good way around it?
There is an HTMLVideoElement.requestVideoFrameCallback() method that is still being drafted, and thus neither stable nor widely implemented (it is only in Chromium-based browsers), but it does what you want, along with giving many other details about that frame.
For your Firefox users, this browser has a non-standard seekToNextFrame() method which, depending on what you want to do, you could use. It won't exactly work as an event, though; it is more a way to, well... seek to the next frame. So it will greatly affect the playback of the video, since it won't respect the duration of each frame.
And for Safari users, the closest is indeed the timeupdate event, but as you know, it doesn't really match the displayed frame.
(async () => {
  const log = document.querySelector("pre");
  const vid = document.querySelector("video");
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  if (vid.requestVideoFrameCallback) {
    // Chromium: the callback fires once per composited video frame.
    await vid.play();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp, frame) => {
      log.textContent = `timestamp: ${timestamp}
frame: ${JSON.stringify(frame, null, 4)}`;
      ctx.drawImage(vid, 0, 0);
      vid.requestVideoFrameCallback(drawingLoop);
    };
    vid.requestVideoFrameCallback(drawingLoop);
  }
  else if (vid.seekToNextFrame) {
    // Firefox: seek frame by frame and listen for the "seeked" event.
    const requestNextFrame = (callback) => {
      vid.addEventListener("seeked", () => callback(vid.currentTime), { once: true });
      vid.seekToNextFrame();
    };
    await vid.play();
    await vid.pause();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp) => {
      log.textContent = "timestamp: " + timestamp;
      ctx.drawImage(vid, 0, 0);
      requestNextFrame(drawingLoop);
    };
    requestNextFrame(drawingLoop);
  }
  else {
    console.error("Your browser doesn't support any of these methods, we should fall back to timeupdate");
  }
})();
video, canvas {
  width: 260px;
}
<pre></pre>
<video src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm" muted controls></video>
<canvas></canvas>
Note that the encoded frames and the displayed ones are not necessarily the same thing anyway, and that browsers may not respect the encoded frame rate at all. So depending on what you are trying to do, a simple requestAnimationFrame loop, which fires at every update of the monitor, might be better.
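A minimal sketch of that requestAnimationFrame fallback (my own addition, reusing the vid and ctx variables from the snippet above): it simply repaints on every monitor refresh while the video plays, regardless of the actual video frame rate.

// Assumed fallback: redraw once per display refresh while the video is playing.
const rafLoop = () => {
  if (!vid.paused && !vid.ended) {
    ctx.drawImage(vid, 0, 0);
  }
  requestAnimationFrame(rafLoop);
};
requestAnimationFrame(rafLoop);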

Implementing Three.js SSAOPass in AFrame

I was able to successfully integrate the three.js EffectComposer in A-Frame as a component, by exporting everything as THREE.EffectComposer, THREE.SSAOPass, etc., adding the effect inside an A-Frame component, and tweaking the A-Frame renderer to update the effects in the scene. OutlinePass from three.js worked fine in this code, but SSAO is not working and I don't get any errors. Please, someone help me figure out the problem. The code for the SSAOPass looks like this:
AFRAME.registerComponent('ssao', {
  init: function () {
    this.el.addEventListener('that', evt => this.onEnter());
    this.el.addEventListener('mouseleave', evt => this.onLeave());
    setTimeout(() => this.el.emit("that"), 2000);
  },
  onEnter: function () {
    const scene = this.el.sceneEl.object3D;
    const camera = this.el.sceneEl.camera;
    const renderer = this.el.sceneEl.renderer;
    const render = renderer.render;
    const composer = new THREE.EffectComposer(renderer);
    //let renderPass = new THREE.RenderPass(scene, camera);
    //let outlinePass = new THREE.OutlinePass(new THREE.Vector2(window.innerWidth, window.innerHeight), scene, camera);
    const ssaoPass = new THREE.SSAOPass(scene, camera, window.innerWidth, window.innerHeight);
    //composer.addPass(renderPass);
    //composer.addPass(outlinePass);
    ssaoPass.kernelRadius = 16;
    composer.addPass(ssaoPass);
    // let objects = [];
    // this.el.object3D.traverse(node => {
    //   if (!node.isMesh) return;
    //   objects.push(node);
    // });
    // outlinePass.selectedObjects = objects;
    // outlinePass.renderToScreen = true;
    // outlinePass.edgeStrength = this.data.strength;
    // outlinePass.edgeGlow = this.data.glow;
    // outlinePass.visibleEdgeColor.set(this.data.color);
    // HACK the A-Frame render method (a bit ugly)
    const clock = new THREE.Clock();
    this.originalRenderMethod = render;
    let calledByComposer = false;
    renderer.render = function () {
      if (calledByComposer) {
        render.apply(renderer, arguments);
      } else {
        calledByComposer = true;
        composer.render(clock.getDelta());
        calledByComposer = false;
      }
    };
  },
  onLeave: function () {
    this.el.sceneEl.renderer.render = this.originalRenderMethod;
  },
  remove: function () {
    this.onLeave();
  }
});
I have also created a Glitch project, which I am sharing here. Please feel free to join and collaborate on my project.
Edit link: https://glitch.com/edit/#!/accessible-torpid-partridge
Site link: https://accessible-torpid-partridge.glitch.me
Thanks in advance.
The code is correct; all you need is to tweak the exposed SSAOShader uniforms, SSAOPass.kernelRadius, SSAOPass.minDistance, and SSAOPass.maxDistance, as in the three.js example.
Keep in mind that the scale in that example is huge, so the values will need to be different in a default A-Frame scene.
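As a rough illustration (the numbers below are my own assumptions for a default-scale A-Frame scene, not values from the answer), the tweak is just assigning smaller values before adding the pass:

// Illustrative starting values for a default-scale A-Frame scene; tune by eye.
ssaoPass.kernelRadius = 0.2;
ssaoPass.minDistance = 0.001;
ssaoPass.maxDistance = 0.05;
composer.addPass(ssaoPass);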
It's a good idea to be able to dynamically update a component (via setAttribute(), if you properly handle updates), so you can see what's going on in real time. Something like I did here: SSAO in a-frame (also based on Don McCurdy's gist).
I've used some basic HTML elements; most three.js examples use dat.GUI, which is made for demo / debug tweaks.
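A minimal sketch of that pattern (my own illustration; the component name and schema fields are hypothetical, and it assumes onEnter stores the pass as this.ssaoPass):

// Hypothetical schema + update handler, so that
// el.setAttribute('ssao-tweak', 'kernelRadius', 0.2) takes effect immediately.
AFRAME.registerComponent('ssao-tweak', {
  schema: {
    kernelRadius: { default: 16 },
    minDistance: { default: 0.005 },
    maxDistance: { default: 0.1 }
  },
  update: function () {
    if (!this.ssaoPass) { return; } // assumes onEnter saved the pass here
    this.ssaoPass.kernelRadius = this.data.kernelRadius;
    this.ssaoPass.minDistance = this.data.minDistance;
    this.ssaoPass.maxDistance = this.data.maxDistance;
  }
});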

Save & load a texture with alpha component in three.js

The following code works perfectly for images that do not contain an alpha channel:
toJSON() {
    let output = super.toJSON();
    output["geometry"] = this.geometry;
    output["imageURL"] = this.mesh.toJSON().images[0]["url"];
    return output;
}

fromJSON(data) {
    super.fromJSON(data);
    this.geometry = data["geometry"];
    this.image_path = data["imageURL"];
    this.refreshImage();
}

refreshImage() {
    const this_obj = this;
    const image_texture = new THREE.TextureLoader().load(
        // image to load
        this.image_path,
        // onLoad callback to create the material only once the texture is loaded
        // and its dimensions are available; this ensures the aspect ratio is
        // based on the actual texture loaded.
        (texture) => {
            this_obj.changeGeometry(texture.image.width / texture.image.height);
        },
        // onProgress callback, not required
        undefined,
        // onError callback
        (err) => {
            alert("An error occurred while attempting to load image");
        }
    );
    this.mesh.material.map.dispose();
    this.mesh.material.dispose();
    this.mesh.material = new THREE.MeshPhongMaterial({
        map: image_texture,
        side: THREE.DoubleSide,
        transparent: true
    });
    this.mesh.material.color.set(this.color);
    this.mesh.material.needsUpdate = true;
}
Unfortunately, it does not work for images with an alpha channel, because transparent areas are rendered as opaque black.
Does anyone know why this happens and how best to achieve the desired result?
EDIT:
I got an answer to my question when I realized that the issue comes from the Mesh.toJSON call. The method is a recursive one that is a real rabbit hole. At the bottom of the rabbit hole you find that texture images are converted to base64 by drawing the image onto a temporary internal canvas. This happens in the ImageUtils.js module, inside the getDataURL() function.
The issue is that texture images larger than 2048 in width or height are converted into compressed "jpeg" format rather than the "png" format that retains the alpha component.
This explains everything.
You can load any image and apply it to a material using TextureLoader, but as soon as you call toJSON to serialize your mesh, the alpha component is lost if the underlying image is larger than 2048 pixels wide or high.
The solution in my case is to write my own function that draws to a canvas and converts the image to base64, but supports larger image sizes. Of course, one would have to warn the user that it may take some time to perform the conversion.
Here is the texture-to-URL converter that I came up with, stealing heavily from ImageUtils.js and removing error-handling code:
function ImageURLfromTexture( image_texture, retain_alpha = true ) {
    const image = image_texture.image;
    if (image !== undefined) {
        // Already a data URL, nothing to convert.
        if (/^data:/i.test(image.src)) {
            return image.src;
        }
        let _canvas = document.createElementNS('http://www.w3.org/1999/xhtml', 'canvas');
        _canvas.width = image.width;
        _canvas.height = image.height;
        const context = _canvas.getContext('2d');
        if (image instanceof ImageData) {
            context.putImageData(image, 0, 0);
        } else {
            context.drawImage(image, 0, 0, image.width, image.height);
        }
        // Unlike ImageUtils.getDataURL(), only fall back to lossy JPEG for
        // large images when the caller explicitly gives up the alpha channel.
        if ((_canvas.width > 2048 || _canvas.height > 2048) && (!retain_alpha)) {
            return _canvas.toDataURL('image/jpeg', 0.6);
        } else {
            return _canvas.toDataURL('image/png');
        }
    } else {
        return null;
    }
}
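A hypothetical way to wire it in (my own sketch, assuming the texture is reachable at this.mesh.material.map, as in the refreshImage method above) is to bypass mesh.toJSON() entirely:

toJSON() {
    let output = super.toJSON();
    output["geometry"] = this.geometry;
    // Use the custom converter so PNG (and thus alpha) is kept for large images.
    output["imageURL"] = ImageURLfromTexture(this.mesh.material.map);
    return output;
}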

Dashed Line SVG Animation

I'm new to working with SVG animations, so I apologize in advance if there is a straightforward answer to this. With a combination of ScrollMagic.js and GSAP I was able to get the SVG to animate when the user scrolls to a specific position. The issue I'm running into is that the animation converts the dashed line into a solid stroke. I've done some research and found that using a mask would be the best way to keep the dashes, though I'm a bit lost on how to approach this method.
https://codepen.io/ncanarelli/pen/wvaeLNa
js:
$(document).ready(function() {
  function pathPrepare($el) {
    // Set the dash to the full path length so the stroke can be "drawn in"
    // by animating the offset back to zero (this is what overwrites the dashes).
    var lineLength = $el[0].getTotalLength();
    $el.css("stroke-dasharray", lineLength);
    $el.css("stroke-dashoffset", lineLength);
  }
  // init controller
  var controller = new ScrollMagic.Controller();
  // loop through all elements
  $(".svg-animation").each(function() {
    var $thePath = $(this).find("path#thePath");
    // prepare SVG
    pathPrepare($thePath);
    // build tween
    var tween = new TimelineMax()
      .add(TweenMax.to($thePath, 1, {
        strokeDashoffset: 0,
        ease: Linear.easeNone
      }));
    // build scene
    var scene = new ScrollMagic.Scene({
        triggerElement: $(this)[0],
        duration: 200,
        tweenChanges: true,
        reverse: true
      })
      .setTween(tween)
      .addTo(controller)
      .addIndicators();
  });
});
UPDATE: Updated the .js to reflect an .each loop plus other modifications. Still having issues with implementing a mask; see the sketch below.
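For reference, a hedged sketch of the mask technique (my own illustration, not from the post; the path data is made up): keep the visible path dashed, and animate a solid copy of it inside a <mask> instead. The existing tween would then target the mask path rather than path#thePath, so the dash pattern is never overwritten.

<svg viewBox="0 0 500 100">
  <defs>
    <mask id="reveal">
      <!-- Solid white copy of the path; animate ITS stroke-dashoffset. -->
      <path id="maskPath" d="M10,50 C150,10 350,90 490,50"
            stroke="#fff" stroke-width="6" fill="none"/>
    </mask>
  </defs>
  <!-- The real path stays dashed; the mask reveals it progressively. -->
  <path d="M10,50 C150,10 350,90 490,50" stroke="#000" stroke-width="4"
        fill="none" stroke-dasharray="8 6" mask="url(#reveal)"/>
</svg>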

Cocos2d-x JS getting variables from scene or layer

I'm currently having issues with the following problem.
I'm trying to access the layer inside the scene, and thereby the elements that I set in that layer. In this case I want access to the conv_label in the layer, to set its text.
I'm doing this via a ConversationClass which extends cc.Class.
When I try to access the layer via its variable, or with getChildByName or by tag, it doesn't work (the value is always null).
The code below is a method inside the ConversationClass. I can console.log the current scene without any problem, but no variable I set appears on it; in this case the name was "conv_layer". I can access the children by plain array indexing, but that doesn't seem like a good approach and is quite confusing.
This is what I tried:
currentscene.children[0].children[3] gives me the right element.
currentscene.conv_layer.getChildByName("text") says conv_layer does not exist.
currentscene.children[0].getChildByName("text") returns null.
Does anyone know how to solve this issue, or can anyone tell me where my thinking went wrong?
Not sure if it matters, but I call the scene (for now) in the following way:
cc.LoaderScene.preload(conversation_load, function() {
    cc.director.runScene(new ConversationScene());
    this.startGame();
}, this);
This is where I want access:
startConversation: function(conversation) {
    this._conversationObject = conversation;
    this._currentScene = cc.director.getRunningScene();
    console.log(this._currentScene); // Shows the current scene object (doesn't have a conv_layer property)
    if (this._currentScene !== null)
        this._currentConversationLayer = this._currentScene.conv_layer; // Returns null
},
This is my scene:
var ConversationScene = cc.Scene.extend({
    conv_layer: null,
    onEnter: function() {
        this._super();
        this.conv_layer = new ConversationLayer();
        this.conv_layer.setName('conversation');
        this.conv_layer.setTag(1);
        this.addChild(this.conv_layer);
    }
});
and this is my layer:
var ConversationLayer = cc.Layer.extend({
    ctor: function() {
        this._super();
        this.init();
    },
    init: function() {
        this._super();
        var winSize = cc.director.getWinSize();
        GD.current_conversation = conversation1;

        this.background = new cc.Sprite();
        this.background.anchorX = 0.5;
        this.background.anchorY = 0.5;
        this.background.setPositionX(winSize.width / 2);
        this.background.setPositionY(winSize.height / 2);
        this.addChild(this.background);

        this.girl = new cc.Sprite();
        this.girl.anchorX = 0;
        this.girl.anchorY = 0;
        this.addChild(this.girl);

        this.text_background = new cc.Sprite(resources.conversation_interactive_assets, cc.rect(0, 0, 1920 / GD.options.scale, 320 / GD.options.scale));
        this.text_background.anchorX = 0.5;
        this.text_background.anchorY = 0;
        this.text_background.setPositionX(winSize.width / 2);
        this.text_background.setPositionY(0);
        this.addChild(this.text_background);

        // Left
        this.conv_label = new cc.LabelBMFont("", resources.font);
        this.conv_label.anchorX = 0;
        this.conv_label.anchorY = 0;
        this.conv_label.setPositionX((winSize.width - this.text_background.width) / 2 + 20);
        this.conv_label.setPositionY(this.text_background.height - 30);
        this.conv_label.color = cc.color.BLACK;
        this.conv_label.setName('text');
        this.addChild(this.conv_label);
    }
});
The issue was the loading order of everything.
It seems scenes are loaded asynchronously, so the next function was called before the layer existed at that point.
I solved it by doing the creation inside the class itself and calling onSceneEnter.
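A minimal sketch of that kind of fix (my own illustration; the post does not show the final code): do the setup in onEnter, where the layer is guaranteed to exist, instead of right after runScene().

// Hypothetical rework: conv_layer exists by the end of onEnter, so anything
// that needs it can be triggered from here rather than from the preload callback.
var ConversationScene = cc.Scene.extend({
    conv_layer: null,
    onEnter: function() {
        this._super();
        this.conv_layer = new ConversationLayer();
        this.addChild(this.conv_layer);
        // Signal that the scene is ready; startConversation can listen for this
        // instead of racing the scene load.
        cc.eventManager.dispatchCustomEvent('conversation-scene-ready');
    }
});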