Event for every frame of HTML Video?

I'd like to build an event handler to deal with each new frame of an HTML5 <video> element. Unfortunately, there's no built-in event that fires for each new video frame (the timeupdate event is the closest, but it fires for each time change rather than for each video frame).
Has anyone else run into this same issue? Is there a good way around it?

There is an HTMLVideoElement.requestVideoFrameCallback() method that is still being drafted, and thus neither stable nor widely implemented (it is only available in Chromium-based browsers), but it does what you want, along with providing many other details about that frame.
For your Firefox users, this browser has a non-standard seekToNextFrame() method which, depending on what you want to do, you could use. It won't exactly work as an event though; it is more of a way to, well... seek to the next frame. So it will greatly affect the playback of the video, since it doesn't respect the duration of each frame.
And for Safari users, the closest option is indeed the timeupdate event, but as you know, it doesn't really match the displayed frame.
(async() => {
  const log = document.querySelector("pre");
  const vid = document.querySelector("video");
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  if( vid.requestVideoFrameCallback ) {
    await vid.play();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp, frame) => {
      log.textContent = `timestamp: ${ timestamp }
frame: ${ JSON.stringify( frame, null, 4 ) }`;
      ctx.drawImage( vid, 0, 0 );
      vid.requestVideoFrameCallback( drawingLoop );
    };
    vid.requestVideoFrameCallback( drawingLoop );
  }
  else if( vid.seekToNextFrame ) {
    const requestNextFrame = (callback) => {
      vid.addEventListener( "seeked", () => callback( vid.currentTime ), { once: true } );
      vid.seekToNextFrame();
    };
    await vid.play();
    await vid.pause();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp) => {
      log.textContent = "timestamp: " + timestamp;
      ctx.drawImage( vid, 0, 0 );
      requestNextFrame( drawingLoop );
    };
    requestNextFrame( drawingLoop );
  }
  else {
    console.error("Your browser doesn't support any of these methods, we should fallback to timeupdate");
  }
})();
video, canvas {
width: 260px;
}
<pre></pre>
<video src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm" muted controls></video>
<canvas></canvas>
Note that the encoded frames and the displayed ones are not necessarily the same thing anyway, and that browsers may not respect the encoded frame rate at all. So depending on what you are trying to do, a simple requestAnimationFrame loop, which fires at every repaint of the monitor, might be better.
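For reference, here is a minimal sketch of such a loop; it treats any change of currentTime as a "new frame", which is only an approximation of what is actually displayed:
const vid = document.querySelector("video");
let lastTime = -1;
const pollLoop = () => {
  if (vid.currentTime !== lastTime) {
    lastTime = vid.currentTime;
    // a new frame has (probably) been presented; handle it here
  }
  requestAnimationFrame(pollLoop);
};
requestAnimationFrame(pollLoop);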

Related

Taking camera input, changing it in a canvas element, and returning it back as a MediaStream is failing

I have a simple application that takes camera input, converts it to a canvas (where the stream can be manipulated) and then returns the manipulated stream back via captureStream. However, it seems like the stream isn't returning anything as the output video is black.
Can someone point out where I went wrong here?
The code below can be copied, pasted, and run.
<html>
<head></head>
<body>
  <video id="video-id" playsinline autoplay></video>
</body>
<script type="application/javascript">
  const video = document.getElementById('video-id');

  function manipulatedVideoStream(stream) {
    const temp_video = document.createElement('video');
    const temp_canvas = document.createElement('canvas');
    temp_video.srcObject = stream;
    const framerate = 1000 / 30; // ~30 fps
    setInterval(() => {
      temp_canvas.width = temp_video.videoWidth;
      temp_canvas.height = temp_video.videoHeight;
      const context = temp_canvas.getContext('2d');
      context.drawImage(temp_video, 0, 0, temp_video.width, temp_video.height);
      // draw some stuff in here
    }, framerate);
    return temp_canvas.captureStream(framerate);
  }

  const constraints = {
    audio: false,
    video: true
  };

  function handleSuccess(stream) {
    video.srcObject = manipulatedVideoStream(stream);
  }

  function handleError(error) {
    console.log('navigator.MediaDevices.getUserMedia error: ', error.message, error.name);
  }

  navigator.mediaDevices.getUserMedia(constraints).then(handleSuccess).catch(handleError);
</script>
</html>
The temp_video needs to have autoplay set:
temp_video.autoplay = true;
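As a side note, the drawImage() call above also uses temp_video.width / temp_video.height, which are 0 for a <video> element created in JS. A more defensive setup (a sketch that goes beyond the fix above) would wait for the metadata and use the intrinsic dimensions:
temp_video.autoplay = true;
temp_video.muted = true; // most autoplay policies require muted playback
temp_video.addEventListener('loadedmetadata', () => {
  temp_canvas.width = temp_video.videoWidth;
  temp_canvas.height = temp_video.videoHeight;
});
// and inside the interval:
// context.drawImage(temp_video, 0, 0, temp_video.videoWidth, temp_video.videoHeight);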

HTML 5 wait for drawImage to finish in a Vue SPA

I have been trying to debug something for a week, and I now suspect the problem is that the drawImage function does not have time to finish. I have a for loop that composes a canvas element by stitching together two different canvas elements, and then adds that composed canvas as a frame to a GIF.js object. The problem I keep running into is that the bottom stitched canvas does not appear, or only partially appears, in my output GIF file. My question is: how do I ensure synchronous execution of drawImage in the context of a Vue SPA method? I have experimented with Promise, but I have not gotten it to work. Can anyone explain and help me with this, please?
EDIT: I have tried wrapping my drawImage in a promise and await, but it raised type errors.
I managed to get it working by wrapping the drawImage step in a separate method, inside a Promise, the proper way. See the code below for the two methods that were the culprits but are now fixed.
async composeCanvas( gif , timeStep , visibleLayers , delayInput) {
  const mapCnv = this.getMapCanvas();
  await this.updateInfoCanvas( timeStep )
  const numberVisibleLayers = visibleLayers.length;
  const composedCnv = await this.stitchCanvases( mapCnv , numberVisibleLayers );
  gif.addFrame(composedCnv, {copy:false, delay: delayInput});
},
async stitchCanvases( mapCanvas , numberVisibleLayers ) {
  return new Promise(( resolve ) => {
    var composedCnv = document.createElement('canvas');
    var ctx = composedCnv.getContext('2d');
    var ctx_w = mapCanvas.width;
    var ctx_h = mapCanvas.height + ((numberVisibleLayers - 1) * 30) + 40;
    composedCnv.width = ctx_w;
    composedCnv.height = ctx_h;
    [
      {
        cnv: mapCanvas,
        y: 0
      },
      {
        cnv: this.infoCanvas,
        y: mapCanvas.height
      }
    ].forEach( ( n ) => {
      ctx.beginPath();
      ctx.drawImage(n.cnv, 0, n.y, ctx_w, n.cnv.height);
    });
    resolve(composedCnv)
  })
}
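For completeness, the calling loop then just needs to await each frame before adding the next one (sketch; timeSteps is an assumed name):
for (const timeStep of timeSteps) {
  await this.composeCanvas(gif, timeStep, visibleLayers, delayInput)
}
gif.render() // GIF.js: start encoding once all frames have been added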

MediaRecorder switch video tracks

I am using the MediaRecorder API to record videos in web applications. The application has the option to switch between camera and screen. I am using a canvas to augment the stream recording: the stream captured from the camera is redirected to a video element, that video is rendered onto a canvas, and the stream from the canvas is passed to MediaRecorder.
What I noticed is that switching from screen to camera (and vice versa) works fine as long as the user doesn't switch/minimize the Chrome window. The canvas rendering uses requestAnimationFrame, and it freezes after the tab loses focus.
Is there any way to instruct Chrome not to pause the execution of requestAnimationFrame? Is there any alternative way to switch streams without impacting the MediaRecorder recording?
Update:
After reading through the documentation, it appears that tabs which play audio or hold an active WebSocket connection are not throttled. Neither applies to us at the moment. This might be a workaround, but I am hoping for an alternative solution from the community. (setTimeout and setInterval are throttled too heavily, so I am not using them; that would also hurt rendering quality.)
Update 2:
I was able to fix this problem using a Worker. Instead of calling requestAnimationFrame on the main UI thread, the worker drives the timing loop and notifies the main thread via postMessage. When the UI thread finishes rendering, it sends a message back to the worker. There is also a delta-period calculation to throttle overwhelming messages from the worker.
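In outline, the setup looks roughly like this (a simplified sketch, not the actual code; all names are illustrative, and worker timers are throttled far less aggressively than main-thread ones):
// main thread: spin up a tiny ticking worker from an inline Blob
const workerSrc = `
  const frameDelta = 1000 / 30; // target ~30 fps
  let last = 0;
  (function tick() {
    const now = performance.now();
    if (now - last >= frameDelta) { // delta check, so we don't flood the main thread
      last = now;
      postMessage('tick');
    }
    setTimeout(tick, 4);
  })();
`;
const worker = new Worker(URL.createObjectURL(new Blob([workerSrc], { type: 'text/javascript' })));
worker.onmessage = () => drawVideoToCanvas(); // hypothetical: render the current video frame onto the canvas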
There is an ongoing proposal to add a .replaceTrack() method to the MediaRecorder API, but for the time being, the specs still read
If at any point, a track is added to or removed from stream's track set, the UA MUST immediately stop gathering data, discard any data that it has gathered [...]
And that's what is implemented.
So we still have to rely on hacks to do this ourselves...
The best one is probably to create a local RTC connection, and to record the receiving end.
// creates a mixable stream
async function mixableStream( initial_track ) {
  const source_stream = new MediaStream( [] );
  const pc1 = new RTCPeerConnection();
  const pc2 = new RTCPeerConnection();
  pc1.onicecandidate = (evt) => pc2.addIceCandidate( evt.candidate );
  pc2.onicecandidate = (evt) => pc1.addIceCandidate( evt.candidate );
  const wait_for_stream = waitForEvent( pc2, 'track')
    .then( evt => new MediaStream( [ evt.track ] ) );
  pc1.addTrack( initial_track, source_stream );
  await waitForEvent( pc1, 'negotiationneeded' );
  try {
    await pc1.setLocalDescription( await pc1.createOffer() );
    await pc2.setRemoteDescription( pc1.localDescription );
    await pc2.setLocalDescription( await pc2.createAnswer() );
    await pc1.setRemoteDescription( pc2.localDescription );
  } catch ( err ) {
    console.error( err );
  }
  return {
    stream: await wait_for_stream,
    async replaceTrack( new_track ) {
      const sender = pc1.getSenders().find( ( { track } ) => track.kind == new_track.kind );
      return sender && sender.replaceTrack( new_track ) ||
        Promise.reject( "no such track" );
    }
  };
}

{ // remap unstable FF version
  const proto = HTMLMediaElement.prototype;
  if( !proto.captureStream ) { proto.captureStream = proto.mozCaptureStream; }
}

waitForEvent( document.getElementById( 'starter' ), 'click' )
  .then( (evt) => evt.target.parentNode.remove() )
  .then( (async() => {
    const urls = [
      "2/22/Volcano_Lava_Sample.webm",
      "/a/a4/BBH_gravitational_lensing_of_gw150914.webm"
    ].map( (suffix) => "https://upload.wikimedia.org/wikipedia/commons/" + suffix );
    const switcher_btn = document.getElementById( 'switcher' );
    const stop_btn = document.getElementById( 'stopper' );
    const video_out = document.getElementById( 'out' );
    let current = 0;
    // see below for 'getVideoTracks'
    const video_tracks = await Promise.all( urls.map( (url, index) => getVideoTracks( url ) ) );
    const mixable_stream = await mixableStream( video_tracks[ current ].track );
    switcher_btn.onclick = async (evt) => {
      current = +!current;
      await mixable_stream.replaceTrack( video_tracks[ current ].track );
    };
    // final recording part below
    // only for demo, so we can see what happens now
    video_out.srcObject = mixable_stream.stream;
    const rec = new MediaRecorder( mixable_stream.stream );
    const chunks = [];
    rec.ondataavailable = (evt) => chunks.push( evt.data );
    rec.onerror = console.log;
    rec.onstop = (evt) => {
      const final_file = new Blob( chunks );
      video_tracks.forEach( (track) => track.stop() );
      // only for demo, since we did set its srcObject
      video_out.srcObject = null;
      video_out.src = URL.createObjectURL( final_file );
      switcher_btn.remove();
      stop_btn.remove();
      const anchor = document.createElement( 'a' );
      anchor.download = 'file.webm';
      anchor.textContent = 'download';
      anchor.href = video_out.src;
      document.body.prepend( anchor );
    };
    stop_btn.onclick = (evt) => rec.stop();
    rec.start();
  }))
  .catch( console.error );

// some helpers below
// returns a video loaded to given url
function makeVid( url ) {
  const vid = document.createElement('video');
  vid.crossOrigin = true;
  vid.loop = true;
  vid.muted = true;
  vid.src = url;
  return vid.play()
    .then( (_) => vid );
}
/* Records videos from given url
** @method stop() ::pauses the linked <video>
** @property track ::the video track
*/
async function getVideoTracks( url ) {
  const player = await makeVid( url );
  const track = player.captureStream().getVideoTracks()[ 0 ];
  return {
    track,
    stop() { player.pause(); }
  };
}
// Promisifies EventTarget.addEventListener
function waitForEvent( target, type ) {
  return new Promise( (res) => target.addEventListener( type, res, { once: true } ) );
}
video { max-height: 100vh; max-width: 100vw; vertical-align: top; }
.overlay {
  background: #ded;
  position: fixed;
  z-index: 999;
  height: 100vh;
  width: 100vw;
  top: 0;
  left: 0;
  display: flex;
  align-items: center;
  justify-content: center;
}
<div class="overlay">
  <button id="starter">start demo</button>
</div>
<button id="switcher">switch source</button>
<button id="stopper">stop recording</button>
<video id="out" muted controls autoplay></video>
Otherwise you can still go the canvas way, with the Web Audio timer I made for when the page is blurred, even though this will not work in Firefox, since Firefox internally hooks into rAF to push new frames into the recorder...
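That timer isn't included here, but the general trick is to use the audio graph as a clock, since audio processing keeps running while the tab is blurred. A rough sketch, using the deprecated ScriptProcessorNode for brevity:
const actx = new AudioContext(); // may need to be resumed from a user gesture
const clock = actx.createScriptProcessor(256, 1, 1);
clock.onaudioprocess = () => {
  drawFrame(); // hypothetical canvas-rendering callback
};
clock.connect(actx.destination); // must be connected for onaudioprocess to fire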
I had the same problem and was trying to figure it out without too much complexity, such as Canvas or SourceBuffer.
I used an RTCPeerConnection looped back to the same page to make a connection. Once the connection is made, you can use an RTCRtpSender, obtained via peerConnection.addTrack(), and from there you can easily switch.
I just made a library and a demo, which you can find here:
https://github.com/meething/StreamSwitcher/
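Stripped down, the switching part of that approach is the same RTCRtpSender pattern as in the answer above, roughly:
// sketch; assuming `pc` is the sending side of a looped-back RTCPeerConnection pair
const sender = pc.getSenders().find((s) => s.track && s.track.kind === 'video');
await sender.replaceTrack(newVideoTrack); // swaps the source without renegotiation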

Convert HTML5 Canvas Sequence to a Video File

I'd like to convert an animation in HTML5 canvas to a video file that could be uploaded to YouTube. Is there any sort of screen-capture API or something that would allow me to do this programmatically?
Back to 2020
Solved it by using the MediaRecorder API, which is built exactly for that, among other things.
Here is a solution that records X ms of canvas video;
you can extend it with a button UI to start, pause, resume, stop, and generate a URL.
function record(canvas, time) {
  var recordedChunks = [];
  return new Promise(function (res, rej) {
    var stream = canvas.captureStream(25 /*fps*/);
    var mediaRecorder = new MediaRecorder(stream, {
      mimeType: "video/webm; codecs=vp9"
    });
    // ondataavailable will fire at intervals of `time || 4000` ms
    mediaRecorder.start(time || 4000);
    mediaRecorder.ondataavailable = function (event) {
      recordedChunks.push(event.data);
      // after stop, the `dataavailable` event fires one more time
      if (mediaRecorder.state === 'recording') {
        mediaRecorder.stop();
      }
    };
    mediaRecorder.onstop = function (event) {
      var blob = new Blob(recordedChunks, { type: "video/webm" });
      var url = URL.createObjectURL(blob);
      res(url);
    };
  });
}
How to use:
const recording = record(canvas, 10000)
// play it on another video element
var video$ = document.createElement('video')
document.body.appendChild(video$)
recording.then(url => video$.setAttribute('src', url) )
// download it
var link$ = document.createElement('a')
link$.setAttribute('download','recordingVideo')
recording.then(url => {
  link$.setAttribute('href', url)
  link$.click()
})
Firefox has an experimental feature (disabled by default) that is called HTMLCanvasElement.captureStream()
Essentially it captures the canvas element as a video stream, which can then be sent to another computer using RTCPeerConnection(); alternatively, you could perhaps use the YouTube Live Streaming API to stream directly.
See: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
Also: https://developers.google.com/youtube/v3/live/getting-started
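Hooking the captured canvas stream into a peer connection is then just a matter of adding its track; a sketch (the offer/answer signaling is omitted):
const stream = canvas.captureStream(25); // 25 fps
const pc = new RTCPeerConnection();
stream.getVideoTracks().forEach((track) => pc.addTrack(track, stream));
// ...then exchange offer/answer with the remote peer as usual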
There is the Whammy library, which claims to produce WebM videos from stills using JavaScript:
http://antimatter15.com/wp/2012/08/whammy-a-real-time-javascript-webm-encoder/
Note that there are limitations (as is to be expected). This encoder is based on the WebP image format, which is currently only supported in Chrome (perhaps the new Opera too, but I haven't checked). This means you can't encode in other browsers unless you find a way to encode the image you want to use as a WebP image first (see this link for a possible solution to that).
Beyond that, there is no way to create a video file from images using JavaScript and canvas with native browser APIs.
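For reference, a minimal encoding loop based on Whammy's README might look like this (drawFrame and totalFrames are assumed to exist):
var encoder = new Whammy.Video(15); // 15 fps
for (var i = 0; i < totalFrames; i++) {
  drawFrame(i);        // hypothetical: render the i-th frame onto `canvas`
  encoder.add(canvas); // Whammy encodes the canvas as a WebP frame internally
}
var output = encoder.compile(); // a WebM Blob, in the original synchronous API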
FileSaver.js + ffmpeg on the command line
With FileSaver.js we can download each canvas frame as a PNG: Save to Local File from Blob
Then we just convert the PNGs to any video format with ffmpeg from the command line: How to create a video from images with FFmpeg?
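For example, assuming the frames were saved as 0.png, 1.png, ..., a typical invocation would be:
ffmpeg -framerate 25 -i %d.png -c:v libx264 -pix_fmt yuv420p out.mp4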
Chromium 75 asks if you want to allow it to save multiple images. Once you say yes, it downloads the images automatically, one by one, into your download folder, named 0.png, 1.png, etc.
It also worked in Firefox 68, but less well, because the browser opens a bunch of "Do you want to save this file" windows. They do have a "do the same for similar downloads" popup, but you have to be quick to select it and hit enter, or else a new popup comes along!
To stop it, you have to close the tab, or add a stop button and some JavaScript logic.
var canvas = document.getElementById("my-canvas");
var ctx = canvas.getContext("2d");
var pixel_size = 1;
var t = 0;

/* We need this to fix t, because toBlob calls are asynchronous. */
function createBlobFunc(t) {
  return function(blob) {
    saveAs(blob, t.toString() + '.png');
  };
}

function draw() {
  console.log("draw");
  for (var x = 0; x < canvas.width; x += pixel_size) {
    for (var y = 0; y < canvas.height; y += pixel_size) {
      var b = ((1.0 + Math.sin(t * Math.PI / 16)) / 2.0);
      ctx.fillStyle =
        "rgba(" +
        (x / canvas.width) * 255 + "," +
        (y / canvas.height) * 255 + "," +
        b * 255 +
        ",255)"
      ;
      ctx.fillRect(x, y, pixel_size, pixel_size);
    }
  }
  canvas.toBlob(createBlobFunc(t));
  t++;
  window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
<canvas id="my-canvas" width="512" height="512" style="border:1px solid black;"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.min.js"></script>
GitHub upstream.
Here's an image to GIF output using this instead: https://askubuntu.com/questions/648244/how-do-i-create-an-animated-gif-from-still-images-preferably-with-the-command-l
Frames get skipped if the FPS is too high
This can be observed by reducing the size of the canvas in the above demo to speed things up. At 32x32, my Chromium 77 downloads in chunks of about 10 files and skips about 50 files in between...
Unfortunately, there is no way to wait for the downloads to finish... close window after file save in FileSaver.js
So the only solution I can see if you have a high framerate is framerate limiting... Controlling fps with requestAnimationFrame? Here is a live demo: https://cirosantilli.com/#html-canvas
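A minimal limiter along those lines, using the timestamp that requestAnimationFrame passes to its callback (sketch; it assumes draw() no longer reschedules itself):
var fps = 10;
var lastStamp = 0;
function loop(timestamp) {
  if (timestamp - lastStamp >= 1000 / fps) {
    lastStamp = timestamp;
    draw(); // the existing draw-and-toBlob step
  }
  window.requestAnimationFrame(loop);
}
window.requestAnimationFrame(loop);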
Maybe one day someone will answer:
H.264 video encoder in javascript
Running ffmpeg in browser - options?
and then we will be able to download the video directly!
Here is an OpenGL version if you decide that the browser is not for you :-) How to use GLUT/OpenGL to render to a file?
Tested in Ubuntu 19.04.
This should help; it allows you to drop some images, which get converted into an HTML5 canvas and then into a WebM video: http://techslides.com/demos/image-video/create.html
Pure JavaScript, no third-party packages.
If you have a video and want to take some frames, you can try the below:
class Video2Canvas {
  /**
  * @description Create a canvas and save the frames of the video that you give it.
  * @param {HTMLVideoElement} video
  * @param {Number} fps
  * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
  * */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... // you can make some modifications to the frame. For example, create a grayscale frame: https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 Below is optional: it saves each frame as a link, which you can click to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}
const v2c = new Video2Canvas(document.querySelector("video"), 1)
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>
If you want to edit the video (for example, take 5~8 sec + 12~15 sec and then create a new one), you can try:
class CanvasRecord {
  /**
  * @param {HTMLCanvasElement} canvas
  * @param {Number} fps
  * @param {string} mediaType: video/webm, video/mp4 (not supported yet) ...
  * */
  constructor(canvas, fps, mediaType) {
    this.canvas = canvas
    const stream = canvas.captureStream(fps) // https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
    this.mediaRecorder = new MediaRecorder(stream, { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/MediaRecorder
      mimeType: mediaType
    })
    this.initControlBtn()
    this.chunks = []
    this.mediaRecorder.ondataavailable = (event) => {
      this.chunks.push(event.data)
    }
    this.mediaRecorder.onstop = (event) => {
      const blob = new Blob(this.chunks, {
        type: mediaType
      })
      const url = URL.createObjectURL(blob)
      // 👇 Below is test code so you know you were successful. You can also download the result if you wish.
      const video = document.createElement('video')
      video.src = url
      video.onended = (e) => {
        URL.revokeObjectURL(url);
      }
      document.querySelector("body").append(video)
      video.controls = true
    }
  }

  initControlBtn() {
    const range = document.createRange()
    const frag = range.createContextualFragment(`<div>
      <button id="btn-start">Start</button>
      <button id="btn-pause">Pause</button>
      <button id="btn-resume">Resume</button>
      <button id="btn-end">End</button>
    </div>
    `)
    const btnStart = frag.querySelector(`button[id="btn-start"]`)
    const btnPause = frag.querySelector(`button[id="btn-pause"]`)
    const btnResume = frag.querySelector(`button[id="btn-resume"]`)
    const btnEnd = frag.querySelector(`button[id="btn-end"]`)
    document.querySelector('body').append(frag)
    btnStart.onclick = (event) => {
      this.chunks = [] // clear
      this.mediaRecorder.start() // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/start
      console.log(this.mediaRecorder.state) // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/state
    }
    btnPause.onclick = (event) => { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/pause
      this.mediaRecorder.pause()
      console.log(this.mediaRecorder.state)
    }
    btnResume.onclick = (event) => {
      this.mediaRecorder.resume()
      console.log(this.mediaRecorder.state)
    }
    btnEnd.onclick = (event) => {
      this.mediaRecorder.requestData() // trigger ``ondataavailable`` // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/requestData
      this.mediaRecorder.stop()
      console.log(this.mediaRecorder.state)
    }
  }
}
class Video2Canvas {
  /**
  * @description Create a canvas and save the frames of the video that you give it.
  * @param {HTMLVideoElement} video
  * @param {Number} fps
  * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
  * */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        /*
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... // you can make some modifications to the frame. For example, create a grayscale frame: https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 Below is optional: it saves each frame as a link, which you can click to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        */
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}

(() => {
  const v2c = new Video2Canvas(document.querySelector("video"), 60)
  const canvasRecord = new CanvasRecord(v2c.canvas, 25, 'video/webm')
  v2c.video.addEventListener("play", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-resume").click()
  })
  v2c.video.addEventListener("pause", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-pause").click()
  })
})()
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>

Video event on given second

I need to play the next video when the current video's time reaches a given value, but I don't know why the event that should read currentTime is not firing at all. Maybe someone has an idea why (obviously the simplest tasks are the ones causing the most problems).
function setupVideoPlayer() {
  var currentVideo = 1;
  // i.e. [video url, start time, end time]
  var videos = [["1.mp4", 0, 4], ["2.mp4", 3, 7]];
  var videoPlayer = document.getElementById('videoPlayer');
  // set first video clip
  videoPlayer.src = videos[0][0]; // 1.mp4
  // Synchronization function should come here
  videoPlayer.addEventListener('timeupdate', function(e) {
    if (this.currentTime == videos[currentVideo-1][2]) {
      currentVideo += 1;
      this.src = videos[currentVideo-1][0];
      this.play();
    }
  }, true);
  // It makes sure the seek will happen after buffering
  // the video
  videoPlayer.addEventListener('progress', function(e) {
    // Seek to start point
    this.currentTime = videos[currentVideo-1][1];
  }, false);
}
I would suggest changing the time check to a > rather than ==, because the currentTime value isn't an integer.
if (this.currentTime == videos[currentVideo-1][2]) {
becomes
if (this.currentTime > videos[currentVideo-1][2]) {
(or you could convert it to an integer for the test)
If there are other issues, it's worth adding a console.log(this.currentTime) to the handler, just to see if it's triggering the code.
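Putting that together, the timeupdate handler from the question would become something like this sketch:
videoPlayer.addEventListener('timeupdate', function(e) {
  var clip = videos[currentVideo - 1];
  if (!clip) return; // all clips have been played
  // > instead of ==: currentTime is a float and will rarely land
  // exactly on the configured end time
  if (this.currentTime > clip[2]) {
    currentVideo += 1;
    if (videos[currentVideo - 1]) {
      this.src = videos[currentVideo - 1][0];
      this.play();
    }
  }
}, true);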