Saving images from HTML5 camera

My question is: how can I save complete images from an HTML5 camera live stream?
Here is my current code (I'm a JavaScript beginner):
const video = document.getElementById('video');
const canvas = document.getElementById('faceImage');
const hdConstraints = {
    audio: false,
    video: { width: { min: 1280 }, height: { min: 720 } }
};
var options = { mimeType: 'video/webm' };
navigator.mediaDevices.getUserMedia(hdConstraints, options)
    .then(
        (stream) => {
            video.srcObject = stream;
            video.play();
            setInterval(function() {
                var context = canvas.getContext('2d');
                context.drawImage(video, 0, 0, 1280, 720);
                var data = canvas.toDataURL('image/jpeg');
                ws.send(data);
            }, 1000);
        }
    );
With this code, on the server side I receive incomplete JPEG images (only about 10% of the canvas is in the picture; the other parts are blank).
So my question is: how can I correct this code so that only complete images are sent to the server?
thanks
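
A likely cause is that the canvas has no explicit pixel size and that frames are grabbed before the video is actually delivering data (also note that the mimeType option belongs to MediaRecorder, not getUserMedia). Below is a minimal sketch of a more defensive capture loop, assuming ws is an open WebSocket as in the question; it is an illustration, not a confirmed fix:

navigator.mediaDevices.getUserMedia(hdConstraints).then((stream) => {
    video.srcObject = stream;
    video.play();
    video.addEventListener('loadedmetadata', () => {
        // size the canvas to the real stream resolution instead of assuming 1280x720
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
        const context = canvas.getContext('2d');
        setInterval(() => {
            // only capture once the video has decoded data for the current frame
            if (video.readyState >= video.HAVE_CURRENT_DATA) {
                context.drawImage(video, 0, 0, canvas.width, canvas.height);
                ws.send(canvas.toDataURL('image/jpeg'));
            }
        }, 1000);
    });
});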

Related

Canvas throws a tainted-canvas error after loadFromJSON

I am using Fabric.js version 1.7.22.
When an image is set as a repeating pattern in a Fabric.js rectangle, it loads fine the first time: the canvas is saved into JSON using toJSON() and an image is saved using the toDataURL() method. But when I then call the canvas's loadFromJSON method, the canvas is no longer savable, because it throws a tainted-canvas error.
Please help me.
I have already set crossOrigin on the pattern, but it is not working, and it is not added to the canvas JSON.
I have made a fiddle to reproduce the issue:
http://jsfiddle.net/Mark_1998/kt387vLc/1/
Steps to reproduce the issue:
click on 'set pattern'
then click on 'save canvas'
then click on 'reload canvas' // loads the canvas from JSON
then click on 'save canvas' // triggers the tainted-canvas error
This issue is already fixed in newer versions of Fabric.js. If you are still on 1.7.20, override fabric.Pattern.prototype.toObject and fabric.Pattern.prototype.initialize; see the code in the snippet below.
var canvas = new fabric.Canvas('canvas', {
    height: 500,
    width: 500,
});
canvas.backgroundColor = '#ff0000';
canvas.renderAll();
var canvasJSON = {};
document.getElementById('setPat').addEventListener('click', function() {
    fabric.util.loadImage('https://cdn.dribbble.com/assets/icon-backtotop-1b04df73090f6b0f3192a3b71874ca3b3cc19dff16adc6cf365cd0c75897f6c0.png', function(image) {
        var pattern = new fabric.Pattern({
            source: image,
            repeat: 'repeat',
            crossOrigin: 'Anonymous'
        });
        var patternObject = new fabric.Rect({
            left: 0,
            top: 0,
            height: canvas.height,
            width: canvas.width,
            angle: 0,
            fill: pattern,
            objectCaching: false
        });
        canvas.add(patternObject);
    }, null, {
        crossOrigin: 'Anonymous'
    });
});
document.getElementById('saveCanvas').addEventListener('click', function() {
    console.log('save canvas');
    canvasJSON = canvas.toJSON();
    var image = canvas.toDataURL("image/png", {
        crossOrigin: 'Anonymous'
    }); // don't remove this, I need it as a thumbnail.
    //console.log('canvas.Json', canvasJSON);
    //console.log('image', image);
    canvas.clear();
    canvas.backgroundColor = '#ff0000';
    canvas.renderAll();
});
document.getElementById('reloadCanvas').addEventListener('click', function() {
    console.log('reload canvas');
    canvas.loadFromJSON(canvasJSON, function() {
        canvas.set({
            crossOrigin: 'Anonymous'
        });
    });
    console.log('canvas.Json', canvasJSON);
});
// crossOrigin was not added to the toObject JSON
fabric.Pattern.prototype.toObject = (function(toObject) {
    return function() {
        return fabric.util.object.extend(toObject.call(this), {
            crossOrigin: this.crossOrigin,
            patternTransform: this.patternTransform ? this.patternTransform.concat() : null
        });
    };
})(fabric.Pattern.prototype.toObject);
// crossOrigin was not passed while creating the image
fabric.Pattern.prototype.initialize = function(options, callback) {
    options || (options = {});
    this.id = fabric.Object.__uid++;
    this.setOptions(options);
    if (!options.source || (options.source && typeof options.source !== 'string')) {
        callback && callback(this);
        return;
    }
    // function string
    if (typeof fabric.util.getFunctionBody(options.source) !== 'undefined') {
        this.source = new Function(fabric.util.getFunctionBody(options.source));
        callback && callback(this);
    } else {
        // img src string
        var _this = this;
        this.source = fabric.util.createImage();
        fabric.util.loadImage(options.source, function(img) {
            _this.source = img;
            callback && callback(_this);
        }, null, this.crossOrigin);
    }
};
<script src="https://cdnjs.cloudflare.com/ajax/libs/fabric.js/1.7.20/fabric.js"></script>
<button id="setPat">Set pattern</button>
<button id="saveCanvas">Save canvas</button>
<button id="reloadCanvas">Reload canvas</button>
<canvas id="canvas"></canvas>

Scaling video from MediaRecorder

Setup
I have a simple setup like so:
<video src="/myvideo.mp4" id="my-vid" preload></video>
<script>
    var chunks = [];
    var vid = document.getElementById("my-vid");
    vid.onloadeddata = function() {
        var mr = new MediaRecorder(vid.captureStream(), {
            mimeType: "video/webm; codecs=opus,vp8"
        });
        mr.ondataavailable = function(e) {
            chunks.push(e.data);
        };
        mr.start();
        vid.play();
        vid.muted = false;
    };
    // ----
    // everything below here just downloads the file so I can
    // run it through ffprobe
    // ----
    vid.onended = function() {
        setTimeout(function() {
            var blob = new Blob(chunks, {type: "video/webm; codecs=opus,vp8"});
            var link = document.createElement("a");
            document.body.appendChild(link);
            link.style = "display: none;";
            var url = URL.createObjectURL(blob);
            link.href = url;
            link.download = "content.webm";
            link.click();
            URL.revokeObjectURL(url);
            document.body.removeChild(link);
        }, 1000);
    };
</script>
My video is 640 pixels by 360 pixels in size.
What I want
What I want is to scale the output video so that it is blown up to 1920 by 1080. (Yes, I realize it will be blocky and ugly; that's okay. I just need the resolution to match a second video so the two can be stitched together more easily.)
What I've tried
Simply changing the <video> tag has no effect:
<video src="/myvideo.mp4" id="my-vid" preload height="1080" width="1920"></video>
This changes the display size, but not the decoded size (and therefore the MediaRecorder still returns a 640x360 video)
I can scale the video by using a <canvas> element like so:
<script>
    // ...
    vid.onplay = function() {
        var canvas = document.createElement("canvas");
        canvas.width = 1920;  // the canvas needs explicit pixel dimensions,
        canvas.height = 1080; // otherwise it defaults to 300x150
        var ctx = canvas.getContext("2d");
        (function loop() {
            ctx.drawImage(vid, 0, 0, vid.videoWidth, vid.videoHeight, 0, 0, 1920, 1080);
            setTimeout(loop, 1000 / 30); // 30 fps
        })();
    };
</script>
If I then use mr = new MediaRecorder(canvas.captureStream()) instead of mr = new MediaRecorder(vid.captureStream()), I get a scaled video, but there's no audio!
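
One way to get both (a sketch under the assumption that captureStream() is available on both elements, not a confirmed fix from the original post): record a MediaStream that combines the scaled canvas's video track with the audio track taken from the source element's stream.

// Keep the canvas drawing loop above, then record a combined stream:
// scaled video from the canvas plus the original element's audio.
var canvasStream = canvas.captureStream(30); // 30 fps, matching the drawing loop
var audioTracks = vid.captureStream().getAudioTracks();
var mr = new MediaRecorder(
    new MediaStream([...canvasStream.getVideoTracks(), ...audioTracks]),
    { mimeType: "video/webm; codecs=opus,vp8" }
);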

In a Chrome App, video file is not saved with its original data/size

In appCtrl.js, for saving the video file:
$('#save_file').click(function(e) {
    var config = {type: 'saveFile', suggestedName: chosenEntry.name};
    chrome.fileSystem.chooseEntry(config, function(writableEntry) {
        // blob content is the DataUrl
        var blob = new Blob([$scope.blobContent], {type: 'video/mp4'});
        $scope.writeFileEntry(writableEntry, blob, function(e) {
            console.log('Write complete :)');
        });
    });
});

$scope.writeFileEntry = function(writableEntry, opt_blob, callback) {
    if (!writableEntry) {
        console.log('Nothing selected.');
        return;
    }
    writableEntry.createWriter(function(writer) {
        writer.onerror = $scope.errorHandler;
        writer.onwriteend = callback;
        // If we have data, write it to the file. Otherwise, just use the file we
        // loaded.
        if (opt_blob) {
            writer.truncate(opt_blob.size);
            $scope.waitForIO(writer, function() {
                writer.seek(0);
                writer.write(opt_blob);
            });
        } else {
            chosenEntry.file(function(file) {
                writer.truncate(file.fileSize);
                waitForIO(writer, function() {
                    writer.seek(0);
                    writer.write(file);
                });
            });
        }
    }, $scope.errorHandler);
};

$scope.waitForIO = function(writer, callback) {
    // set a watchdog to avoid eventual locking:
    var start = Date.now();
    // wait for a few seconds
    var reentrant = function() {
        if (writer.readyState === writer.WRITING && Date.now() - start < 4000) {
            setTimeout(reentrant, 100);
            return;
        }
        if (writer.readyState === writer.WRITING) {
            console.error("Write operation taking too long, aborting!" +
                " (current writer readyState is " + writer.readyState + ")");
            writer.abort();
        } else {
            callback();
        }
    };
    setTimeout(reentrant, 100);
};
In the above code the video file is saved, but when I try to play the saved file in Windows Media Player or VLC, it prompts: "Windows Media Player cannot play the file. The player might not support the file type or might not support the codec that was used to compress the file."
Can you please guide me on where I'm going wrong? This is my first Chrome app.
Thanks in advance.
The problem is that your code writes the data-URL string itself into the Blob, so the saved file contains base64 text rather than binary video data. Change the method that stores the blob so it decodes the data URL first:
function dataURItoBlob(dataURI) {
    // convert base64 to raw binary data held in a string
    // doesn't handle URLEncoded DataURIs
    var byteString = atob(dataURI.split(',')[1]);
    // separate out the mime component
    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
    // write the bytes of the string to an ArrayBuffer
    var ab = new ArrayBuffer(byteString.length);
    var ia = new Uint8Array(ab);
    for (var i = 0; i < byteString.length; i++) {
        ia[i] = byteString.charCodeAt(i);
    }
    return new Blob([ab], {type: 'video/mp4'});
}
To handle the click:
$('#save_file').click(function(e) {
    var config = {type: 'saveFile', suggestedName: chosenEntry.name};
    chrome.fileSystem.chooseEntry(config, function(writableEntry) {
        var blob = dataURItoBlob($scope.blobContent);
        $scope.writeFileEntry(writableEntry, blob, function(e) {
            console.log('Write complete :)');
        });
    });
});

Turn webcam off after "Taking Picture"

I am having an issue with turning the webcam off once I have taken a snapshot. The code below works well, but I just can't figure out how to turn off the webcam once I have everything in the canvas.
I have tried a few methods that I found through some research, but none seem to help.
I have tried to add video.stop(); in the "snap" event listener, and it says "undefined is not a function", although most things I have read say it should work.
Error screenshot: https://www.dropbox.com/s/h7g4cidqhimc5ij/Screenshot%202014-08-04%2013.08.04.png
To sum it all up: when someone clicks "Take Picture", I want the picture to be taken and the camera hardware turned off. The event listener in the later half of the code below is for the "Take Picture" button.
function startCam() {
    $('#can').hide();
    $('#video').show();
    $('#tab1-retry').hide();
    $('#save-tab1').hide();
    var video = document.getElementById("video"),
        mask = document.getElementById("mask"),
        videoObj = {
            "video": true
        },
        errBack = function(error) {
            console.log("Video capture error: ", error.code);
        };
    // Put video listeners into place
    if (navigator.getUserMedia) { // Standard
        navigator.getUserMedia(videoObj, function(stream) {
            video.src = stream;
            video.play();
        }, errBack);
    } else if (navigator.webkitGetUserMedia) { // WebKit-prefixed
        navigator.webkitGetUserMedia(videoObj, function(stream) {
            video.src = window.webkitURL.createObjectURL(stream);
            video.play();
        }, errBack);
    } else if (navigator.mozGetUserMedia) { // Mozilla-prefixed
        navigator.mozGetUserMedia(videoObj, function(stream) {
            video.src = window.URL.createObjectURL(stream);
            video.play();
        }, errBack);
    }
    document.getElementById("snap").addEventListener("click", function() {
        window.canvas1 = new fabric.Canvas('canvas');
        video.pause();
        $('#video').hide();
        $('#snap').hide();
        $('#can').show();
        $('#save-tab1').show();
        $('#tab1-retry').show();
        // VIDEO CAPTURE
        var imgInstance = new fabric.Image(video, {
            left: 0,
            top: 0,
        });
        imgInstance.set('selectable', false);
        canvas1.add(imgInstance);
        // FIRST LAYER
        mask = document.getElementById("mask");
        var imgInstance1 = new fabric.Image(mask, {
            left: 100,
            top: 100,
            cornerSize: 20
        });
        imgInstance1.set('selectable', true);
        canvas1.add(imgInstance1);
        // CANVAS LAYER
        canvas1.setActiveObject(canvas1.item(1));
        canvas1.item(1)['evented'] = true;
        canvas1.calcOffset();
        canvas1.renderAll();
    });
}
Inside your success callback function you could assign the stream to a variable, say:
var cameraStream = stream;
video.src = window.URL.createObjectURL(stream);
Then in your 'snap' event listener you can just pause() the video after taking the screenshot and close/stop the cameraStream:
video.pause();
cameraStream.stop();
.stop() closes the webcam input.
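Note that MediaStream.stop() has since been deprecated and removed from browsers. The modern equivalent (a sketch, assuming cameraStream still holds the stream from the success callback) is to stop each track individually:
// Stopping every track on the stream releases the camera hardware
// in current browsers, where MediaStream.stop() no longer exists.
cameraStream.getTracks().forEach(function(track) {
    track.stop();
});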

Code to take snapshot of a html page? [duplicate]

Google's "Report a Bug" or "Feedback Tool" lets you select an area of your browser window to create a screenshot that is submitted with your feedback about a bug.
Screenshot by Jason Small, posted in a duplicate question.
How are they doing this? Google's JavaScript feedback API is loaded from here and their overview of the feedback module will demonstrate the screenshot capability.
JavaScript can read the DOM and render a fairly accurate representation of it using canvas. I have been working on a script which converts HTML into a canvas image, and decided today to make an implementation of it for sending feedback like you described.
The script allows you to create feedback forms which include a screenshot, created in the client's browser, along with the form. The screenshot is based on the DOM, and as such it may not be 100% accurate to the real representation: it does not take an actual screenshot, but builds one based on the information available on the page.
It does not require any rendering from the server, as the whole image is created in the client's browser. The html2canvas script itself is still in a very experimental state, as it does not parse nearly as many of the CSS3 attributes as I would want it to, nor does it have any support for loading CORS images even if a proxy were available.
Browser compatibility is still quite limited (not because more couldn't be supported, I just haven't had time to make it more cross-browser compatible).
For more information, have a look at the examples here:
http://hertzen.com/experiments/jsfeedback/
edit
The html2canvas script is now available separately here and some examples here.
edit 2
Another confirmation that Google uses a very similar method (in fact, based on the documentation, the only major difference is their async method of traversing/drawing) can be found in this presentation by Elliott Sprehn from the Google Feedback team:
http://www.elliottsprehn.com/preso/fluentconf/
Your web app can now take a 'native' screenshot of the client's entire desktop using getUserMedia():
Have a look at this example:
https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/
The client will have to be using Chrome (for now) and will need to enable screen capture support under chrome://flags.
PoC
As Niklas mentioned, you can use the html2canvas library to take a screenshot using JS in the browser. I will extend his answer at this point by providing an example of taking a screenshot using this library ("proof of concept"):
function report() {
    let region = document.querySelector("body"); // whole screen
    html2canvas(region, {
        onrendered: function(canvas) {
            let pngUrl = canvas.toDataURL(); // png in dataURL format
            let img = document.querySelector(".screen");
            img.src = pngUrl;
            // here you can allow user to set bug-region
            // and send it with 'pngUrl' to server
        },
    });
}
.container {
    margin-top: 10px;
    border: solid 1px black;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<div>Screenshot tester</div>
<button onclick="report()">Take screenshot</button>
<div class="container">
    <img width="75%" class="screen">
</div>
In the report() function, in onrendered, after getting the image as a data URI you can show it to the user and allow them to draw a "bug region" with the mouse, then send the screenshot and region coordinates to the server.
In the example below, an async/await version was made, with a nice makeScreenshot() function.
UPDATE
Here is a simple example which allows you to take a screenshot, select a region, describe the bug and send a POST request (jsfiddle here; the main function is report()).
async function report() {
    let screenshot = await makeScreenshot(); // png dataUrl
    let img = q(".screen");
    img.src = screenshot;
    let c = q(".bug-container");
    c.classList.remove('hide');
    let box = await getBox();
    c.classList.add('hide');
    send(screenshot, box); // send post request with bug image, region and description
    alert('To see POST request with image go to: chrome console > network tab');
}

// ----- Helper functions

let q = s => document.querySelector(s); // query selector helper
window.report = report; // bind report to be visible in fiddle html

async function makeScreenshot(selector = "body") {
    return new Promise((resolve, reject) => {
        let node = document.querySelector(selector);
        html2canvas(node, { onrendered: (canvas) => {
            let pngUrl = canvas.toDataURL();
            resolve(pngUrl);
        }});
    });
}

async function getBox(box) {
    return new Promise((resolve, reject) => {
        let b = q(".bug");
        let r = q(".region");
        let scr = q(".screen");
        let send = q(".send");
        let start = 0;
        let sx, sy, ex, ey = -1;
        r.style.width = 0;
        r.style.height = 0;
        let drawBox = () => {
            r.style.left = (ex > 0 ? sx : sx + ex) + 'px';
            r.style.top = (ey > 0 ? sy : sy + ey) + 'px';
            r.style.width = Math.abs(ex) + 'px';
            r.style.height = Math.abs(ey) + 'px';
        };
        //console.log({b,r, scr});
        b.addEventListener("click", e => {
            if (start == 0) {
                sx = e.pageX;
                sy = e.pageY;
                ex = 0;
                ey = 0;
                drawBox();
            }
            start = (start + 1) % 3;
        });
        b.addEventListener("mousemove", e => {
            //console.log(e)
            if (start == 1) {
                ex = e.pageX - sx;
                ey = e.pageY - sy;
                drawBox();
            }
        });
        send.addEventListener("click", e => {
            start = 0;
            let a = 100 / 75; // zoom out img 75%
            resolve({
                x: Math.floor(((ex > 0 ? sx : sx + ex) - scr.offsetLeft) * a),
                y: Math.floor(((ey > 0 ? sy : sy + ey) - b.offsetTop) * a),
                width: Math.floor(Math.abs(ex) * a),
                height: Math.floor(Math.abs(ey) * a), // was Math.abs(ex): height must use ey
                desc: q('.bug-desc').value
            });
        });
    });
}

function send(image, box) {
    let formData = new FormData();
    let req = new XMLHttpRequest();
    formData.append("box", JSON.stringify(box));
    formData.append("screenshot", image);
    req.open("POST", '/upload/screenshot');
    req.send(formData);
}
.bug-container { background: rgba(255,0,0,0.1); margin-top: 20px; text-align: center; }
.send { border-radius: 5px; padding: 10px; background: green; cursor: pointer; }
.region { position: absolute; background: rgba(255,0,0,0.4); }
.example { height: 100px; background: yellow; }
.bug { margin-top: 10px; cursor: crosshair; }
.hide { display: none; }
.screen { pointer-events: none }
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<body>
    <div>Screenshot tester</div>
    <button onclick="report()">Report bug</button>
    <div class="example">Lorem ipsum</div>
    <div class="bug-container hide">
        <div>Select bug region: click once - move mouse - click again</div>
        <div class="bug">
            <img width="75%" class="screen">
            <div class="region"></div>
        </div>
        <div>
            <textarea class="bug-desc">Describe bug here...</textarea>
        </div>
        <div class="send">SEND BUG</div>
    </div>
</body>
Get a screenshot as a Canvas or JPEG Blob / ArrayBuffer using the getDisplayMedia API:
FIX 1: Use getUserMedia with chromeMediaSource only for Electron.js
FIX 2: Throw an error instead of returning a null object
FIX 3: Fix the demo to prevent the error: getDisplayMedia must be called from a user gesture handler
// docs: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia
// see: https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/#20893521368186473
// see: https://github.com/muaz-khan/WebRTC-Experiment/blob/master/Pluginfree-Screen-Sharing/conference.js
function getDisplayMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getDisplayMedia) {
        return navigator.mediaDevices.getDisplayMedia(options)
    }
    if (navigator.getDisplayMedia) {
        return navigator.getDisplayMedia(options)
    }
    if (navigator.webkitGetDisplayMedia) {
        return navigator.webkitGetDisplayMedia(options)
    }
    if (navigator.mozGetDisplayMedia) {
        return navigator.mozGetDisplayMedia(options)
    }
    throw new Error('getDisplayMedia is not defined')
}

function getUserMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        return navigator.mediaDevices.getUserMedia(options)
    }
    if (navigator.getUserMedia) {
        return navigator.getUserMedia(options)
    }
    if (navigator.webkitGetUserMedia) {
        return navigator.webkitGetUserMedia(options)
    }
    if (navigator.mozGetUserMedia) {
        return navigator.mozGetUserMedia(options)
    }
    throw new Error('getUserMedia is not defined')
}

async function takeScreenshotStream() {
    // see: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen
    const width = screen.width * (window.devicePixelRatio || 1)
    const height = screen.height * (window.devicePixelRatio || 1)

    const errors = []
    let stream
    try {
        stream = await getDisplayMedia({
            audio: false,
            // see: https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamConstraints/video
            video: {
                width,
                height,
                frameRate: 1,
            },
        })
    } catch (ex) {
        errors.push(ex)
    }

    // for electron js
    if (navigator.userAgent.indexOf('Electron') >= 0) {
        try {
            stream = await getUserMedia({
                audio: false,
                video: {
                    mandatory: {
                        chromeMediaSource: 'desktop',
                        // chromeMediaSourceId: source.id,
                        minWidth: width,
                        maxWidth: width,
                        minHeight: height,
                        maxHeight: height,
                    },
                },
            })
        } catch (ex) {
            errors.push(ex)
        }
    }

    if (errors.length) {
        console.debug(...errors)
        if (!stream) {
            throw errors[errors.length - 1]
        }
    }

    return stream
}
async function takeScreenshotCanvas() {
    const stream = await takeScreenshotStream()

    // from: https://stackoverflow.com/a/57665309/5221762
    const video = document.createElement('video')
    const result = await new Promise((resolve, reject) => {
        video.onloadedmetadata = () => {
            video.play()
            video.pause()

            // from: https://github.com/kasprownik/electron-screencapture/blob/master/index.js
            const canvas = document.createElement('canvas')
            canvas.width = video.videoWidth
            canvas.height = video.videoHeight
            const context = canvas.getContext('2d')
            // see: https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement
            context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
            resolve(canvas)
        }
        video.srcObject = stream
    })

    stream.getTracks().forEach(function (track) {
        track.stop()
    })

    if (result == null) {
        throw new Error('Cannot take canvas screenshot')
    }

    return result
}

// from: https://stackoverflow.com/a/46182044/5221762
function getJpegBlob(canvas) {
    return new Promise((resolve, reject) => {
        // docs: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
        canvas.toBlob(blob => resolve(blob), 'image/jpeg', 0.95)
    })
}

async function getJpegBytes(canvas) {
    const blob = await getJpegBlob(canvas)
    return new Promise((resolve, reject) => {
        const fileReader = new FileReader()
        fileReader.addEventListener('loadend', function () {
            if (this.error) {
                reject(this.error)
                return
            }
            resolve(this.result)
        })
        fileReader.readAsArrayBuffer(blob)
    })
}

async function takeScreenshotJpegBlob() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBlob(canvas)
}

async function takeScreenshotJpegBytes() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBytes(canvas)
}

function blobToCanvas(blob, maxWidth, maxHeight) {
    return new Promise((resolve, reject) => {
        const img = new Image()
        img.onload = function () {
            const canvas = document.createElement('canvas')
            const scale = Math.min(
                1,
                maxWidth ? maxWidth / img.width : 1,
                maxHeight ? maxHeight / img.height : 1,
            )
            canvas.width = img.width * scale
            canvas.height = img.height * scale
            const ctx = canvas.getContext('2d')
            ctx.drawImage(img, 0, 0, img.width, img.height, 0, 0, canvas.width, canvas.height)
            resolve(canvas)
        }
        img.onerror = () => {
            reject(new Error('Error loading blob into Image'))
        }
        img.src = URL.createObjectURL(blob)
    })
}
DEMO:
document.body.onclick = async () => {
    // take the screenshot
    var screenshotJpegBlob = await takeScreenshotJpegBlob()

    // show preview with max size 300 x 300 px
    var previewCanvas = await blobToCanvas(screenshotJpegBlob, 300, 300)
    previewCanvas.style.position = 'fixed'
    document.body.appendChild(previewCanvas)

    // send it to the server
    var formdata = new FormData()
    formdata.append("screenshot", screenshotJpegBlob)
    await fetch('https://your-web-site.com/', {
        method: 'POST',
        body: formdata,
        // note: don't set Content-Type manually when sending FormData;
        // the browser adds the multipart boundary itself
    })
}
// and click on the page
Here is a complete screenshot example that works with Chrome in 2021. The end result is a blob ready to be transmitted. The flow is: request media > grab frame > draw to canvas > transfer to blob. If you want to do it in a more memory-efficient way, explore OffscreenCanvas or possibly ImageBitmapRenderingContext (see the sketch after the snippet below).
https://jsfiddle.net/v24hyd3q/1/
// Request media
navigator.mediaDevices.getDisplayMedia().then(stream => {
    // Grab frame from stream
    let track = stream.getVideoTracks()[0];
    let capture = new ImageCapture(track);
    capture.grabFrame().then(bitmap => {
        // Stop sharing
        track.stop();

        // Draw the bitmap to canvas
        canvas.width = bitmap.width;
        canvas.height = bitmap.height;
        canvas.getContext('2d').drawImage(bitmap, 0, 0);

        // Grab blob from canvas
        canvas.toBlob(blob => {
            // Do things with blob here
            console.log('output blob:', blob);
        });
    });
})
.catch(e => console.log(e));
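A sketch of the more memory-efficient variant mentioned above, using OffscreenCanvas instead of a DOM canvas (this is an illustration, not part of the original answer, and it assumes the browser supports both ImageCapture and OffscreenCanvas):

navigator.mediaDevices.getDisplayMedia().then(async stream => {
    const track = stream.getVideoTracks()[0];
    const bitmap = await new ImageCapture(track).grabFrame();
    track.stop(); // stop sharing as soon as the frame is captured

    // OffscreenCanvas never touches the DOM
    const canvas = new OffscreenCanvas(bitmap.width, bitmap.height);
    canvas.getContext('2d').drawImage(bitmap, 0, 0);

    // convertToBlob is the OffscreenCanvas counterpart of toBlob
    const blob = await canvas.convertToBlob({ type: 'image/png' });
    console.log('output blob:', blob);
}).catch(e => console.log(e));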
Here's an example using getDisplayMedia:
document.body.innerHTML = '<video style="width: 100%; height: 100%; border: 1px black solid;"/>';

navigator.mediaDevices.getDisplayMedia()
    .then(mediaStream => {
        const video = document.querySelector('video');
        video.srcObject = mediaStream;
        video.onloadedmetadata = e => {
            video.play();
            video.pause();
        };
    })
    .catch(err => console.log(`${err.name}: ${err.message}`));
Also worth checking out is the Screen Capture API docs.
You can try my new JS library: screenshot.js.
It enables you to take a real screenshot.
You load the script:
<script src="https://raw.githubusercontent.com/amiad/screenshot.js/master/screenshot.js"></script>
and take screenshot:
new Screenshot({success: img => {
    // callback function
    myimage = img;
}});
You can read about more options on the project page.