HTML/JavaScript Webcam Video and Picture with Different Resolutions

I am developing an app in HTML and TypeScript/JavaScript (Chrome support only, as it is ultimately embedded in an Electron app) where a webcam (Logitech B910) video is streamed at a fairly low resolution (640 x 480) in order to do some post-processing (mainly TensorFlow & MediaPipe) without consuming too many resources on the computer.
From time to time, I also need to take some image captures at the webcam's highest resolution (1280 x 720 in my case).
What is the fastest way to take a capture at a higher resolution than the currently streamed video resolution?
The code below works, but the process (mainly the double resolution switch) is very slow (roughly 6 seconds on my computer).
Here are the code details:
1/ HTML code:
<div #containerRef class="camera-container" *ngIf="count">
  <canvas #canvasref class="camera-canvas"></canvas>
  <video #videoref playsinline class="camera-video" width="auto" height="auto"></video>
</div>
2/ TypeScript webcam video setup code:
static async startCamera(deviceId, containerSize, container: HTMLElement, targetFPS: number, _video, _canvas) {
  const videoConfig = {
    audio: false,
    video: {
      width: { min: 320, ideal: 640, max: 1280 },
      height: { min: 240, ideal: 480, max: 720 },
      frameRate: { ideal: targetFPS },
      // deviceId belongs inside the video constraints; the original braces left it outside
      deviceId: { exact: deviceId }
    }
  };
  const stream = await navigator.mediaDevices.getUserMedia(videoConfig);
  const camera = new Camera(_video, _canvas);
  camera.video.srcObject = stream;
  await new Promise((resolve) => {
    camera.video.onloadedmetadata = () => { resolve(_video); };
  });
  camera.video.play();
  /// Video tag size
  camera.video.width = containerSize.width;
  camera.video.height = containerSize.height;
  /// Canvas tag size
  camera.canvas.width = containerSize.width;
  camera.canvas.height = containerSize.height;
  /// Container tag size
  container.style.width = containerSize.width.toString() + "px";
  container.style.height = containerSize.height.toString() + "px";
  return camera;
}
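For context, a call site for this setup function might look like the sketch below; the CameraService class name, the deviceId lookup, and the Angular element refs are assumptions based on the signature and template above, not part of the original code.
// Hypothetical call site for startCamera(); CameraService and the refs are illustrative.
const devices = await navigator.mediaDevices.enumerateDevices();
const cam = devices.find((d) => d.kind === 'videoinput');
const camera = await CameraService.startCamera(
  cam.deviceId,
  { width: 640, height: 480 },      // containerSize
  this.containerRef.nativeElement,  // the <div> from the template
  30,                               // targetFPS
  this.videoref.nativeElement,
  this.canvasref.nativeElement
);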
3/ The code I use to do the image capture:
async takePhoto(required_width, required_height) {
  const track = this.video.srcObject.getVideoTracks()[0];
  let constraints: MediaTrackConstraints = track.getConstraints();
  /// Save the current values to load them back at the end
  const currentWidth = constraints.width;
  const currentHeight = constraints.height;
  /// Apply the new resolution
  constraints.width = 1920;
  constraints.height = 780;
  await track.applyConstraints(constraints);
  /// Do the image capture
  const capture = new ImageCapture(track);
  const { imageWidth, imageHeight } = await capture.getPhotoCapabilities();
  const width = this.setInRange(required_width, imageWidth);
  const height = this.setInRange(required_height, imageHeight);
  const photoSettings = (width && height) ? {
    imageWidth: width,
    imageHeight: height
  } : null;
  const pic = await capture.takePhoto(photoSettings);
  /// Load back the previously saved resolution
  constraints.width = currentWidth;
  constraints.height = currentHeight;
  await track.applyConstraints(constraints);
  return pic;
}

setInRange(value, range) {
  if (!range) return NaN;
  let x = Math.min(range.max, Math.max(range.min, value));
  x = Math.round(x / range.step) * range.step;
  return x;
}
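For reference, a call to takePhoto() might look like the sketch below; it assumes the method resolves with the Blob returned by ImageCapture.takePhoto().
// Hypothetical usage of takePhoto(); assumes it resolves with a Blob.
const blob = await this.takePhoto(1280, 720);
const img = document.createElement('img');
img.src = URL.createObjectURL(blob);
img.onload = () => URL.revokeObjectURL(img.src); // free the object URL once shown
document.body.appendChild(img);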
Note: I thought takePhoto() from the Image Capture API was supposed to always use the highest webcam resolution, but in my case (and as confirmed by the getPhotoCapabilities() result) it always uses the resolution the camera was set up with (640 x 480 here), which is why I need this 'dirty'(?) and slow process.
Is there any faster way to do it?

Related

Taking camera input, changing it in a canvas element, and returning it back as a MediaStream is failing

I have a simple application that takes camera input, converts it to a canvas (where the stream can be manipulated) and then returns the manipulated stream back via captureStream. However, it seems like the stream isn't returning anything, as the output video is black.
Can someone point out where I went wrong here?
The code below can be copy/pasted and run.
<html>
<head></head>
<body>
  <video id="video-id" playsinline autoplay></video>
</body>
<script type="application/javascript">
  const video = document.getElementById('video-id');

  function manipulatedVideoStream(stream) {
    const temp_video = document.createElement('video');
    const temp_canvas = document.createElement('canvas');
    temp_video.srcObject = stream;
    const framerate = 1000 / 30; // ~30 fps
    setInterval(() => {
      temp_canvas.width = temp_video.videoWidth;
      temp_canvas.height = temp_video.videoHeight;
      const context = temp_canvas.getContext('2d');
      context.drawImage(temp_video, 0, 0, temp_video.width, temp_video.height);
      // draw some stuff in here
    }, framerate);
    return temp_canvas.captureStream(framerate);
  }

  const constraints = {
    audio: false,
    video: true
  };

  function handleSuccess(stream) {
    video.srcObject = manipulatedVideoStream(stream);
  }

  function handleError(error) {
    console.log('navigator.MediaDevices.getUserMedia error: ', error.message, error.name);
  }

  navigator.mediaDevices.getUserMedia(constraints).then(handleSuccess).catch(handleError);
</script>
</html>
The temp_video needs to have autoplay set:
temp_video.autoplay = true;
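In context, the fixed function looks like this (a sketch; besides the autoplay line, the drawImage call is also switched to videoWidth/videoHeight here, since the width/height of a detached video element default to 0 and nothing would be drawn):
function manipulatedVideoStream(stream) {
  const temp_video = document.createElement('video');
  const temp_canvas = document.createElement('canvas');
  temp_video.srcObject = stream;
  temp_video.autoplay = true; // without this the video never plays and the canvas stays black
  const framerate = 1000 / 30; // ~30 fps
  setInterval(() => {
    temp_canvas.width = temp_video.videoWidth;
    temp_canvas.height = temp_video.videoHeight;
    const context = temp_canvas.getContext('2d');
    context.drawImage(temp_video, 0, 0, temp_video.videoWidth, temp_video.videoHeight);
    // draw some stuff in here
  }, framerate);
  return temp_canvas.captureStream(framerate);
}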

Event for every frame of HTML Video?

I'd like to build an event handler to deal with each new frame of an HTML 5 Video element. Unfortunately, there's no built in event that fires for each new video frame (the timeupdate event is the closest but fires for each time change rather than each video frame).
Has anyone else run into this same issue? Is there a good way around it?
There is an HTMLVideoElement.requestVideoFrameCallback() method that is still being drafted, and thus neither stable nor widely implemented (it is only in Chromium-based browsers), but it does what you want, along with giving many other details about that frame.
For your Firefox users, this browser has a non-standard seekToNextFrame() method which, depending on what you want to do, you could use. This won't exactly work as an event though; it's more of a way to, well... seek to the next frame. So it will greatly affect the playing of the video, since it won't respect the duration of each frame.
And for Safari users, the closest is indeed the timeupdate event, but as you know, this doesn't really match the displayed frame.
(async () => {
  const log = document.querySelector("pre");
  const vid = document.querySelector("video");
  const canvas = document.querySelector("canvas");
  const ctx = canvas.getContext("2d");
  if (vid.requestVideoFrameCallback) {
    await vid.play();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp, frame) => {
      log.textContent = `timestamp: ${timestamp}
frame: ${JSON.stringify(frame, null, 4)}`;
      ctx.drawImage(vid, 0, 0);
      vid.requestVideoFrameCallback(drawingLoop);
    };
    vid.requestVideoFrameCallback(drawingLoop);
  } else if (vid.seekToNextFrame) {
    const requestNextFrame = (callback) => {
      vid.addEventListener("seeked", () => callback(vid.currentTime), { once: true });
      vid.seekToNextFrame();
    };
    await vid.play();
    await vid.pause();
    canvas.width = vid.videoWidth;
    canvas.height = vid.videoHeight;
    ctx.filter = "invert(1)";
    const drawingLoop = (timestamp) => {
      log.textContent = "timestamp: " + timestamp;
      ctx.drawImage(vid, 0, 0);
      requestNextFrame(drawingLoop);
    };
    requestNextFrame(drawingLoop);
  } else {
    console.error("Your browser doesn't support any of these methods, we should fallback to timeupdate");
  }
})();
video, canvas {
width: 260px;
}
<pre></pre>
<video src="https://upload.wikimedia.org/wikipedia/commons/2/22/Volcano_Lava_Sample.webm" muted controls></video>
<canvas></canvas>
Note that the encoded frames and the displayed ones are not necessarily the same thing anyway, and that browsers may not respect the encoded frame rate at all. So based on what you are trying to do, maybe a simple requestAnimationFrame loop, which fires at every update of the monitor, might be better.
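A minimal sketch of that requestAnimationFrame fallback, reusing the vid and ctx variables from the snippet above:
// Fallback: redraw on every monitor refresh instead of on every decoded video frame.
const rafLoop = () => {
  if (!vid.paused && !vid.ended) {
    ctx.drawImage(vid, 0, 0);
  }
  requestAnimationFrame(rafLoop);
};
requestAnimationFrame(rafLoop);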

Save & load a texture with alpha component in three.js

The following code works perfectly for images that do not contain an alpha channel:
toJSON() {
  let output = super.toJSON();
  output["geometry"] = this.geometry;
  output['imageURL'] = this.mesh.toJSON().images[0]["url"];
  return output;
}

fromJSON(data) {
  super.fromJSON(data);
  this.geometry = data["geometry"];
  this.image_path = data["imageURL"];
  this.refreshImage();
}

refreshImage() {
  const this_obj = this;
  const image_texture = new THREE.TextureLoader().load(
    // image to load
    this.image_path,
    // onLoad callback to create the material only once the texture is loaded and its dimensions are available;
    // this ensures the aspect ratio is based on the actual texture loaded.
    (texture) => {
      this_obj.changeGeometry(texture.image.width / texture.image.height);
    },
    // not required
    undefined,
    // onError callback
    (err) => {
      alert("An error occurred while attempting to load image");
    }
  );
  this.mesh.material.map.dispose();
  this.mesh.material.dispose();
  this.mesh.material = new THREE.MeshPhongMaterial({
    map: image_texture,
    side: THREE.DoubleSide,
    transparent: true
  });
  this.mesh.material.color.set(this.color);
  this.mesh.material.needsUpdate = true;
}
Unfortunately, it does not work for images with an alpha channel, because transparent areas are rendered as opaque black.
Does anyone know why this happens and how best to achieve the desired result?
EDIT:
I got an answer to my question when I realized that the issue comes from the Mesh.toJSON call. The method is a recursive one that is a real rabbit hole. But at the bottom of the rabbit hole you find that texture images are converted to base64 by drawing the image onto a temporary internal canvas. This happens in the ImageUtils.js module, inside the getDataURL() function.
The issue is that texture images larger than 2048 in width or height are converted into compressed "jpeg" format rather than the "png" format that retains the alpha component.
This explains everything.
You can load any image and apply it to a material using TextureLoader, but as soon as you call toJSON to serialize your mesh, the alpha component is lost if the underlying image is larger than 2048 pixels wide or high.
The solution in my case is to write my own function that draws to a canvas and converts the image to base64, but supports larger image sizes. Of course, one would have to warn the user that it may take some time to perform the conversion.
Here is the texture-to-URL converter that I came up with... stealing heavily from ImageUtils.js and removing error-handling code.
function ImageURLfromTexture(image_texture, retain_alpha = true) {
  const image = image_texture.image;
  if (image !== undefined) {
    if (/^data:/i.test(image.src)) {
      return image.src;
    }
    let _canvas = document.createElementNS('http://www.w3.org/1999/xhtml', 'canvas');
    _canvas.width = image.width;
    _canvas.height = image.height;
    const context = _canvas.getContext('2d');
    if (image instanceof ImageData) {
      context.putImageData(image, 0, 0);
    } else {
      context.drawImage(image, 0, 0, image.width, image.height);
    }
    if ((_canvas.width > 2048 || _canvas.height > 2048) && (!retain_alpha)) {
      return _canvas.toDataURL('image/jpeg', 0.6);
    } else {
      return _canvas.toDataURL('image/png');
    }
  } else {
    return null;
  }
}
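Wired into the toJSON() override from the top of the question, it could be used like this (a sketch; it reuses this.mesh.material.map, the same texture reference that refreshImage() disposes of above):
toJSON() {
  let output = super.toJSON();
  output["geometry"] = this.geometry;
  // serialize the texture ourselves instead of relying on Mesh.toJSON()
  output["imageURL"] = ImageURLfromTexture(this.mesh.material.map);
  return output;
}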

How to change the icon size of Google Maps marker in Flutter?

I am using google_maps_flutter in my Flutter app to show a Google map. I have a custom marker icon and I load it with BitmapDescriptor.fromAsset("images/car.png"); however, my icon size on the map is too big. I want to make it smaller, but I couldn't find any option for that. Is there any option to change the custom marker icon size?
Here is my Flutter code:
mapController.addMarker(
  MarkerOptions(
    icon: BitmapDescriptor.fromAsset("images/car.png"),
    position: LatLng(
      deviceLocations[i]['latitude'],
      deviceLocations[i]['longitude'],
    ),
  ),
);
And here is a screenshot of my android emulator:
As you can see in the picture my custom icon size is too big
TL;DR: As long as you are able to encode any image into raw bytes such as a Uint8List, you should be fine using it as a marker.
As of now, you can use Uint8List data to create your markers with Google Maps. That means you can use raw data to paint whatever you want as a map marker, as long as you keep the right encoding format (which in this particular scenario is PNG).
I will go through two examples where you can either:
Pick a local asset and dynamically change its size to whatever you want and render it on the map (a Flutter logo image);
Draw some stuff on a canvas and render it as a marker as well (this can be any render widget).
Besides this, you can even transform a render widget into a static image and thus use it as a marker too.
1. Using an asset
First, create a method that handles the asset path and receives a size (this can be either the width, the height, or both, but using only one will preserve the aspect ratio).
import 'dart:typed_data';
import 'dart:ui' as ui;
import 'package:flutter/services.dart';

Future<Uint8List> getBytesFromAsset(String path, int width) async {
  ByteData data = await rootBundle.load(path);
  ui.Codec codec = await ui.instantiateImageCodec(data.buffer.asUint8List(), targetWidth: width);
  ui.FrameInfo fi = await codec.getNextFrame();
  return (await fi.image.toByteData(format: ui.ImageByteFormat.png)).buffer.asUint8List();
}
Then, just add it to your map using the right descriptor:
final Uint8List markerIcon = await getBytesFromAsset('assets/images/flutter.png', 100);
final Marker marker = Marker(icon: BitmapDescriptor.fromBytes(markerIcon));
This will produce the following for 50, 100 and 200 width respectively.
2. Using canvas
You can draw anything you want with canvas and then use it as a marker. The following will produce a simple rounded box with a "Hello world!" text in it.
So, first just draw some stuff using the canvas:
Future<Uint8List> getBytesFromCanvas(int width, int height) async {
  final ui.PictureRecorder pictureRecorder = ui.PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final Paint paint = Paint()..color = Colors.blue;
  final Radius radius = Radius.circular(20.0);
  canvas.drawRRect(
    RRect.fromRectAndCorners(
      Rect.fromLTWH(0.0, 0.0, width.toDouble(), height.toDouble()),
      topLeft: radius,
      topRight: radius,
      bottomLeft: radius,
      bottomRight: radius,
    ),
    paint,
  );
  TextPainter painter = TextPainter(textDirection: TextDirection.ltr);
  painter.text = TextSpan(
    text: 'Hello world',
    style: TextStyle(fontSize: 25.0, color: Colors.white),
  );
  painter.layout();
  painter.paint(canvas, Offset((width * 0.5) - painter.width * 0.5, (height * 0.5) - painter.height * 0.5));
  final img = await pictureRecorder.endRecording().toImage(width, height);
  final data = await img.toByteData(format: ui.ImageByteFormat.png);
  return data.buffer.asUint8List();
}
and then use it the same way, but this time providing any data you want (e.g. width and height) instead of the asset path.
final Uint8List markerIcon = await getBytesFromCanvas(200, 100);
final Marker marker = Marker(icon: BitmapDescriptor.fromBytes(markerIcon));
and here you have it.
I have updated the function above; now you can scale the image as you like.
Future<Uint8List> getBytesFromCanvas(int width, int height, urlAsset) async {
  final ui.PictureRecorder pictureRecorder = ui.PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final ByteData datai = await rootBundle.load(urlAsset);
  var imaged = await loadImage(new Uint8List.view(datai.buffer));
  canvas.drawImageRect(
    imaged,
    Rect.fromLTRB(0.0, 0.0, imaged.width.toDouble(), imaged.height.toDouble()),
    Rect.fromLTRB(0.0, 0.0, width.toDouble(), height.toDouble()),
    new Paint(),
  );
  final img = await pictureRecorder.endRecording().toImage(width, height);
  final data = await img.toByteData(format: ui.ImageByteFormat.png);
  return data.buffer.asUint8List();
}
Here's a May 2020 example of adding a custom Google Map marker.
My example App:
imports:
import 'dart:typed_data';
import 'dart:ui' as ui;
import 'package:flutter/services.dart';
import 'package:flutter/material.dart';
Instantiate your map of markers somewhere in your main stateful class:
Map<MarkerId, Marker> markers = <MarkerId, Marker>{};
Function to convert the icon asset into a Uint8List object (not convoluted at all /s):
Future<Uint8List> getBytesFromAsset(String path, int width) async {
  ByteData data = await rootBundle.load(path);
  ui.Codec codec =
      await ui.instantiateImageCodec(data.buffer.asUint8List(), targetWidth: width);
  ui.FrameInfo fi = await codec.getNextFrame();
  return (await fi.image.toByteData(format: ui.ImageByteFormat.png)).buffer.asUint8List();
}
Add marker function (call this with the latitude and longitude coordinates where you want the markers):
Future<void> _addMarker(tmp_lat, tmp_lng) async {
  var markerIdVal = _locationIndex.toString();
  final MarkerId markerId = MarkerId(markerIdVal);
  final Uint8List markerIcon = await getBytesFromAsset('assets/img/pin2.png', 100);
  // creating a new MARKER
  final Marker marker = Marker(
    icon: BitmapDescriptor.fromBytes(markerIcon),
    markerId: markerId,
    position: LatLng(tmp_lat, tmp_lng),
    infoWindow: InfoWindow(title: markerIdVal, snippet: 'boop'),
  );
  setState(() {
    // adding a new marker to map
    markers[markerId] = marker;
  });
}
pubspec.yaml (feel free to try out different icons)
flutter:
  uses-material-design: true
  assets:
    - assets/img/pin1.png
    - assets/img/pin2.png
I had the same problem and I solved it this way.
Future<Uint8List> getBytesFromCanvas(int width, int height, urlAsset) async {
  final ui.PictureRecorder pictureRecorder = ui.PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final Paint paint = Paint()..color = Colors.transparent;
  final Radius radius = Radius.circular(20.0);
  canvas.drawRRect(
    RRect.fromRectAndCorners(
      Rect.fromLTWH(0.0, 0.0, width.toDouble(), height.toDouble()),
      topLeft: radius,
      topRight: radius,
      bottomLeft: radius,
      bottomRight: radius,
    ),
    paint,
  );
  final ByteData datai = await rootBundle.load(urlAsset);
  var imaged = await loadImage(new Uint8List.view(datai.buffer));
  canvas.drawImage(imaged, new Offset(0, 0), new Paint());
  final img = await pictureRecorder.endRecording().toImage(width, height);
  final data = await img.toByteData(format: ui.ImageByteFormat.png);
  return data.buffer.asUint8List();
}

Future<ui.Image> loadImage(List<int> img) async {
  final Completer<ui.Image> completer = new Completer();
  ui.decodeImageFromList(img, (ui.Image img) {
    return completer.complete(img);
  });
  return completer.future;
}
And you can use it like this:
final Uint8List markerIcon = await getBytesFromCanvas(80, 98, urlAsset);
setState(() {
  markersMap[markerId] = Marker(
    markerId: MarkerId("marker_${id}"),
    position: LatLng(double.parse(place.lat), double.parse(place.lng)),
    icon: BitmapDescriptor.fromBytes(markerIcon),
    onTap: () {
      _onMarkerTapped(placeRemote);
    },
  );
});
All the answers given are perfect, but I noticed that when you set targetWidth to a fixed number, you might have issues on phones with a different devicePixelRatio. So this is how I implemented it:
import 'dart:ui' as ui;
import 'dart:typed_data';
import 'package:flutter/material.dart';
import 'package:flutter/services.dart';

Future<Uint8List> getBytesFromAsset(String path) async {
  double pixelRatio = MediaQuery.of(context).devicePixelRatio;
  ByteData data = await rootBundle.load(path);
  ui.Codec codec = await ui.instantiateImageCodec(
    data.buffer.asUint8List(),
    targetWidth: pixelRatio.round() * 30,
  );
  ui.FrameInfo fi = await codec.getNextFrame();
  return (await fi.image.toByteData(format: ui.ImageByteFormat.png)).buffer.asUint8List();
}
and use the method like this
final Uint8List markerIcon = await getBytesFromAsset('assets/images/bike.png');
Marker(icon: BitmapDescriptor.fromBytes(markerIcon),)
That gives me a dynamic size depending on the devicePixelRatio.
This worked perfectly for me.
BitmapDescriptor.fromAsset() is the correct way to add markers, with one open bug that affects your code. As Saed answered, you need to provide different sizes of the image for different device screen densities. From the image you provided, I would guess the base size you want would be about 48 pixels, so you would need to make copies at sizes 48, 96 (2.0x), and 144 (3.0x).
The runtime should select the correct one depending on screen density. See https://flutter.dev/docs/development/ui/assets-and-images#declaring-resolution-aware-image-assets.
This is not done automatically on Android or Fuchsia at the moment. If you are releasing now and want to work around this, you can check the platform using the following logic:
MediaQueryData data = MediaQuery.of(context);
double ratio = data.devicePixelRatio;
bool isIOS = Theme.of(context).platform == TargetPlatform.iOS;
If the platform is not iOS, you would implement the buckets in your code. Combining the logic into one method:
String imageDir(String prefix, String fileName, double pixelRatio, bool isIOS) {
  String directory = '/';
  if (!isIOS) {
    // check the highest densities first, otherwise the later branches are unreachable
    if (pixelRatio >= 3.5) {
      directory = '/4.0x/';
    } else if (pixelRatio >= 2.5) {
      directory = '/3.0x/';
    } else if (pixelRatio >= 1.5) {
      directory = '/2.0x/';
    }
  }
  return '$prefix$directory$fileName';
}
You could then create a marker for an icon named person_icon in the assets directory assets/map_icons/ with this code, using the method:
myLocationMarker = Marker(
  markerId: MarkerId('myLocation'),
  position: showingLocation,
  flat: true,
  icon: BitmapDescriptor.fromAsset(imageDir('assets/map_icons', 'person_icon.png', ratio, isIOS)),
);
Since google_maps_flutter 0.5.26, fromAsset() is deprecated and should be replaced with fromAssetImage(), as some other answers mention. A more elegant way to apply fromAssetImage() for devices of different resolutions is to declare resolution-aware image assets. The idea is that Flutter renders screens using logical pixels (around 72 px per inch, if I remember correctly), while modern mobile devices can have more than 200 px per inch. The way to make an image look similar in size on devices with different pixel densities is to prepare multiple copies of the same image at different sizes: lower-density devices use the smaller image and higher-density devices use the bigger one.
So you should prepare, for example, the following images:
images/car.png <-- if this base image is 100x100px
images/2.0x/car.png <-- 2.0x one should be 200x200px
images/3.0x/car.png <-- and 3.0x one should be 300x300px
and modify your code as below, where createLocalImageConfiguration() will apply the correct scale according to the devicePixelRatio:
mapController.addMarker(
  MarkerOptions(
    icon: BitmapDescriptor.fromAssetImage(
        createLocalImageConfiguration(context),
        "images/car.png"),
    position: LatLng(
      deviceLocations[i]['latitude'],
      deviceLocations[i]['longitude'],
    ),
  ),
);
Below is the implementation of fromAssetImage() in the latest google_maps_flutter 1.0.3. You can see that the underlying implementation of BitmapDescriptor takes a scale argument, which is the key to getting the right image size.
static Future<BitmapDescriptor> fromAssetImage(
  ImageConfiguration configuration,
  String assetName, {
  AssetBundle bundle,
  String package,
  bool mipmaps = true,
}) async {
  if (!mipmaps && configuration.devicePixelRatio != null) {
    return BitmapDescriptor._(<dynamic>[
      'fromAssetImage',
      assetName,
      configuration.devicePixelRatio,
    ]);
  }
  final AssetImage assetImage =
      AssetImage(assetName, package: package, bundle: bundle);
  final AssetBundleImageKey assetBundleImageKey =
      await assetImage.obtainKey(configuration);
  return BitmapDescriptor._(<dynamic>[
    'fromAssetImage',
    assetBundleImageKey.name,
    assetBundleImageKey.scale,
    if (kIsWeb && configuration?.size != null)
      [
        configuration.size.width,
        configuration.size.height,
      ],
  ]);
}
NOTE: You can see that the size property of the ImageConfiguration only works for web.
What worked for me to select the right image for different densities:
MediaQueryData mediaQueryData = MediaQuery.of(context);
ImageConfiguration imageConfig = ImageConfiguration(devicePixelRatio: mediaQueryData.devicePixelRatio);
BitmapDescriptor.fromAssetImage(imageConfig, "assets/images/marker.png");
I will add a solution mixing several ideas and pieces of code from all over to fix this problem. First, a function to manage the image size:
Future<Uint8List> getBytesFromCanvas(double escala, urlAsset) async {
  final ui.PictureRecorder pictureRecorder = ui.PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final ByteData datai = await rootBundle.load(urlAsset);
  var imaged = await loadImage(new Uint8List.view(datai.buffer));
  // escala = the scale factor applied to the image's real size
  double width = ((imaged.width.toDouble() * escala).toInt()).toDouble();
  double height = ((imaged.height.toDouble() * escala).toInt()).toDouble();
  canvas.drawImageRect(
    imaged,
    Rect.fromLTRB(0.0, 0.0, imaged.width.toDouble(), imaged.height.toDouble()),
    Rect.fromLTRB(0.0, 0.0, width, height),
    new Paint(),
  );
  final img = await pictureRecorder.endRecording().toImage(width.toInt(), height.toInt());
  final data = await img.toByteData(format: ui.ImageByteFormat.png);
  return data.buffer.asUint8List();
}

Future<ui.Image> loadImage(List<int> img) async {
  final Completer<ui.Image> completer = new Completer();
  ui.decodeImageFromList(img, (ui.Image img) {
    return completer.complete(img);
  });
  return completer.future;
}
Then apply this function depending on whether the device is iOS or Android. The getBytesFromCanvas() function takes two parameters: the scale of the real image size and the asset URL.
var iconTour;
bool isIOS = Theme.of(context).platform == TargetPlatform.iOS;
if (isIOS) {
  final markerIcon = await getBytesFromCanvas(0.7, 'images/Icon.png');
  iconTour = BitmapDescriptor.fromBytes(markerIcon);
} else {
  final markerIcon = await getBytesFromCanvas(1, 'images/Icon.png');
  iconTour = BitmapDescriptor.fromBytes(markerIcon);
}
setState(() {
  final Marker marker = Marker(icon: iconTour);
});
That's all.
I found the simplest way to solve this issue.
I used the version below for the Google Maps implementation; in lower versions of google_maps_flutter, BitmapDescriptor.fromBytes does not work.
google_maps_flutter: ^0.5.19
And set the marker points like this:
Future setMarkersPoint() async {
  var icon = 'your url';
  Uint8List dataBytes;
  var request = await http.get(icon);
  var bytes = await request.bodyBytes;
  setState(() {
    dataBytes = bytes;
  });
  final Uint8List markerIcon = await getBytesFromCanvas(150, 150, dataBytes);
  var myLatLong = LatLng(-6.9024812, 107.61881);
  _markers.add(Marker(
    markerId: MarkerId(myLatLong.toString()),
    icon: BitmapDescriptor.fromBytes(markerIcon),
    position: myLatLong,
    infoWindow: InfoWindow(
      title: 'Name of location',
      snippet: 'Marker Description',
    ),
  ));
}
And if you want to change the icon size, then use the code below:
Future<Uint8List> getBytesFromCanvas(
    int width, int height, Uint8List dataBytes) async {
  final ui.PictureRecorder pictureRecorder = ui.PictureRecorder();
  final Canvas canvas = Canvas(pictureRecorder);
  final Paint paint = Paint()..color = Colors.transparent;
  final Radius radius = Radius.circular(20.0);
  canvas.drawRRect(
    RRect.fromRectAndCorners(
      Rect.fromLTWH(0.0, 0.0, width.toDouble(), height.toDouble()),
      topLeft: radius,
      topRight: radius,
      bottomLeft: radius,
      bottomRight: radius,
    ),
    paint,
  );
  var imaged = await loadImage(dataBytes.buffer.asUint8List());
  canvas.drawImageRect(
    imaged,
    Rect.fromLTRB(0.0, 0.0, imaged.width.toDouble(), imaged.height.toDouble()),
    Rect.fromLTRB(0.0, 0.0, width.toDouble(), height.toDouble()),
    new Paint(),
  );
  final img = await pictureRecorder.endRecording().toImage(width, height);
  final data = await img.toByteData(format: ui.ImageByteFormat.png);
  return data.buffer.asUint8List();
}

Future<ui.Image> loadImage(List<int> img) async {
  final Completer<ui.Image> completer = new Completer();
  ui.decodeImageFromList(img, (ui.Image img) {
    return completer.complete(img);
  });
  return completer.future;
}
Hope it works for you!
So you can try the ugly way: MediaQuery will return the ratio, and you can check the conditions manually, something like so:
double mq = MediaQuery.of(context).devicePixelRatio;
String icon = "images/car.png";
if (mq > 1.5 && mq < 2.5) {
  icon = "images/car2.png";
} else if (mq >= 2.5) {
  icon = "images/car3.png";
}

mapController.addMarker(
  MarkerOptions(
    icon: BitmapDescriptor.fromAsset(icon),
    position: LatLng(37.4219999, -122.0862462),
  ),
);
You need to add your different asset images in your images folder, like:
-images/car.png
-images/car2.png
-images/car3.png
Try BitmapDescriptor.fromAssetImage. It will ignore the image size as well.
BitmapDescriptor.fromAssetImage(
        ImageConfiguration(size: Size(32, 32)), 'assets/car.png')
    .then((onValue) {
  setState(() {
    markerIcon = onValue;
  });
});
Also using default configuration fails.
loadMarkerImage(BuildContext context) {
  var config = createLocalImageConfiguration(context, size: Size(30, 30));
  BitmapDescriptor.fromAssetImage(config, 'assets/car.png').then((onValue) {
    setState(() {
      markerIcon = onValue;
    });
  });
}
A simple way I found to solve this:
BitmapDescriptor get deliveryIcon {
  bool isIOS = Theme.of(context).platform == TargetPlatform.iOS;
  if (isIOS)
    return BitmapDescriptor.fromAsset('assets/icons/orange_pin.png');
  else
    return BitmapDescriptor.fromAsset('assets/icons/3.0x/orange_pin.png');
}
Simply put, supply Android with the larger asset.
Large images should be avoided, as they consume unnecessary space. Images should be scaled for your map, with variations in pixel resolution to cater for the device.
The base image should be scaled to the correct size outside of your application. Different devices have different pixel resolutions, which Flutter caters for, and different versions of your image are required so that it does not appear jagged. Scale the image up for the different resolutions: with a base version of 32x32 pixels, version 2.0 will be 64x64 pixels, version 3.0 will be 128x128, etc. See the standard Flutter way described below, which caters for different pixel resolutions depending on the device manufacturer.
BitmapDescriptor.fromAsset does not support automatic resolution of pixel density and will load exactly the file specified in the path. To correct this, call AssetImage to resolve the correct filename.
There is a bug with the rendering of images (images on iOS look bigger than on Android, see defect 24865). There is a workaround for this too, by hardcoding the file name of the resolution you would prefer.
The following sections outline the standard flutter way, the AssetImage workaround, and the 24865 workaround.
Standard Flutter image naming conventions
Create an asset folder with the naming convention:
pathtoimages/image.png
pathtoimages/Mx/image.png
pathtoimages/Nx/image.png
pathtoimages/etc.
Where M and N are resolutions (2.0x) or themes (dark).
Then add the image or all the images to the pubspec file as either:
flutter:
  assets:
    - pathtoimages/image.png

or

flutter:
  assets:
    - pathtoimages/
Workaround for Google Maps
This standard requires that images are then loaded using AssetImage('pathtoimages/image.png'), which is not supported by the Google Maps plugin. Google Maps requires that you use BitmapDescriptor.fromAsset('pathtoimages/image.png'), which at this time does not resolve to the correct image. To fix this, you can get the correct image from AssetImage by first calling createLocalImageConfiguration with the BuildContext, as defined here. Then use this configuration to resolve the correct image as follows:
ImageConfiguration config = createLocalImageConfiguration(context);
AssetImage('pathtoimages/image.png')
    .obtainKey(config)
    .then((resolvedImage) {
  // obtainKey resolves to an AssetBundleImageKey, whose .name is the resolved path
  print('Name: ' + resolvedImage.name);
});
Defect 24865 workaround
BitmapDescriptor get deliveryIcon {
  bool isIOS = Theme.of(context).platform == TargetPlatform.iOS;
  if (isIOS)
    return BitmapDescriptor.fromAsset('pathtoimages/image.png');
  else
    return BitmapDescriptor.fromAsset(resolvedImageName);
}

Convert HTML5 Canvas Sequence to a Video File

I'd like to convert an animation in HTML5 canvas to a video file that could be uploaded to YouTube. Is there any sort of screen capture API or something that could allow me to do this programatically?
Back to 2020:
Solved it by using the MediaRecorder API. It is built exactly to do that, among other things.
Here is a solution that records X ms of canvas video.
You can extend it with button UI to start, pause, resume, stop, and generate a URL.
function record(canvas, time) {
  var recordedChunks = [];
  return new Promise(function (res, rej) {
    var stream = canvas.captureStream(25 /*fps*/);
    var mediaRecorder = new MediaRecorder(stream, {
      mimeType: "video/webm; codecs=vp9"
    });
    // ondataavailable will fire at intervals of `time || 4000` ms
    mediaRecorder.start(time || 4000);
    mediaRecorder.ondataavailable = function (event) {
      recordedChunks.push(event.data);
      // after stop, the `dataavailable` event fires one more time
      if (mediaRecorder.state === 'recording') {
        mediaRecorder.stop();
      }
    };
    mediaRecorder.onstop = function (event) {
      var blob = new Blob(recordedChunks, { type: "video/webm" });
      var url = URL.createObjectURL(blob);
      res(url);
    };
  });
}
How to use:
const recording = record(canvas, 10000)

// play it on another video element
var video$ = document.createElement('video')
document.body.appendChild(video$)
recording.then(url => video$.setAttribute('src', url))

// download it
var link$ = document.createElement('a')
link$.setAttribute('download', 'recordingVideo')
recording.then(url => {
  link$.setAttribute('href', url)
  link$.click()
})
Firefox has an experimental feature (disabled by default) that is called HTMLCanvasElement.captureStream()
Essentially it captures the canvas element as a video stream which can then be sent to another computer using RTCPeerConnection() or perhaps you can use the YouTube Live Streaming API to stream directly.
See: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
Also: https://developers.google.com/youtube/v3/live/getting-started
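A minimal sketch of that idea, assuming an existing canvas on the page and the usual WebRTC offer/answer signaling around it:
// Capture the canvas as a MediaStream and attach it to a peer connection.
const canvas = document.querySelector('canvas');
const stream = canvas.captureStream(30); // 30 fps
const pc = new RTCPeerConnection();
stream.getTracks().forEach((track) => pc.addTrack(track, stream));
// ...then run the usual offer/answer signaling to send it to the other machine.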
There exists the whammy library, which claims to produce WebM videos from stills using JavaScript:
http://antimatter15.com/wp/2012/08/whammy-a-real-time-javascript-webm-encoder/
Note that there are limitations (as to be expected). This encoder bases itself on the WebP image format, which is currently only supported in Chrome (perhaps the new Opera too, but I haven't checked). This means you can't encode in other browsers unless you find a way to encode the image you want to use as a WebP image first (see this link for a possible solution).
Beyond that there is no way to create a video file from images using JavaScript and canvas using native browser APIs.
FileSaver.js + ffmpeg on the command line
With FileSaver.js we can download each canvas frame as a PNG: Save to Local File from Blob
Then we just convert the PNGs to any video format with ffmpeg from the command line: How to create a video from images with FFmpeg?
Chromium 75 asks if you want to allow it to save multiple images. Then once you say yes, it downloads the images automatically one by one under your download folder, named as 0.png, 1.png, etc.
It also worked in Firefox 68, but less well, because the browser opens a bunch of "Do you want to save this file" windows. They do have a "do the same for similar downloads" popup, but you have to be quick to select it and hit enter, or else a new popup comes along!
To stop it, you have to close the tab, or add a stop button and some JavaScript logic.
var canvas = document.getElementById("my-canvas");
var ctx = canvas.getContext("2d");
var pixel_size = 1;
var t = 0;

/* We need this to fix t because toBlob calls are asynchronous. */
function createBlobFunc(t) {
  return function(blob) {
    saveAs(blob, t.toString() + '.png');
  };
}

function draw() {
  console.log("draw");
  for (var x = 0; x < canvas.width; x += pixel_size) {
    for (var y = 0; y < canvas.height; y += pixel_size) {
      var b = ((1.0 + Math.sin(t * Math.PI / 16)) / 2.0);
      ctx.fillStyle =
        "rgba(" +
        (x / canvas.width) * 255 + "," +
        (y / canvas.height) * 255 + "," +
        b * 255 +
        ",255)"
      ;
      ctx.fillRect(x, y, pixel_size, pixel_size);
    }
  }
  canvas.toBlob(createBlobFunc(t));
  t++;
  window.requestAnimationFrame(draw);
}
window.requestAnimationFrame(draw);
<canvas id="my-canvas" width="512" height="512" style="border:1px solid black;"></canvas>
<script src="https://cdnjs.cloudflare.com/ajax/libs/FileSaver.js/1.3.8/FileSaver.min.js"></script>
GitHub upstream.
Here's an image to GIF output using this instead: https://askubuntu.com/questions/648244/how-do-i-create-an-animated-gif-from-still-images-preferably-with-the-command-l
Frames get skipped if the FPS is too high.
This can be observed by reducing the size of the canvas in the above demo to speed things up. At 32x32, my Chromium 77 downloads in chunks of about 10 files and skips about 50 files in between...
Unfortunately, there is no way to wait for the downloads to finish: close window after file save in FileSaver.js
So the only solution I can see if you have a high framerate is framerate limiting: Controlling fps with requestAnimationFrame? Here is a live demo: https://cirosantilli.com/#html-canvas
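A minimal framerate-limiting sketch (it assumes the draw() from the demo above no longer schedules itself with requestAnimationFrame):
// Only run draw() when at least `interval` ms have passed since the last frame.
const fps = 2;
const interval = 1000 / fps;
let last = 0;
function limitedLoop(now) {
  if (now - last >= interval) {
    last = now;
    draw();
  }
  window.requestAnimationFrame(limitedLoop);
}
window.requestAnimationFrame(limitedLoop);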
Maybe one day someone will answer:
H.264 video encoder in javascript
Running ffmpeg in browser - options?
and then we will be able to download the video directly!
Here is an OpenGL version if you decide that the browser is not for you :-) How to use GLUT/OpenGL to render to a file?
Tested in Ubuntu 19.04.
This should help: it allows you to drop some images that get converted into an HTML5 canvas and then into a WebM video: http://techslides.com/demos/image-video/create.html
Pure JavaScript, no 3rd-party packages.
If you have a video and want to take some frames, you can try as below:
class Video2Canvas {
  /**
   * @description Create a canvas and save the frames of the video that you are giving.
   * @param {HTMLVideoElement} video
   * @param {Number} fps
   * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
   * */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... // you can make some modifications to change the frame. For example, create the grayscale frame: https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 Below is optional. It saves each frame as a link; if you wish, you can click the link to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}
const v2c = new Video2Canvas(document.querySelector("video"), 1)
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>
If you want to edit the video (for example, take 5~8 sec + 12~15 sec and then create a new one), you can try:
class CanvasRecord {
  /**
   * @param {HTMLCanvasElement} canvas
   * @param {Number} fps
   * @param {string} mediaType: video/webm, video/mp4 (not supported yet) ...
   * */
  constructor(canvas, fps, mediaType) {
    this.canvas = canvas
    const stream = canvas.captureStream(25) // fps // https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/captureStream
    this.mediaRecorder = new MediaRecorder(stream, { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/MediaRecorder
      mimeType: mediaType
    })
    this.initControlBtn()
    this.chunks = []
    this.mediaRecorder.ondataavailable = (event) => {
      this.chunks.push(event.data)
    }
    this.mediaRecorder.onstop = (event) => {
      const blob = new Blob(this.chunks, {
        type: mediaType
      })
      const url = URL.createObjectURL(blob)
      // 👇 Below is test code so you know you succeeded. You can also download the result if you wish.
      const video = document.createElement('video')
      video.src = url
      video.onended = (e) => { // "ended" is the actual event name; "onend" never fires
        URL.revokeObjectURL(video.src)
      }
      document.querySelector("body").append(video)
      video.controls = true
    }
  }

  initControlBtn() {
    const range = document.createRange()
    const frag = range.createContextualFragment(`<div>
      <button id="btn-start">Start</button>
      <button id="btn-pause">Pause</button>
      <button id="btn-resume">Resume</button>
      <button id="btn-end">End</button>
    </div>
    `)
    const btnStart = frag.querySelector(`button[id="btn-start"]`)
    const btnPause = frag.querySelector(`button[id="btn-pause"]`)
    const btnResume = frag.querySelector(`button[id="btn-resume"]`)
    const btnEnd = frag.querySelector(`button[id="btn-end"]`)
    document.querySelector('body').append(frag)
    btnStart.onclick = (event) => {
      this.chunks = [] // clear
      this.mediaRecorder.start() // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/start
      console.log(this.mediaRecorder.state) // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/state
    }
    btnPause.onclick = (event) => { // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/pause
      this.mediaRecorder.pause()
      console.log(this.mediaRecorder.state)
    }
    btnResume.onclick = (event) => {
      this.mediaRecorder.resume()
      console.log(this.mediaRecorder.state)
    }
    btnEnd.onclick = (event) => {
      this.mediaRecorder.requestData() // trigger ``ondataavailable`` // https://developer.mozilla.org/en-US/docs/Web/API/MediaRecorder/requestData
      this.mediaRecorder.stop()
      console.log(this.mediaRecorder.state)
    }
  }
}
class Video2Canvas {
  /**
   * @description Create a canvas and save the frames of the video that you are giving.
   * @param {HTMLVideoElement} video
   * @param {Number} fps
   * @see https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
   * */
  constructor(video, fps) {
    this.video = video
    this.fps = fps
    this.canvas = document.createElement("canvas");
    [this.canvas.width, this.canvas.height] = [video.width, video.height]
    document.querySelector("body").append(this.canvas)
    this.ctx = this.canvas.getContext('2d')
    this.initEventListener()
  }

  initEventListener() {
    this.video.addEventListener("play", () => {
      const timeout = Math.round(1000 / this.fps)
      const width = this.video.width
      const height = this.video.height
      const recordFunc = () => {
        if (this.video.paused || this.video.ended) {
          return
        }
        this.ctx.drawImage(this.video, 0, 0, width, height)
        /*
        const frame = this.ctx.getImageData(0, 0, width, height)
        // ... // you can make some modifications to change the frame. For example, create the grayscale frame: https://developer.mozilla.org/en-US/docs/Web/Guide/Audio_and_video_manipulation#video_manipulation
        // 👇 Below is optional. It saves each frame as a link; if you wish, you can click the link to download the picture.
        const range = document.createRange()
        const frag = range.createContextualFragment('<div><a></a></div>')
        const tmpCanvas = document.createElement('canvas')
        tmpCanvas.width = this.canvas.width
        tmpCanvas.height = this.canvas.height
        tmpCanvas.getContext('2d').putImageData(frame, 0, 0)
        const a = frag.querySelector('a')
        a.innerText = "my.png"
        a.download = "my.png"
        const quality = 1.0
        a.href = tmpCanvas.toDataURL("image/png", quality)
        a.append(tmpCanvas)
        document.querySelector('body').append(frag)
        */
        setTimeout(recordFunc, timeout)
      }
      setTimeout(recordFunc, timeout)
    })
  }
}
(() => {
  const v2c = new Video2Canvas(document.querySelector("video"), 60)
  const canvasRecord = new CanvasRecord(v2c.canvas, 25, 'video/webm')
  v2c.video.addEventListener("play", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-resume").click()
  })
  v2c.video.addEventListener("pause", (event) => {
    if (canvasRecord.mediaRecorder.state === "inactive") {
      return
    }
    document.getElementById("btn-pause").click()
  })
})()
<video id="my-video" controls="true" width="480" height="270" crossorigin="anonymous">
<source src="http://jplayer.org/video/webm/Big_Buck_Bunny_Trailer.webm" type="video/webm">
</video>