Is there any way to create room name text inside the room element in forge viewer?
I have room elements in the forge viewer as per the below image.
So, I can read a room name from element properties. Then, I want to create room name text in forge viewer. May I have the solution?
Thanks in advance.
Update 2021-06-29
Added some conditions to avoid invalid data input.
/////////////////////////////////////////////////////////////////////
// Copyright (c) Autodesk, Inc. All rights reserved
// Written by Forge Partner Development
//
// Permission to use, copy, modify, and distribute this software in
// object code form for any purpose and without fee is hereby granted,
// provided that the above copyright notice appears in all copies and
// that both that copyright notice and the limited warranty and
// restricted rights notice below appear in all supporting
// documentation.
//
// AUTODESK PROVIDES THIS PROGRAM 'AS IS' AND WITH ALL FAULTS.
// AUTODESK SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTY OF
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. AUTODESK, INC.
// DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
// UNINTERRUPTED OR ERROR FREE.
/////////////////////////////////////////////////////////////////////
//ref: https://stackoverflow.com/a/61262544
/**
 * Measures rendered text dimensions by temporarily mounting a hidden SVG
 * <text> node in the document and reading its bounding box.
 *
 * NOTE: measureText() removes the hidden <svg> from the document when it
 * finishes, so each TextMeasurer instance supports exactly ONE measurement.
 */
class TextMeasurer {
    constructor() {
        const SVG_NS = 'http://www.w3.org/2000/svg';
        // A hidden, zero-sized SVG used purely as a measuring surface.
        this.svg = document.createElementNS(SVG_NS, 'svg');
        this.svg.style.visibility = 'hidden';
        this.svg.setAttribute('xmlns', SVG_NS); // FIX: missing semicolon
        this.svg.setAttribute('width', 0);
        this.svg.setAttribute('height', 0);
        this.svgtext = document.createElementNS(SVG_NS, 'text');
        this.svg.appendChild(this.svgtext);
        this.svgtext.setAttribute('x', 0);
        this.svgtext.setAttribute('y', 0);
        // The SVG must be attached to the DOM for getBBox() to return real values.
        document.querySelector('body').appendChild(this.svg);
    }
    /**
     * Measure a single line of text, including the bounding box, inner size and lead and trail X.
     * @param {string} text Single line of text
     * @param {string} fontFamily Name of font family
     * @param {string} fontSize Font size including units
     * @returns {{bbWidth: number, textLength: number, leadX: number, trailX: number, bbHeight: number}}
     */
    measureText(text, fontFamily, fontSize) {
        this.svgtext.setAttribute('font-family', fontFamily);
        this.svgtext.setAttribute('font-size', fontSize);
        this.svgtext.textContent = text;
        const bbox = this.svgtext.getBBox();
        const textLength = this.svgtext.getComputedTextLength();
        // Measure the overflow before and after the line caused by font side bearing.
        // Rendering should start at X + leadX to have the edge of the text appear at X
        // when rendering left-aligned left-to-right.
        const baseX = Number.parseInt(this.svgtext.getAttribute('x'), 10); // FIX: explicit radix
        const overflow = bbox.width - textLength;
        const leadX = Math.abs(baseX - bbox.x);
        const trailX = overflow - leadX;
        // Detach the measuring SVG — this instance cannot be reused afterwards.
        document.querySelector('body').removeChild(this.svg);
        return {
            bbWidth: bbox.width,
            textLength: textLength,
            leadX: leadX,
            trailX: trailX,
            bbHeight: bbox.height
        };
    }
}
/**
 * Forge Viewer extension that draws a text tag for every Revit Room visible
 * in the current view. Each tag is a flat plane textured with a
 * canvas-rendered label, added to an auxiliary model built with the
 * Autodesk.Viewing.SceneBuilder extension, and placed at the center of the
 * bottom face of the Room's bounding box.
 */
class AecRoomTagsExtension extends Autodesk.Viewing.Extension {
    constructor(viewer, options) {
        super(viewer, options);
        this.modelBuilder = null; // SceneBuilder model that hosts the tag meshes
        this.idPrefix = 100;      // next dbId handed out to a tag mesh
    }
    /**
     * Creates the auxiliary SceneBuilder model, then builds the room tags as
     * soon as the main model's geometry has finished loading.
     * @returns {Promise<boolean>} true when the extension is ready
     */
    async load() {
        const modelBuilderExt = await this.viewer.loadExtension('Autodesk.Viewing.SceneBuilder');
        const modelBuilder = await modelBuilderExt.addNewModel({
            conserveMemory: false,
            modelNameOverride: 'Room Tags'
        });
        this.modelBuilder = modelBuilder;
        if (!this.viewer.isLoadDone()) {
            this.viewer.addEventListener(
                Autodesk.Viewing.GEOMETRY_LOADED_EVENT,
                () => this.createRoomTags(),
                { once: true }
            );
        } else {
            this.createRoomTags();
        }
        return true;
    }
    /** Removes the auxiliary tag model from the viewer. */
    unload() {
        this.viewer.impl.unloadModel(this.modelBuilder.model);
        return true;
    }
    /** Converts CSS pixels to millimeters (96 px per inch). */
    pxToMm(val) {
        return val / 3.7795275591;
    }
    /** Converts millimeters to feet (the model's distance unit). */
    mmToFt(val) {
        return val / 304.8;
    }
    /**
     * Renders `params.text` to a canvas, turns the canvas into a textured
     * plane and adds the plane to the SceneBuilder model.
     * @param {{text: string, position: THREE.Vector3, fontSize?: number}} params
     */
    createLabel(params) {
        const text = params.text;
        const canvas = document.createElement('canvas');
        const ctx = canvas.getContext('2d');
        const fontSize = params.fontSize || 512;
        const fontName = 'serif';
        const offset = 2;
        // Measure the text first so the canvas can be sized to fit it exactly.
        // (A TextMeasurer is single-use: it detaches its hidden SVG after measuring.)
        const m = new TextMeasurer();
        const textDimensions = m.measureText(text, fontName, `${fontSize}px`);
        // NOTE: resizing a canvas resets its entire drawing state, so all drawing
        // must happen after these two assignments. (The original filled the canvas
        // yellow *before* resizing it — a no-op that was removed here.)
        canvas.height = textDimensions.bbHeight - (fontSize / 32 + 2) * offset;
        canvas.width = textDimensions.bbWidth + offset + 3 * offset;
        ctx.textBaseline = 'top';
        ctx.textAlign = 'left';
        ctx.font = `${fontSize}px ${fontName}`;
        // White background, black text, black border.
        ctx.fillStyle = 'white';
        ctx.fillRect(0, 0, textDimensions.bbWidth + offset * 2, canvas.height);
        ctx.fillStyle = '#000';
        ctx.fillText(text, offset, offset + (fontSize / 32 + 3) * offset);
        ctx.strokeRect(0, 0, textDimensions.bbWidth + offset * 2, canvas.height);
        const labelBlobUrl = canvas.toDataURL();
        const image = new Image();
        const texture = new THREE.Texture();
        texture.image = image;
        // FIX: attach the onload handler BEFORE assigning src so the load event
        // cannot be missed if the data URL decodes synchronously.
        image.onload = function () {
            texture.needsUpdate = true;
        };
        image.src = labelBlobUrl;
        const labelDbId = this.idPrefix++;
        const matName = `label-mat-${labelDbId}`;
        const material = new THREE.MeshPhongMaterial({ map: texture, side: THREE.DoubleSide, opacity: 0.8, transparent: true });
        material.map.minFilter = THREE.LinearFilter; // avoid mipmap blur on small labels
        this.modelBuilder.addMaterial(matName, material);
        const labelMat = this.modelBuilder.findMaterial(matName);
        // Convert the canvas pixel size into model units (feet).
        const planeWidth = this.mmToFt(this.pxToMm(canvas.width));
        const planeHeight = this.mmToFt(this.pxToMm(canvas.height));
        const planeGeo = new THREE.PlaneBufferGeometry(planeWidth, planeHeight);
        const plane = new THREE.Mesh(planeGeo, labelMat);
        plane.matrix = new THREE.Matrix4().compose(
            params.position,
            new THREE.Quaternion(0, 0, 0, 1),
            new THREE.Vector3(1, 1, 1)
        );
        plane.dbId = labelDbId;
        this.modelBuilder.addMesh(plane);
    }
    /**
     * Finds every Revit Room in the model and creates a tag for each one that
     * is visible in the current view and has valid data.
     */
    async createRoomTags() {
        // Resolves the dbIds of all elements whose Category is 'Revit Rooms'.
        const getRoomDbIdsAsync = () => {
            return new Promise((resolve, reject) => {
                this.viewer.search(
                    'Revit Rooms',
                    (dbIds) => resolve(dbIds),
                    (error) => reject(error),
                    ['Category'],
                    { searchHidden: true }
                );
            });
        };
        // Promisified wrapper around model.getProperties2.
        const getPropertiesAsync = (dbId, model) => {
            return new Promise((resolve, reject) => {
                model.getProperties2(
                    dbId,
                    (result) => resolve(result),
                    (error) => reject(error)
                );
            });
        };
        // Computes the union of the world bounds of every fragment of dbId.
        const getBoxAsync = (dbId, model) => {
            return new Promise((resolve, reject) => {
                const tree = model.getInstanceTree();
                const frags = model.getFragmentList();
                let bounds = new THREE.Box3();
                tree.enumNodeFragments(dbId, function (fragId) {
                    let box = new THREE.Box3();
                    frags.getWorldBounds(fragId, box);
                    bounds.union(box);
                }, true);
                return resolve(bounds);
            });
        };
        // Reads the room name from the instance tree, falling back to properties.
        const getRoomNameAsync = async (dbId, model) => {
            const tree = model.getInstanceTree();
            let name = tree.getNodeName(dbId);
            if (!name) {
                const props = await getPropertiesAsync(dbId, model);
                name = props?.name;
            }
            return name;
        };
        try {
            const roomDbIds = await getRoomDbIdsAsync();
            if (!roomDbIds || roomDbIds.length <= 0) {
                throw new Error('No Rooms found in current model');
            }
            const model = this.viewer.model;
            // Rooms only appear in master views, so bail out early otherwise.
            const currentViewableId = this.viewer.model?.getDocumentNode().data.viewableID;
            const masterViews = this.viewer.model?.getDocumentNode().getMasterViews();
            const masterViewIds = masterViews?.map(v => v.data.viewableID);
            if (!masterViewIds.includes(currentViewableId)) {
                throw new Error('Current view does not contain any Rooms');
            }
            for (let i = 0; i < roomDbIds.length; i++) {
                const dbId = roomDbIds[i];
                const name = await getRoomNameAsync(dbId, model);
                if (!name) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room \`${name}\` doesn't have valid name`);
                    continue;
                }
                // Skip rooms that are not visible in the current view.
                const roomProps = await getPropertiesAsync(dbId, model);
                const possibleViewableIds = roomProps.properties.filter(prop => prop.attributeName === 'viewable_in').map(prop => prop.displayValue);
                if (!possibleViewableIds.includes(currentViewableId)) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room \`${name}\` is not visible in current view \`${currentViewableId}\``);
                    continue;
                }
                const box = await getBoxAsync(dbId, model);
                if (!box) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room \`${name}\` has an invalid bounding box`);
                    continue;
                }
                const center = box.center();
                if (isNaN(center.x) || isNaN(center.y) || isNaN(center.z)) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room \`${name}\` has an invalid bounding box`);
                    continue;
                }
                // Place the tag slightly above the bottom face of the bounding box.
                const pos = new THREE.Vector3(
                    center.x,
                    center.y,
                    box.min.z + this.mmToFt(10)
                );
                this.createLabel({
                    // FIX: strip a trailing `[...]` id from the room name. The
                    // original pattern /\[[^)]*\]/ negated ')' instead of ']' and
                    // could over-match across several bracket groups.
                    text: name.replace(/ *\[[^\]]*\] */g, ""),
                    position: pos,
                    fontSize: 512 // in pixel
                });
            }
            // uncomment to prevent selection on tags
            // const dbIds = this.modelBuilder.model.getFragmentList().fragments.fragId2dbId;
            // const model = this.modelBuilder.model;
            // this.viewer.lockSelection(dbIds, true, model);
        } catch (ex) {
            console.warn(`[AecRoomTagsExtension]: ${ex}`);
        }
    }
}
Autodesk.Viewing.theExtensionManager.registerExtension('Autodesk.ADN.AecRoomTagsExtension', AecRoomTagsExtension);
=============================
It's similar to the Grid solution: https://stackoverflow.com/a/68129012/7745569
Not perfect, but it works. You may need to adjust the tag placement point (position) based on your model. Currently, tags are placed on the center of the bottom face of the Room bounding box.
/////////////////////////////////////////////////////////////////////
// Copyright (c) Autodesk, Inc. All rights reserved
// Written by Forge Partner Development
//
// Permission to use, copy, modify, and distribute this software in
// object code form for any purpose and without fee is hereby granted,
// provided that the above copyright notice appears in all copies and
// that both that copyright notice and the limited warranty and
// restricted rights notice below appear in all supporting
// documentation.
//
// AUTODESK PROVIDES THIS PROGRAM 'AS IS' AND WITH ALL FAULTS.
// AUTODESK SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTY OF
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE. AUTODESK, INC.
// DOES NOT WARRANT THAT THE OPERATION OF THE PROGRAM WILL BE
// UNINTERRUPTED OR ERROR FREE.
/////////////////////////////////////////////////////////////////////
//ref: https://stackoverflow.com/a/61262544
/**
 * Measures rendered text dimensions by temporarily mounting a hidden SVG
 * <text> node in the document and reading its bounding box.
 *
 * NOTE: measureText() removes the hidden <svg> from the document when it
 * finishes, so each TextMeasurer instance supports exactly ONE measurement.
 */
class TextMeasurer {
    constructor() {
        const SVG_NS = 'http://www.w3.org/2000/svg';
        // A hidden, zero-sized SVG used purely as a measuring surface.
        this.svg = document.createElementNS(SVG_NS, 'svg');
        this.svg.style.visibility = 'hidden';
        this.svg.setAttribute('xmlns', SVG_NS); // FIX: missing semicolon
        this.svg.setAttribute('width', 0);
        this.svg.setAttribute('height', 0);
        this.svgtext = document.createElementNS(SVG_NS, 'text');
        this.svg.appendChild(this.svgtext);
        this.svgtext.setAttribute('x', 0);
        this.svgtext.setAttribute('y', 0);
        // The SVG must be attached to the DOM for getBBox() to return real values.
        document.querySelector('body').appendChild(this.svg);
    }
    /**
     * Measure a single line of text, including the bounding box, inner size and lead and trail X.
     * @param {string} text Single line of text
     * @param {string} fontFamily Name of font family
     * @param {string} fontSize Font size including units
     * @returns {{bbWidth: number, textLength: number, leadX: number, trailX: number, bbHeight: number}}
     */
    measureText(text, fontFamily, fontSize) {
        this.svgtext.setAttribute('font-family', fontFamily);
        this.svgtext.setAttribute('font-size', fontSize);
        this.svgtext.textContent = text;
        const bbox = this.svgtext.getBBox();
        const textLength = this.svgtext.getComputedTextLength();
        // Measure the overflow before and after the line caused by font side bearing.
        // Rendering should start at X + leadX to have the edge of the text appear at X
        // when rendering left-aligned left-to-right.
        const baseX = Number.parseInt(this.svgtext.getAttribute('x'), 10); // FIX: explicit radix
        const overflow = bbox.width - textLength;
        const leadX = Math.abs(baseX - bbox.x);
        const trailX = overflow - leadX;
        // Detach the measuring SVG — this instance cannot be reused afterwards.
        document.querySelector('body').removeChild(this.svg);
        return {
            bbWidth: bbox.width,
            textLength: textLength,
            leadX: leadX,
            trailX: trailX,
            bbHeight: bbox.height
        };
    }
}
/**
 * Forge Viewer extension that draws a text tag for every Revit Room in the
 * current view. Tags are canvas-textured planes added to an auxiliary
 * SceneBuilder model and placed at the center of the bottom face of each
 * Room's bounding box.
 */
class AecRoomTagsExtension extends Autodesk.Viewing.Extension {
    constructor(viewer, options) {
        super(viewer, options);
        this.modelBuilder = null; // SceneBuilder model that hosts the tag meshes
        this.idPrefix = 100;      // next dbId handed out to a tag mesh
    }
    /**
     * Creates the auxiliary SceneBuilder model, then builds the room tags
     * once the main model's geometry has finished loading.
     * @returns {Promise<boolean>} true when the extension is ready
     */
    async load() {
        const modelBuilderExt = await this.viewer.loadExtension('Autodesk.Viewing.SceneBuilder');
        const modelBuilder = await modelBuilderExt.addNewModel({
            conserveMemory: false,
            modelNameOverride: 'Room Tags'
        });
        this.modelBuilder = modelBuilder;
        if (!this.viewer.isLoadDone()) {
            this.viewer.addEventListener(
                Autodesk.Viewing.GEOMETRY_LOADED_EVENT,
                () => this.createRoomTags(),
                { once: true }
            );
        } else {
            this.createRoomTags();
        }
        return true;
    }
    /** Removes the auxiliary tag model from the viewer. */
    unload() {
        this.viewer.impl.unloadModel(this.modelBuilder.model);
        return true;
    }
    /** Converts CSS pixels to millimeters (96 px per inch). */
    pxToMm(val) {
        return val / 3.7795275591;
    }
    /** Converts millimeters to feet (the model's distance unit). */
    mmToFt(val) {
        return val / 304.8;
    }
    /**
     * Renders `params.text` to a canvas, turns the canvas into a textured
     * plane and adds the plane to the SceneBuilder model.
     * @param {{text: string, position: THREE.Vector3, fontSize?: number}} params
     */
    createLabel(params) {
        const text = params.text;
        const canvas = document.createElement('canvas');
        const ctx = canvas.getContext('2d');
        const fontSize = params.fontSize || 512;
        const fontName = 'serif';
        const offset = 2;
        // Measure the text first so the canvas can be sized to fit it exactly.
        // (A TextMeasurer is single-use: it detaches its hidden SVG after measuring.)
        const m = new TextMeasurer();
        const textDimensions = m.measureText(text, fontName, `${fontSize}px`);
        // NOTE: resizing a canvas resets its entire drawing state, so all drawing
        // must happen after these two assignments. (The original filled the canvas
        // yellow *before* resizing it — a no-op that was removed here.)
        canvas.height = textDimensions.bbHeight - (fontSize / 32 + 2) * offset;
        canvas.width = textDimensions.bbWidth + offset + 3 * offset;
        ctx.textBaseline = 'top';
        ctx.textAlign = 'left';
        ctx.font = `${fontSize}px ${fontName}`;
        // White background, black text, black border.
        ctx.fillStyle = 'white';
        ctx.fillRect(0, 0, textDimensions.bbWidth + offset * 2, canvas.height);
        ctx.fillStyle = '#000';
        ctx.fillText(text, offset, offset + (fontSize / 32 + 3) * offset);
        ctx.strokeRect(0, 0, textDimensions.bbWidth + offset * 2, canvas.height);
        const labelBlobUrl = canvas.toDataURL();
        const image = new Image();
        const texture = new THREE.Texture();
        texture.image = image;
        // FIX: attach the onload handler BEFORE assigning src so the load event
        // cannot be missed if the data URL decodes synchronously.
        image.onload = function () {
            texture.needsUpdate = true;
        };
        image.src = labelBlobUrl;
        const planeWidth = this.mmToFt(this.pxToMm(canvas.width));
        const planeHeight = this.mmToFt(this.pxToMm(canvas.height));
        const planeGeo = new THREE.PlaneBufferGeometry(planeWidth, planeHeight);
        const plane = new THREE.Mesh(planeGeo, new THREE.MeshPhongMaterial({ map: texture, side: THREE.DoubleSide, opacity: 0.8, transparent: true }));
        plane.matrix = new THREE.Matrix4().compose(
            params.position,
            new THREE.Quaternion(0, 0, 0, 1),
            new THREE.Vector3(1, 1, 1)
        );
        plane.dbId = this.idPrefix++;
        this.modelBuilder.addMesh(plane);
    }
    /**
     * Finds every Revit Room in the model and creates a tag for each one
     * that has usable geometry and a name.
     */
    async createRoomTags() {
        // Resolves the dbIds of all elements whose Category is 'Revit Rooms'.
        const getRoomDbIdsAsync = () => {
            return new Promise((resolve, reject) => {
                this.viewer.search(
                    'Revit Rooms',
                    (dbIds) => resolve(dbIds),
                    (error) => reject(error),
                    ['Category'],
                    { searchHidden: true }
                );
            });
        };
        // Promisified wrapper around model.getProperties2.
        const getPropertiesAsync = (dbId, model) => {
            return new Promise((resolve, reject) => {
                model.getProperties2(
                    dbId,
                    (result) => resolve(result),
                    (error) => reject(error)
                );
            });
        };
        // Computes the union of the world bounds of every fragment of dbId.
        const getBoxAsync = (dbId, model) => {
            return new Promise((resolve, reject) => {
                const tree = model.getInstanceTree();
                const frags = model.getFragmentList();
                // FIX: the original resolved inside the fragment callback, which
                // (a) used only the FIRST fragment's bounds and (b) never resolved
                // for rooms with zero fragments, leaving the awaiting loop hung.
                const bounds = new THREE.Box3();
                tree.enumNodeFragments(dbId, function (fragId) {
                    const box = new THREE.Box3();
                    frags.getWorldBounds(fragId, box);
                    bounds.union(box);
                }, true);
                resolve(bounds);
            });
        };
        // Reads the display name of a node from the instance tree.
        const getRoomName = (dbId, model) => {
            const tree = model.getInstanceTree();
            return tree.getNodeName(dbId);
        };
        try {
            const roomDbIds = await getRoomDbIdsAsync();
            if (!roomDbIds || roomDbIds.length <= 0) {
                throw new Error('No Rooms found in current model');
            }
            const model = this.viewer.model;
            // Rooms only appear in master views; check both the master-view list
            // and the first room's 'viewable_in' property before doing any work.
            const currentViewableId = this.viewer.model?.getDocumentNode().data.viewableID;
            const firstRoomProps = await getPropertiesAsync(roomDbIds[0], this.viewer.model);
            const possibleViewableIds = firstRoomProps.properties.filter(prop => prop.attributeName === 'viewable_in').map(prop => prop.displayValue);
            const masterViews = this.viewer.model?.getDocumentNode().getMasterViews();
            const masterViewIds = masterViews?.map(v => v.data.viewableID);
            if (!masterViewIds.includes(currentViewableId) || !possibleViewableIds.includes(currentViewableId)) {
                throw new Error('Current view does not contain any Rooms');
            }
            for (let i = roomDbIds.length - 1; i >= 0; i--) {
                const dbId = roomDbIds[i];
                const box = await getBoxAsync(dbId, model);
                const name = getRoomName(dbId, model);
                // FIX: a nameless room previously threw inside name.replace and
                // aborted tagging of every remaining room.
                if (!name) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room doesn't have valid name`);
                    continue;
                }
                const center = box.center();
                // FIX: guard against rooms with no geometry (empty Box3 → NaN center).
                if (isNaN(center.x) || isNaN(center.y) || isNaN(center.z)) {
                    console.warn(`[AecRoomTagsExtension]: ${dbId} Room \`${name}\` has an invalid bounding box`);
                    continue;
                }
                // Place the tag slightly above the bottom face of the bounding box.
                const pos = new THREE.Vector3(
                    center.x,
                    center.y,
                    box.min.z + this.mmToFt(10)
                );
                this.createLabel({
                    // FIX: strip a trailing `[...]` id from the room name. The
                    // original pattern /\[[^)]*\]/ negated ')' instead of ']' and
                    // could over-match across several bracket groups.
                    text: name.replace(/ *\[[^\]]*\] */g, ""),
                    position: pos,
                    fontSize: 512 // in pixel
                });
            }
            // uncomment to prevent selection on tags
            // const dbIds = this.modelBuilder.model.getFragmentList().fragments.fragId2dbId;
            // const model = this.modelBuilder.model;
            // this.viewer.lockSelection(dbIds, true, model);
        } catch (ex) {
            console.warn(`[AecRoomTagsExtension]: ${ex}`);
        }
    }
}
Autodesk.Viewing.theExtensionManager.registerExtension('Autodesk.ADN.AecRoomTagsExtension', AecRoomTagsExtension);
Here are the demo snapshots:
I want the original image size in offline mode as well, just as it appears in online mode in image 2.
My problem is that when I open the app without an internet connection, I lose the full quality and size of the images, because I am using convertToDataURLviaCanvas().
Could you please suggest a solution?
My code is:
I am using this function to convert all images:
/**
 * Loads an image from `url` and converts it to a data URL via an offscreen
 * canvas, preserving the image's intrinsic size.
 *
 * @param url Image URL; must be CORS-accessible since it is loaded with crossOrigin.
 * @param outputFormat MIME type for the data URL, e.g. 'image/jpeg'.
 * @returns Promise resolving to the data URL string, rejecting on load failure.
 */
convertToDataURLviaCanvas(url, outputFormat) {
    return new Promise((resolve, reject) => {
        let img = new Image();
        // crossOrigin must be set before src so the request is CORS-enabled.
        img.crossOrigin = 'Anonymous';
        img.onload = function () {
            let canvas = <HTMLCanvasElement>document.createElement('CANVAS');
            let ctx = canvas.getContext('2d');
            // FIX: size the canvas to the image's intrinsic dimensions instead of
            // a hard-coded 1000x1000, so the converted image keeps its original
            // size and quality (the fixed-size canvas clipped/padded the image).
            canvas.width = img.naturalWidth;
            canvas.height = img.naturalHeight;
            ctx.drawImage(img, 0, 0);
            // FIX: honor the requested output format (it was previously ignored
            // and toDataURL() always produced a PNG).
            let dataURL = canvas.toDataURL(outputFormat);
            canvas = null;
            resolve(dataURL);
        };
        // FIX: surface load failures instead of leaving the promise pending forever.
        img.onerror = () => reject(new Error(`Failed to load image: ${url}`));
        img.src = url;
    });
}
/**
 * Loads the travel-advice list. Offline: restores the cached logo and the
 * base64-converted advice entries from storage. Online: fetches fresh data,
 * converts every image to a data URL, then persists the complete list once.
 */
getTravelAdviceData() {
    if (this.restProvider.getNetworkType() == 'none') {
        // Offline: read the previously cached values back from storage.
        this.storage.get('companyLogoOffline').then((data) => {
            this.getcompanyLogo = data;
        });
        this.storage.get('travelTips64Image').then((data) => {
            this.adviceArray = data;
        });
    } else {
        this.restProvider.getTravelAdvice()
            .then(data => {
                let serviceData: any = data['consejosviaje'];
                const conversions = [];
                for (let i in serviceData) {
                    // FIX: capture the per-item values locally. The original wrote
                    // them to `this.imagen` / `this.texto`, which raced between the
                    // async conversion callbacks and could pair the wrong text with
                    // the wrong image.
                    const item = serviceData[i];
                    conversions.push(
                        this.convertToDataURLviaCanvas(item.imagen, "image/jpeg")
                            .then(base64 => ({ 'texto': item.texto, 'imagen': base64 }))
                    );
                }
                return Promise.all(conversions);
            })
            .then(entries => {
                this.adviceArrays64 = entries;
                // FIX: persist once, after ALL conversions have finished. The
                // original called storage.set on every single conversion callback,
                // repeatedly writing a partial array.
                this.storage.set("travelTips64Image", this.adviceArrays64);
                this.adviceArray = this.adviceArrays64;
            })
            // FIX: the original silently dropped fetch/conversion failures.
            .catch(error => console.error('Failed to load travel advice.', error));
    }
}
How to render an image blob to a canvas element?
So far I have these two (simplified) functions to capture an image, transform it to a blob, and eventually render the blob on a canvas.
In this CodePen, it just returns the default black image.
var canvas = document.getElementById('canvas');
var input = document.getElementById('input');
var ctx = canvas.getContext('2d');
// NOTE(review): `photo` is only assigned inside the async toBlob/onload
// callbacks below, so it is still undefined when renderImage(photo) runs.
var photo;
function picToBlob() {
var file = input.files[0];
// NOTE(review): canvas.toBlob snapshots the CURRENT (blank) canvas content —
// it does not read the selected file, so this whole callback is drawing a
// blank image back onto the canvas.
canvas.toBlob(function(blob) {
var newImg = document.createElement("img"),
url = URL.createObjectURL(blob);
newImg.onload = function() {
ctx.drawImage(this, 0, 0);
photo = blob;
// Release the object URL once the image has been drawn.
URL.revokeObjectURL(url);
};
newImg.src = url;
}, file.type, 0.5);
// NOTE(review): this runs synchronously, BEFORE the toBlob callback above,
// so `photo` is undefined here — the cause of the black image.
canvas.renderImage(photo);
}
// Draws a Blob onto this canvas via an object URL.
HTMLCanvasElement.prototype.renderImage = function(blob) {
var canvas = this;
var ctx = canvas.getContext('2d');
var img = new Image();
img.onload = function() {
ctx.drawImage(img, 0, 0)
}
// NOTE(review): createObjectURL(undefined) throws — see comment above.
img.src = URL.createObjectURL(blob);
}
input.addEventListener('change', picToBlob, false);
I think you need to tidy up your code a bit. It's hard to know what you are trying to achieve because there are many unnecessary lines of code. The main problem is that blob is coming undefined here
HTMLCanvasElement.prototype.renderImage = function(blob){
because photo never gets initialized here inside the toBlob function...which is unnecessary for what you are trying to achieve.
Here's a simplified working version of your code snippet
// Grab the demo elements once.
const canvas = document.getElementById('canvas');
const input = document.getElementById('input');

// A File is already a Blob, so it can be handed straight to renderImage.
function picToBlob() {
    canvas.renderImage(input.files[0]);
}

// Draw any Blob (including a File) onto this canvas via an object URL.
HTMLCanvasElement.prototype.renderImage = function (blob) {
    const context = this.getContext('2d');
    const picture = new Image();
    picture.onload = () => context.drawImage(picture, 0, 0);
    picture.src = URL.createObjectURL(blob);
};

input.addEventListener('change', picToBlob, false);
<input type='file' accept='image' capture='camera' id='input'>
<canvas id = 'canvas'></canvas>
You can also use createImageBitmap to directly render a blob into the canvas:
createImageBitmap(blob).then(imageBitmap=>{ctx.drawImage(imageBitmap,0,0)})
const canvas = document.getElementById('canvas');
const input = document.getElementById('input');

// createImageBitmap decodes the Blob asynchronously and hands back a bitmap
// that drawImage can paint directly onto the canvas.
function blobToCanvas() {
    const [file] = input.files;
    createImageBitmap(file).then((imageBitmap) => {
        console.log(imageBitmap);
        canvas.getContext('2d').drawImage(imageBitmap, 0, 0);
    });
}

input.addEventListener('change', blobToCanvas, false);
<input type='file' accept='image' capture='camera' id='input'>
<canvas id='canvas'></canvas>
You can use it as below
/**
 * Paints a Blob onto a canvas.
 * @param {HTMLCanvasElement} canvas target canvas
 * @param {Blob} blob image data to draw
 */
function renderImage(canvas, blob) {
    const context = canvas.getContext('2d');
    const img = new Image();
    img.onload = ({ target }) => {
        // 👈 This is important. If you are not using the blob, you should release it if you don't want to reuse it. It's good for memory.
        URL.revokeObjectURL(target.src);
        context.drawImage(target, 0, 0);
    };
    img.src = URL.createObjectURL(blob);
}
below is an example
/**
 * @param {HTMLCanvasElement} canvas: https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API
 * @param {Blob} blob: https://developer.mozilla.org/en-US/docs/Web/API/Blob
 * */
function renderImage(canvas, blob) {
    const ctx = canvas.getContext('2d')
    switch (blob.type) {
        case "image/jpeg": // Normally, you don't need it (switch), but if you have a special case, then you can consider it.
        case "image/png": {
            // FIX: the case body is wrapped in a block so the lexical `const img`
            // declaration is properly scoped (lexical declarations directly
            // inside a case clause are a well-known lint error).
            const img = new Image()
            img.onload = (event) => {
                URL.revokeObjectURL(event.target.src) // Once it loaded the resource, then you can free it at the beginning.
                ctx.drawImage(event.target, 0, 0)
            }
            img.src = URL.createObjectURL(blob)
            break
        }
    }
}
// 👇 below is test
(() => {
    const canvasEl = document.querySelector('canvas');
    const inputEl = document.querySelector('input');
    inputEl.addEventListener('change', (event) => {
        const [file] = event.target.files;
        // Re-wrap the File in a Blob to demonstrate explicit typing.
        // If the type is unknown, default is empty string.
        const blob = new Blob([file], { "type": file.type });
        renderImage(canvasEl, blob);
    });
})();
<div><input type='file' accept='.png,.jpg'></div>
<canvas></canvas>
Another example to show you the effect of revokeObjectURL.
<div></div>
<canvas width="477" height="600"></canvas>
<script>
// Draws `blob` onto `canvas` in two passes (top half, then bottom half) to
// demonstrate what revoking the object URL between the passes does.
async function renderImage(canvas, blob, isNeedRevoke=true) {
const ctx = canvas.getContext('2d')
const img = new Image() // The upper part of the painting.
const img2 = new Image() // The lower part of the painting.
// First pass: draw the top 477x300 region of the image.
await new Promise(resolve => {
img.onload = (event) => {
if (isNeedRevoke) {
// Revoking here invalidates the URL before the second pass uses it.
URL.revokeObjectURL(event.target.src)
}
ctx.drawImage(event.target,
0, 0, 477, 300,
0, 0, 477, 300
)
resolve()
}
img.src = URL.createObjectURL(blob)
// Safety net: continue after 2s even if the image never loads.
setTimeout(resolve, 2000)
}).then(() => {
// Second pass: draw the bottom half from the SAME object URL.
img2.onload = (event) => {
ctx.drawImage(event.target,
0, 300, 477, 300,
0, 300, 477, 300
)
}
img2.src = img.src // 👈 If URL.revokeObjectURL(img.src) happened, then img2.src can't find the resource, such that img2.onload will not happen.
})
}
// Builds a button that fetches a test PNG and renders it onto the canvas,
// optionally revoking the object URL between the two draw passes.
function CreateTestButton(canvas, btnText, isNeedRevoke) {
    const button = document.createElement("button");
    button.innerText = btnText;
    button.onclick = async (event) => {
        // Start from a blank canvas on every click.
        canvas.getContext("2d").clearRect(0, 0, canvas.width, canvas.height);
        fetch("https://upload.wikimedia.org/wikipedia/commons/thumb/6/6a/PNG_Test.png/477px-PNG_Test.png")
            .then(async (response) => {
                const blob = await response.blob();
                renderImage(canvas, blob, isNeedRevoke);
            })
            .catch((err) => console.error(err));
    };
    return button;
}
(() => {
    window.onload = () => {
        const canvasEl = document.querySelector('canvas');
        const container = document.querySelector('div');
        // One button per revoke mode so the difference is easy to compare.
        container.append(
            CreateTestButton(canvasEl, "Without URL.revokeObjectURL", false),
            CreateTestButton(canvasEl, "URL.revokeObjectURL", true)
        );
    };
})();
</script>
I would like to create a decibel meter for the audio that is playing in a video element. The video element is playing a WebRTC stream.
At the moment WebRTC streams cannot be passed into a Web Audio Analyzer. (Although this might change soon … ) (see Web Audio API analyser node getByteFrequencyData returning blank array)
Is there currently another way to get decibel information from a remote mediastream?
Chrome 50 was released: As of the 13th of April 2016 using an Analyser Node with a MediaStreamAudioSourceNode works fine to get audio levels. The resulting audioLevels value can be animated or simply passed into a html meter element.
// Build a Web Audio graph that splits the incoming MediaStream into one
// AnalyserNode per channel, so a per-channel level can be read later.
// NOTE: these module-level variables are shared with calculateAudioLevels().
var _mediaStream = SOME_LOCAL_OR_RTP_MEDIASTREAM;
var _audioContext = new AudioContext();
var _audioAnalyser = [];
var _freqs = [];
var audioLevels = [0];
// source -> gain -> channel splitter -> one analyser per channel
var _audioSource = _audioContext.createMediaStreamSource(_mediaStream);
var _audioGain1 = _audioContext.createGain();
var _audioChannelSplitter = _audioContext.createChannelSplitter(_audioSource.channelCount);
_audioSource.connect(_audioGain1);
_audioGain1.connect(_audioChannelSplitter);
// Also route the gain to the destination so the audio stays audible.
_audioGain1.connect(_audioContext.destination);
for (let i = 0; i < _audioSource.channelCount; i++) {
_audioAnalyser[i] = _audioContext.createAnalyser();
_audioAnalyser[i].minDecibels = -100;
_audioAnalyser[i].maxDecibels = 0;
_audioAnalyser[i].smoothingTimeConstant = 0.8;
// A tiny FFT is enough: only the peak bin is used for the level meter.
_audioAnalyser[i].fftSize = 32;
_freqs[i] = new Uint8Array(_audioAnalyser[i].frequencyBinCount);
_audioChannelSplitter.connect(_audioAnalyser[i], i, 0);
}
// Repeatedly samples each channel's analyser and stores the peak frequency-bin
// value (normalized to roughly 0..1) into the shared `audioLevels` array.
// The setTimeout throttles the requestAnimationFrame loop to ~15 fps.
function calculateAudioLevels() {
setTimeout(() => {
for (let channelI = 0; channelI < _audioAnalyser.length; channelI++) {
_audioAnalyser[channelI].getByteFrequencyData(_freqs[channelI]);
let value = 0;
// Take the loudest bin as the channel's instantaneous level.
for (let freqBinI = 0; freqBinI < _audioAnalyser[channelI].frequencyBinCount; freqBinI++) {
value = Math.max(value, _freqs[channelI][freqBinI]);
}
// getByteFrequencyData yields 0..255, so /256 never quite reaches 1.0.
audioLevels[channelI] = value / 256;
}
requestAnimationFrame(calculateAudioLevels.bind(this));
}, 1000 / 15); // Max 15fps — not more needed
}
This is a good example:
https://webrtc.github.io/samples/src/content/getusermedia/volume/
And this is the source code:
https://github.com/webrtc/samples/tree/gh-pages/src/content/getusermedia/volume
And this is a sample:
/**
 * Starts capturing microphone audio and feeds a live level reading into the
 * #sound-meter element. SoundMeter is the external helper class from the
 * WebRTC samples project; $ is jQuery.
 *
 * FIX: declared `async` — the original plain function used `await`, which is
 * a SyntaxError inside a non-async function.
 */
async function recordAudio() {
    try {
        // Fall back to the prefixed constructor on older WebKit browsers.
        window.AudioContext = window.AudioContext || window.webkitAudioContext;
        window.audioContext = new AudioContext();
        const instantMeter = document.querySelector('#sound-meter');
        const constraints = {'video': false, 'audio': true};
        const stream = await navigator.mediaDevices.getUserMedia(constraints);
        window.stream = stream;
        const soundMeter = window.soundMeter = new SoundMeter(window.audioContext);
        soundMeter.connectToSource(stream, function(e) {
            if (e) {
                alert(e);
                return;
            }
            // Refresh the meter five times per second.
            setInterval(() => {
                instantMeter.value = soundMeter.instant.toFixed(2);
            }, 200);
        });
        $('#sound-meter').show();
        $('#audio-icon').hide();
    } catch (error) {
        console.error('Error recording audio.', error);
    }
}
In order to try and get around the odd issue I am having with CORS (here), I am attempting to reload any images loaded via canvas.loadFromJSON().
But, I am experiencing weird issues. Sometimes only one image is replaced, other times I get duplicates of one image.
Here is my code:
canvas.loadFromJSON(<?php echo json_encode($objects); ?>, function() {
var objArray = canvas.getObjects();
for (var i = 0; i < objArray.length; i++) {
canvas.setActiveObject(objArray[i]);
// NOTE(review): `var activeObject` is function-scoped and loadImage's
// callback runs asynchronously — by the time each callback fires, the loop
// has already moved on, so every callback can see the SAME (last)
// activeObject. That would explain the "duplicates of one image" symptom;
// capturing the object per-iteration (let, or an IIFE) should fix it — TODO confirm.
var activeObject = canvas.getActiveObject();
if(activeObject.type === 'image') {
// Re-download the image with CORS enabled so the canvas is not tainted.
fabric.util.loadImage(activeObject.src, function(img) {
var object = new fabric.Image(img);
object.hasControls = true;
object.lockUniScaling = true;
object.scaleX = activeObject.scaleX;
object.scaleY = activeObject.scaleY;
object.originX = activeObject.originX;
object.originY = activeObject.originY;
object.centeredRotation = true;
object.centeredScaling = true;
canvas.add(object);
}, null, {crossOrigin: 'Anonymous'});
// NOTE(review): removal happens synchronously, before the replacement has
// been added by the async callback above.
canvas.remove(activeObject);
}
activeObject.setCoords();
}
canvas.deactivateAll();
canvas.renderAll();
canvas.calcOffset();
});
Any ideas why I'm getting these weird issues?
At first glance at your code I don't see anything wrong... But I'm also thinking the code might be a bit inefficient. Is there a need to create a new image instance?
I believe you should be able to just set the crossOrigin property on the image object.
This code is untested, but I'd try something like this:
canvas.loadFromJSON(<?php echo json_encode($objects); ?>, function() {
var objArray = canvas.getObjects();
for (var i = 0; i < objArray.length; i++) {
canvas.setActiveObject(objArray[i]);
var activeObject = canvas.getActiveObject();
if(activeObject.type === 'image') {
// NOTE(review): setting crossOrigin on an already-loaded fabric image does
// not re-fetch it with CORS headers; the flag only affects future loads —
// verify this actually untaints the canvas before relying on it.
activeObject.crossOrigin = 'Anonymous';
}
}
canvas.deactivateAll();
canvas.renderAll();
canvas.calcOffset();
});
I had the same problem and overcame it by downloading the image again and reassigning it to object._element after each fabric object was created using loadFromJSON.
// Loads an image with CORS enabled; resolves with the element once it is
// ready to use, rejects on a load error.
export const getImage = (url) => {
    return new Promise((resolve, reject) => {
        const img = new Image();
        img.onload = () => resolve(img);
        img.onerror = reject;
        // crossOrigin must be set before src so the request is CORS-enabled.
        img.setAttribute('crossOrigin', 'anonymous');
        img.src = url;
    });
};
// Re-download each image with CORS enabled and swap it into the fabric
// object's element once loadFromJSON has built the objects.
// NOTE(review): `_element` is an internal fabric.Image property; this works
// but may break across fabric.js versions — verify after upgrades.
canvas.loadFromJSON(json, canvas.renderAll.bind(canvas), async (o, object) => {
if (object.type === "image") {
let imagecore = await getImage(object.src);
object._element = imagecore;
}
});