How to get the position of overlay geometry on hover? - autodesk-forge

In my Forge Viewer project I am trying to add point-cloud markup to mark selected items in a saved viewpoint. I can create the markers using the code below:
createPointCloud(points, overlayName) {
    if (points.length > 0) {
        try {
            const vertexShader = `
                attribute vec4 color;
                varying vec4 vColor;
                void main() {
                    vec4 vPosition = modelViewMatrix * vec4(position, 1.0);
                    gl_Position = projectionMatrix * vPosition;
                    gl_PointSize = 20.0;
                    vColor = color;
                }
            `;
            // Fragment Shader code
            const fragmentShader = `
                #ifdef GL_ES
                precision highp float;
                #endif
                varying vec4 vColor;
                void main() {
                    gl_FragColor = vColor;
                }
            `;
            // Shader material parameters
            this.shader = {
                side: THREE.DoubleSide,
                depthWrite: this.occlusion,
                depthTest: this.occlusion,
                fragmentShader,
                vertexShader,
                attributes: {
                    color: {
                        type: 'v4',
                        value: []
                    }
                }
            };
            // Initialize geometry vertices
            // and shader attribute colors
            this.geometry = new THREE.Geometry();
            for (const point of points) {
                this.geometry.vertices.push(
                    new THREE.Vector3(point.position.x, point.position.y, point.position.z));
                this.shader.attributes.color.value.push(
                    new THREE.Vector4(
                        point.color.r / 255.00,
                        point.color.g / 255.00,
                        point.color.b / 255.00,
                        1.0)
                );
            }
            // creates shader material
            let shaderMaterial = new THREE.ShaderMaterial(this.shader);
            // creates THREE.PointCloud
            let pointCloud = new THREE.PointCloud(this.geometry, shaderMaterial);
            this.overlayPointCloudMap[overlayName] = pointCloud;
            this.viewer.impl.createOverlayScene(overlayName);
            this.viewer.impl.addOverlay(overlayName, pointCloud);
        } catch (ex) {
            alert('Can\'t show points, please try again!');
        }
    }
}
I also want to show a modal on hover over a marker. To do that I need to know which marker is currently hovered and where it is. I've tried a solution based on this, but I am not able to get the correct item; sometimes it returns an empty list. The hit-test code is below:
updateHitTest(event) {
    const pointer = event.pointers ? event.pointers[0] : event;
    const pointerVector = new THREE.Vector3();
    const pointerDir = new THREE.Vector3();
    const ray = new THREE.Raycaster();
    ray.params.PointCloud.threshold = 20; // hit-test markup size = 20
    const camera = this.viewer.impl.camera;
    const rect = this.viewer.impl.canvas.getBoundingClientRect();
    const x = ((pointer.clientX - rect.left) / rect.width) * 2 - 1;
    const y = -((pointer.clientY - rect.top) / rect.height) * 2 + 1;
    if (camera.isPerspective) {
        pointerVector.set(x, y, 0.5);
        pointerVector.unproject(camera);
        ray.set(camera.position, pointerVector.sub(camera.position).normalize());
    } else {
        pointerVector.set(x, y, -1);
        pointerVector.unproject(camera);
        pointerDir.set(0, 0, -1);
        ray.set(pointerVector, pointerDir.transformDirection(camera.matrixWorld));
    }
    let nodes = [];
    // loop through all the overlays and intersect with the respective point cloud
    for (const overlay of this.overlayNames) {
        nodes = nodes.concat(ray.intersectObject(this.overlayPointCloudMap[overlay]));
    }
    if (nodes.length > 0) {
        if (this.lastClickedIndex != nodes[0].index) {
            this.lastClickedIndex = nodes[0].index;
            console.log(this.lastClickedIndex);
        }
    }
}
Could you please help me figure out what to change, or how I can get this done? TIA

The code seems alright, but you may need to adjust the ray.params.PointCloud.threshold value. Also, try debugging the three.js code by adding a single point to the point cloud and stepping into the ray.intersectObject method to see why it can't find it.
Alternatively, if the number of points/markers you expect to have on screen is "reasonable", consider adding them as HTML elements overlaying the 3D view, as explained in https://github.com/autodesk-forge/forge-extensions/tree/master/public/extensions/IconMarkupExtension.
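For showing the modal at the hovered marker, one option (a sketch only, assuming your viewer version exposes viewer.impl.worldToClient and that the tooltip element and its styling are your own) is to take the world-space hit point from nodes[0], convert it to canvas coordinates, and position an absolutely-positioned HTML element there:
// Sketch: place a hypothetical "this.tooltip" <div> (position: absolute, appended to the
// viewer container) over the hovered marker.
showTooltipForHit(hit) {
    // hit.point is the world-space intersection returned by THREE.Raycaster,
    // hit.index tells you which vertex of the point cloud was hit
    const worldPos = hit.point;
    // worldToClient converts a world position to pixel coordinates relative to the canvas
    const screenPos = this.viewer.impl.worldToClient(worldPos);
    this.tooltip.style.left = `${screenPos.x}px`;
    this.tooltip.style.top = `${screenPos.y}px`;
    this.tooltip.style.display = 'block';
}
In updateHitTest you would then call this.showTooltipForHit(nodes[0]) instead of (or in addition to) the console.log.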

Related

How can i add an svg image and a label to a custom shape dynamically mxgraph

function Domain() {
mxCylinder.call(this);
}
/*
The next lines use an mxCylinder instance to augment the
prototype of the shape ("inheritance") and reset the
constructor to the topmost function of the c'tor chain.
*/
mxUtils.extend(Domain, mxCylinder);
Domain.prototype.redrawPath = function (
path: any,
x: any,
y: any,
w: any,
h: any
) {
var dy = this.extrude * this.scale;
var dx = this.extrude * this.scale;
path.moveTo(0, dy);
path.lineTo(dx, 0);
path.lineTo(w, 0);
path.lineTo(w, h - dy);
path.lineTo(w, h);
path.lineTo(0, h);
path.lineTo(0, dy);
path.lineTo(dx, 0);
path.close();
};
mxCellRenderer.registerShape("Domain", Domain);
export function main(container: any) {
if (!mxClient.isBrowserSupported()) {
mxUtils.error("Browser is not supported!", 200, false);
} else {
mxEvent.disableContextMenu(container);
var graph = new mxGraph(container);
graph.setCellsCloneable(true);
graph.setHtmlLabels(true);
graph.setPanning(true);
graph.setEnabled(false);
graph.centerZoom = false;
new mxRubberband(graph);
configureStylesheet(graph);
var parent = graph.getDefaultParent();
// Adds cells to the model in a single step
graph.getModel().beginUpdate();
try {
var v1 = graph.insertVertex(
parent,
null,
"RAN",
20,
20,
240,
120,
// 'image;image=icons/secure-gdpr-user-svgrepo-com.svg;selectable=0;connectable=0;editable=0;movable=0;fillColor=hsl(36deg 98% 51%)'
//here i'm regestering my shape how can i add an image and a label inside the shape "shape=Domain;startSize=30;fillColor=hsl(36deg 98% 51%);spacingLeft=10;align=left;fontColor=#FFFFFF;fontSize=18;shadow=0;strokeColor=none;whiteSpace=wrap;img;"
);
} finally {
// Updates the display
graph.getModel().endUpdate();
}
}
}
I used configureStylesheet to add the image to the shape, but when I do this:
// style[mxConstants.STYLE_SHAPE] = "Domain";
the image disappears and I get only the shape.
export function configureStylesheet(graph: any) {
var style: any = new Object();
style = mxUtils.clone(style);
// style[mxConstants.STYLE_PERIMETER] = mxPerimeter.RectanglePerimeter;
// style[mxConstants.STYLE_SHAPE] = mxConstants.SHAPE_LABEL;
// style[mxConstants.STYLE_SHAPE] = "Domain";
style[mxConstants.STYLE_SHAPE] = mxConstants.SHAPE_LABEL;
style[mxConstants.STYLE_ALIGN] = mxConstants.ALIGN_CENTER;
style[mxConstants.STYLE_VERTICAL_ALIGN] = mxConstants.ALIGN_TOP;
style[mxConstants.STYLE_IMAGE_ALIGN] = mxConstants.ALIGN_CENTER;
style[mxConstants.STYLE_IMAGE_VERTICAL_ALIGN] = mxConstants.ALIGN_TOP;
style[mxConstants.STYLE_IMAGE] = "icons/secure-gdpr-user-svgrepo-com.svg";
style[mxConstants.STYLE_IMAGE_WIDTH] = "48";
style[mxConstants.STYLE_IMAGE_HEIGHT] = "48";
style[mxConstants.STYLE_SPACING_TOP] = "80";
style[mxConstants.STYLE_SPACING] = "8";
graph.getStylesheet().putCellStyle("img", style);
Here's the approach I'm trying, but the image is overwriting the shape and I am not able to display the image inside my customised shape. I need the custom shape to take both the label and the image, and to do it dynamically, so it can be used more than once.
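One direction to try (an untested sketch; the exact paint hooks can vary between mxGraph versions) is to draw the icon as part of the custom shape itself instead of relying on the "img" style, so the Domain outline and the image no longer compete for STYLE_SHAPE:
// Untested sketch: paint the icon inside the custom shape. The icon path and 48px size
// come from the question's style; the placement and the override itself are assumptions.
Domain.prototype.paintVertexShape = function (c, x, y, w, h) {
  // let mxCylinder paint the Domain outline/fill first (it translates the canvas to x, y)
  mxCylinder.prototype.paintVertexShape.apply(this, arguments);
  // then draw the icon in shape-local coordinates, centered near the top
  var iconSize = 48;
  c.image((w - iconSize) / 2, 8, iconSize, iconSize,
    "icons/secure-gdpr-user-svgrepo-com.svg", true, false, false);
};
The label itself is drawn by the cell renderer from the cell value plus the align/font/spacing keys in the style, so "shape=Domain;..." can keep those settings unchanged.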

How to create a matrix in a loop for hittesting with colors without duplicate colors

I am trying to create a matrix of colored hexagons that will allow for object hit-testing. This color matrix will sit behind a regular hexagon matrix (one without the cells colored). I will mouse over and check the color in the hidden matrix to determine which object my mouse is over, so I can simulate object detection. This should be straightforward, but for some reason I cannot get the expected results.
I have a parent loop which creates the rows of the matrix. Each iteration calls the following function to paint a hexagon (which is defined as a collection of points made available through a hexagon service).
constructor(
private hexagons: HexagonService,
private state: StateService,
private randomColors: RandomColorService,
private drawingContextService: DrawingContextService) {
}
async DrawHexagonAsync(hidecolors: boolean): Promise<any> {
const color = this.randomColors.next();
let strokestyle = this.state.strokeStyle;
const context = this.drawingContextService.context;
console.log(color); // Look for duplicates in console
if (hidecolors) {
strokestyle = `#fff`;
}
context.strokeStyle = strokestyle;
context.lineWidth = this.state.lineWidth;
context.fillStyle = color;
context.lineJoin = 'round';
context.lineCap = 'round';
const points = this.hexagons.getHexagonPoints();
context.beginPath();
let x = this.drawingContextService.location.x + points[0].x;
let y = this.drawingContextService.location.y + points[0].y
context.moveTo(x,y);
// move to the start point
for (let i = 1; i < points.length; i++) {
x = points[i].x + this.drawingContextService.location.x;
y = points[i].y + this.drawingContextService.location.y;
context.lineTo(x, y);
context.stroke();
}
context.fill();
context.closePath();
}
At first glance it looks like it is working and creates a lovely hex matrix like the following:
On closer inspection, I noticed that some of the contiguous items get rendered in duplicate colors. Since I need unique colors for hit-testing, this will not do. I use a color service to iterate through and create unique colors.
// color service.
@Injectable({providedIn: 'root'})
export class RandomColorService {
colors: string[] = [];
index = 0;
constructor() {
while (this.colors.length < 5000) {
let random = () => {
let n = Math.random() * 256;
return Math.floor(n).toString(16);
}
// make sure there are no duplicate colors.
const color = `#${random()}${random()}${random()}`;
if (!this.colors.includes(color)) {
this.colors.push(color);
}
}
}
public next() {
if (this.index >= 5000) {
this.index = 0;
}
return this.colors[this.index++];
}
}
Initially, I put the checking code to make sure that the colors were unique but the same duplicate color problem occurs with or without the code check. The next function just returns the unique colors as needed and moves to the next.
I even put a console.log in the code to check and I don't see any duplicate log messages, yet the colors are clearly duplicates.
Using the following function against the colors clearly indicates that the colors are not just similar but in fact duplicates:
// Gets a pixel from the canvas and extracts the color.
onclick(evt) {
var pixelData = this.drawing.context.getImageData(evt.x, evt.y, 1, 1).data;
var hex = "#" + ("000000" + this.rgbToHex(pixelData[0], pixelData[1], pixelData[2])).slice(-6);
console.log(hex);
}
rgbToHex(r, g, b) {
if (r > 255 || g > 255 || b > 255)
throw "Invalid color component";
return ((r << 16) | (g << 8) | b).toString(16);
}
So naturally the question is, what am I missing?
The bug is in the color service. The numbers need to be padded with '0': if the color string is not 6 hex digits, setting the fill style fails and the previous color is retained and used.
return Math.floor(n).toString(16).padStart(2, '0');
// color service (corrected).
@Injectable({providedIn: 'root'})
export class RandomColorService {
colors: string[] = [];
index = 0;
constructor() {
while (this.colors.length < 5000) {
let random = () => {
let n = Math.random() * 256;
return Math.floor(n).toString(16).padStart(2, '0');
}
// make sure there are no duplicate colors.
const color = `#${random()}${random()}${random()}`;
if (!this.colors.includes(color)) {
this.colors.push(color);
}
}
}
public next() {
if (this.index >= 5000) {
this.index = 0;
}
return this.colors[this.index++];
}
}
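To see why the missing padding produces duplicates: toString(16) on a component below 16 yields a single hex digit, so the assembled string can end up 4 or 5 hex digits long. An invalid value assigned to fillStyle is silently ignored by the canvas, so the previous fill color is reused. A quick illustration (the values are arbitrary):
// Illustration only, not part of the service.
const r = (10).toString(16);    // "a"  (1 digit)
const g = (200).toString(16);   // "c8" (2 digits)
const b = (60).toString(16);    // "3c" (2 digits)
const color = `#${r}${g}${b}`;  // "#ac83c" -> 5 hex digits, not a valid CSS color

const ctx = document.createElement('canvas').getContext('2d');
ctx.fillStyle = '#112233';
ctx.fillStyle = color;          // invalid assignment is ignored...
console.log(ctx.fillStyle);     // ...so this still logs "#112233"

// With padStart(2, '0') every component is two digits and the string is always #rrggbb.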

HTML Canvas brush is doing additional lines

I am doing an image-coloring web application with React, and I am using an HTML canvas for drawing. The problem is that the brush sometimes draws strange lines to coordinates where I didn't put the mouse. The mouse strokes I wanted to color are marked with the blue line, and the additional strokes that I don't want are circled with a yellow marker.
So my question is: How to solve the problem of the brush drawing additional strokes to points where I didn't put my mouse?
Linecap style:
Context.lineCap = "round"
The code for drawing is:
const drawMove = (clientX: number, clientY: number) => {
if (!isDrawing) {
return;
}
if (DrawCanvasRef.current) {
const Canvas = DrawCanvasRef.current;
const Context = Canvas.getContext("2d");
if (Context) {
const { x, y } = getMousePos(DrawCanvasRef, clientX, clientY);
if (x && y) {
Context.lineTo(x, y);
Context.stroke();
}
}
}
};
Other functions:
const startDrawing = (clientX: number, clientY: number) => {
if (DrawCanvasRef.current) {
const Canvas = DrawCanvasRef.current;
const Context = Canvas.getContext("2d");
if (Context) {
// Get current mouse positions
const { x, y } = getMousePos(DrawCanvasRef, clientX, clientY);
// If the mouse position is valid, then begin the line path
if (x && y) {
Context.beginPath();
Context.moveTo(x, y);
setIsDrawing(true);
}
}
}
};
const touchMove = (event: TouchEvent) => {
drawMove(
event.nativeEvent.touches[0].clientX,
event.nativeEvent.touches[0].clientY
);
};
const mouseMove = (event: MouseEvent) => {
drawMove(event.nativeEvent.clientX, event.nativeEvent.clientY);
};
const finishDrawing = () => {
if (DrawCanvasRef.current && HiddenCanvasRef.current) {
const Canvas = DrawCanvasRef.current;
const HiddenCanvas = HiddenCanvasRef.current;
const Context = Canvas.getContext("2d");
const HiddenContext = HiddenCanvas.getContext("2d");
if (Context && HiddenContext) {
// Stop drawing the path
Context.closePath();
setIsDrawing(false);
HiddenContext.drawImage(Canvas, 0, 0, CANVAS_WIDTH, CANVAS_HEIGHT);
clearCanvas(DrawCanvasRef);
}
}
};
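One common defensive pattern for freehand drawing (a sketch only, under the assumption that the stray segments come from the path reusing a stale starting point; lastPointRef is a hypothetical ref you would add and set in startDrawing) is to draw each movement as its own short sub-path from the last known position, so nothing can connect back to an earlier point. Note also that if (x && y) rejects a legitimate coordinate of 0:
// Sketch only: draw each segment independently from the previous point.
// "lastPointRef" is a hypothetical React ref set in startDrawing.
const drawMove = (clientX, clientY) => {
  if (!isDrawing || !DrawCanvasRef.current) return;
  const Context = DrawCanvasRef.current.getContext("2d");
  if (!Context) return;
  const { x, y } = getMousePos(DrawCanvasRef, clientX, clientY);
  if (x == null || y == null) return;   // allows 0, unlike `if (x && y)`
  const last = lastPointRef.current;
  if (last) {
    Context.beginPath();                // each segment is its own sub-path
    Context.moveTo(last.x, last.y);
    Context.lineTo(x, y);
    Context.stroke();
  }
  lastPointRef.current = { x, y };
};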

In p5.js how do I create a moving animation of an array group of lines?

Hi, I created a sparkler-like shape in p5.js using an Array and randomGaussian(). Here is what it looks like, with the code in p5.js:
let distribution = new Array(360);
let b = false;
let x, y;
function setup() {
var cnv = createCanvas(windowWidth, windowHeight);
cnv.parent("sketchholder");
for (let i = 0; i < distribution.length; i++) {
distribution[i] = floor(randomGaussian(60, 50));
}
colorMode(HSB, 255);
// hue, saturation, brightness
x = width / 4;
y = height / 3;
}
function draw() {
background(21, 30, 10);
translate(width / 2, height / 2);
strokeWeight(3);
stroke(255, 70);
line(0, 0, -x, y);
if (b) {
for (let i = 0; i < distribution.length; i++) {
rotate(TWO_PI / distribution.length);
var colorH = random(0, 255);
var colorS = random(100, 200);
var colorB = random(0, 255);
var YY = random(1, 4);
stroke(colorH, colorS, colorB);
strokeWeight(YY);
strokeCap(ROUND);
let dist = abs(distribution[i]);
line(0, 0, dist, 0);
}
}
}
And I would like it to move slowly along the stick and disappear after about 20 s, before reaching the end. Can I achieve that in p5.js? Or can I create the same effect by using the p5.js layer as a div in HTML and animating it with CSS?
Thanks.
Yes, you can definitely do this with p5.js. I think it would not make much sense to try to do an animation like this with CSS, and it wouldn't work well with p5.js regardless (you would only be able to animate DOM elements). However, you do need to implement your own basic animation system: define parameter values for different times in the animation and then interpolate between them yourself. Here is a basic example:
let distribution = new Array(360);
let originX, originY;
let keyframes = [];
let keyframeTimes = [];
function setup() {
createCanvas(windowWidth, windowHeight);
for (let i = 0; i < distribution.length; i++) {
distribution[i] = floor(randomGaussian(60, 50));
}
colorMode(HSB, 255);
// hue, saturation, brightness
// Origin relative to the center of the sketch
originX = -width / 4;
originY = height / 3;
const lenX = width / 4;
const lenY = - height / 3;
// Add the desired parameters and times to keyframes and keyframeTimes
keyframes.push({
x: lenX,
y: lenY,
mag: 1
});
keyframeTimes.push(0);
keyframes.push({
x: 0.2 * lenX,
y: 0.2 * lenY,
mag: 0
});
keyframeTimes.push(20);
}
function draw() {
background(21, 30, 10);
translate(width / 2, height / 2);
translate(originX, originY);
strokeWeight(3);
stroke(255, 70);
// Find the next keyframe that we are animating towards.
let nextKeyframeIx = keyframeTimes.findIndex(t => t > millis() / 1000);
// Find the current/previous keyframe that we are starting from.
// Cases to consider: the next keyframe is the first one in the list, we're in
// between two keyframes, or there is no next keyframe
let keyframeIx =
nextKeyframeIx === 0 ?
// There is no previous keyframe (we're at the beginning)
0 :
// If we're beyond the end of the list, just use the parameters from the
// last keyframe
(nextKeyframeIx > 0 ? nextKeyframeIx - 1 : keyframes.length - 1);
let x, y, mag;
if (keyframeIx < nextKeyframeIx) {
// lerp between the current and next keyframe
let kf1 = keyframes[keyframeIx];
let kf2 = keyframes[nextKeyframeIx];
let t1 = keyframeTimes[keyframeIx];
let t2 = keyframeTimes[nextKeyframeIx];
let amt = (millis() / 1000 - t1) / (t2 - t1);
x = lerp(kf1.x, kf2.x, amt);
y = lerp(kf1.y, kf2.y, amt);
mag = lerp(kf1.mag, kf2.mag, amt);
} else {
// just use the current/previous keyframe
let kf = keyframes[keyframeIx];
x = kf.x;
y = kf.y;
mag = kf.mag;
}
// Draw a line from the origin (bottom left of the sparkler) to the current
// x,y position of the end of the sparkler
line(0, 0, x, y);
// Translate to the current end of the sparkler for drawing the colored lines
translate(x, y);
if (mag > 0) {
for (let i = 0; i < distribution.length; i++) {
rotate(TWO_PI / distribution.length);
const colorH = random(0, 255);
const colorS = random(100, 200);
const colorB = random(0, 255);
const YY = random(1, 4);
stroke(colorH, colorS, colorB);
strokeWeight(YY);
strokeCap(ROUND);
let dist = abs(distribution[i]) * mag;
line(0, 0, dist, 0);
}
}
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.4.0/p5.js"></script>
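If all that's needed is the single 20-second fade from the question, the keyframe machinery above can be boiled down to one interpolation parameter. A minimal sketch reusing the same setup() and distribution as the example above, with the 20 s duration hard-coded:
// Minimal variant: one linear 20-second animation instead of a keyframe list.
function draw() {
  background(21, 30, 10);
  translate(width / 2, height / 2);
  translate(-width / 4, height / 3);             // bottom end of the stick
  // progress runs 0 -> 1 over 20 seconds, then stays at 1
  const progress = constrain(millis() / 20000, 0, 1);
  const x = lerp(width / 4, 0.2 * (width / 4), progress);
  const y = lerp(-height / 3, 0.2 * (-height / 3), progress);
  const mag = lerp(1, 0, progress);              // sparks shrink until they vanish
  stroke(255, 70);
  strokeWeight(3);
  line(0, 0, x, y);
  translate(x, y);                               // move to the current end of the sparkler
  if (mag > 0) {
    for (let i = 0; i < distribution.length; i++) {
      rotate(TWO_PI / distribution.length);
      stroke(random(0, 255), random(100, 200), random(0, 255));
      strokeWeight(random(1, 4));
      strokeCap(ROUND);
      line(0, 0, abs(distribution[i]) * mag, 0);
    }
  }
}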

WebGL – Use mesh as mask for background image

I have the following problem to solve with WebGL:
Imagine a mesh in front of the camera. The mesh is not actually shaded; its "silhouette" as a whole is used to reveal a background image, as some sort of mask or stencil if you will. So, e.g., you would have an output image with a white background and a silhouette shape filled with the texture/image that forms the background at that position.
(The meshes I want to use are obviously more complex than a sphere)
What is the best way to achieve such an effect? I thought about projection mapping, i.e. sort of projecting the background texture onto the mesh from the camera's perspective. Or is a stencil buffer maybe the way to go? From what I've read, support for it is not that great at this point. Maybe there is also a far simpler method to solve this issue that I've missed?
There are tons of ways to do this; which is best for you is up to you.
Use the stencil buffer
Draw your mesh into the stencil buffer then draw your image with the stencil test set so it only draws where the mesh was drawn
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1); // doesn't matter. We're only using the stencil
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw geometry to generate stencil
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
// write 1 to stencil
gl.enable(gl.STENCIL_TEST);
gl.stencilFunc(gl.ALWAYS, 1, 0xFF);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.REPLACE);
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// draw image where stencil is set
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
tex: tex,
});
gl.stencilFunc(gl.EQUAL, 1, 0xFF);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP);
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Use the depth buffer
Draw your mesh into the depth buffer then draw your image with the depth function set so it only draws where the mesh was drawn.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1); // doesn't matter. We're only using the depth buffer
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearDepth(0); // clear depth to 0 (normally it's 1)
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw geometry to generate depth
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.depthFunc(gl.ALWAYS); // we only care about silhouette
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// draw image where depth is set
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
tex: tex,
});
gl.depthFunc(gl.LESS);
// this quad is drawn at z = 0, which is in the middle Z-wise. Should probably
// make it 1 so it's in the back, but it's working as is so too lazy to
// change
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Use CSS
Set the canvas's CSS background to an image. Clear the canvas to some color, draw your mesh with 0,0,0,0 to cut a hole.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(0);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearColor(1, 1, 1, 1); // clear to white
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw in 0,0,0,0 to cut a hole in the canvas to the HTML/CSS
// defined background
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block;
background-image: url(https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg);
background-size: 100% 100%;
}
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Generate a texture mask
Draw the mesh to a texture through a framebuffer to generate a silhouette in the texture, then use that texture as input to another shader as a mask.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1);
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D colorTex;
uniform sampler2D maskTex;
void main() {
vec4 color = texture2D(colorTex, v_texcoord);
vec4 mask = texture2D(maskTex, v_texcoord);
gl_FragColor = color * mask;
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
// with no options creates a framebuffer with an RGBA8 texture
// and depth buffer
const fbi = twgl.createFramebufferInfo(gl);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// with no argument will resize to the canvas size
twgl.resizeFramebufferInfo(gl, fbi);
}
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
// first draw the geometry to the texture
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// the texture now is black (0,0,0,0) where there's nothing and (1,1,1,1)
// where our geometry was drawn
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// draw image using our texture as a mask
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
colorTex: tex,
maskTex: fbi.attachments[0],
});
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Personally I'd probably use the last one as it's more flexible. You can use any technique to generate the mask. The mask will have levels (as in you can set it to 0.5 to get a 50/50 blend). That means you can get an antialiased edge if you want. You can mask each color by a separate amount. You could easily blend between 2 images, etc. You could add other effects like displacement maps to the final pass.
Here's an example of rendering a cube in shades of gray and using the result to blend 2 images.
var geoVS = `
attribute vec4 position;
attribute vec3 normal;
uniform mat4 matrix;
varying vec3 v_normal;
void main() {
gl_Position = matrix * position;
v_normal = (matrix * vec4(normal, 0)).xyz;
}
`;
var geoFS = `
precision mediump float;
uniform vec3 u_lightDir;
varying vec3 v_normal;
void main() {
gl_FragColor = vec4(dot(normalize(v_normal), u_lightDir) * .5 + .5);
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D color1Tex;
uniform sampler2D color2Tex;
uniform sampler2D maskTex;
void main() {
// it probably doesn't make sense to use the same
// texcoords for all 3 textures but I'm lazy
vec4 color1 = texture2D(color1Tex, v_texcoord);
vec4 color2 = texture2D(color2Tex, v_texcoord);
vec4 mask = texture2D(maskTex, v_texcoord);
gl_FragColor = mix(color1, color2, mask);
}
`;
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const textures = twgl.createTextures(gl, {
tex1: {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
},
tex2: {
src: "https://farm1.staticflickr.com/339/18414821420_e3d0a8ec5f_z_d.jpg",
crossOrigin: "",
flipY: true,
},
});
// with no options creates a framebuffer with an RGBA8 texture
// and depth buffer
const fbi = twgl.createFramebufferInfo(gl);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// with no argument will resize to the canvas size
twgl.resizeFramebufferInfo(gl, fbi);
}
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
// first draw the geometry to the texture
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
u_lightDir: v3.normalize([1, 2, 3]),
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// the texture now is black (0,0,0,0) where there's nothing and (1,1,1,1)
// where our geometry was drawn
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// draw image using our texture as a mask
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
color1Tex: textures.tex1,
color2Tex: textures.tex2,
maskTex: fbi.attachments[0],
});
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
In general you can probably achieve many more effects using the texture mask, but it really depends on your goal.
Both approaches can work.
The 'projection' one is probably more efficient and straightforward. It works in a single pass. You just need to substitute the classic UV coordinates with the screen coordinates of the vertices in your vertex shader.
varying vec2 vTexCoord;
void main( void ){
// whatever how gl_Position is compute
gl_Position = uMVP * vec4(aPosition, 1.0);
// vTexCoord = aTexCoord;
// replace the standard UVs by the vertex screen position
vTexCoord = .5 * ( gl_Position.xy / gl_Position.w ) + .5;
}
You still need to tweak those texture coordinates to respect screen/texture aspect ratio, scale, etc.
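The matching fragment shader is then just a plain texture lookup with those coordinates (a sketch in the same spirit; uBackground is an assumed uniform name for the background image):
precision mediump float;
uniform sampler2D uBackground;
varying vec2 vTexCoord;
void main( void ){
    // sample the background image at the fragment's screen position,
    // so the mesh silhouette appears filled with the background
    gl_FragColor = texture2D( uBackground, vTexCoord );
}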