WebGL – Use mesh as mask for background image

I have the following problem to solve with WebGL:
Imagine a mesh in front of the camera. The mesh is not actually shaded, but its "silhouette" as a whole is used to reveal a background image, as some sort of mask or stencil if you will. So, e.g., you would have an output image with a white background and a silhouette shape filled with the texture/image that serves as the background at that position.
(The meshes I want to use are obviously more complex than a sphere)
What is the best way to achieve such an effect? I thought about projection mapping, i.e. projecting the background texture onto the mesh from the camera's perspective. Or is maybe a stencil buffer the way to go? From what I've read, support for it is not so great at this point. Maybe there is also a far simpler method to solve this issue that I've missed?

There are tons of ways to do this; which is best for you is up to you.
Use the stencil buffer
Draw your mesh into the stencil buffer, then draw your image with the stencil test set so it only draws where the mesh was drawn.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1); // doesn't matter. We're only using the stencil
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw geometry to generate stencil
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
// write 1 to stencil
gl.enable(gl.STENCIL_TEST);
gl.stencilFunc(gl.ALWAYS, 1, 0xFF);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.REPLACE);
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// draw image where stencil is set
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
tex: tex,
});
gl.stencilFunc(gl.EQUAL, 1, 0xFF);
gl.stencilOp(gl.KEEP, gl.KEEP, gl.KEEP);
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Use the depth buffer
Draw your mesh into the depth buffer, then draw your image with the depth function set so it only draws where the mesh was drawn.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1, 0, 0, 1); // doesn't matter. We're only using the depth buffer
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearDepth(0); // clear depth to 0 (normally it's 1)
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw geometry to generate depth
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.depthFunc(gl.ALWAYS); // we only care about silhouette
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// draw image where depth is set
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
tex: tex,
});
gl.depthFunc(gl.LESS);
// this quad is drawn at z = 0 which is in the middle Z-wise. Should probably
// make it 1 so it's in the back, but it's working as is so I'm too lazy to
// change it
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Use CSS
Set the canvas's CSS background to an image. Clear the canvas to some color, draw your mesh with 0,0,0,0 to cut a hole.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(0);
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.clearColor(1, 1, 1, 1); // clear to white
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
// draw in 0,0,0,0 to cut a hole in the canvas to the HTML/CSS
// defined background
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block;
background-image: url(https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg);
background-size: 100% 100%;
}
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Generate a texture mask
Draw the mesh to a texture through a framebuffer to generate a silhouette in the texture, then use that texture as input to another shader as a mask.
var geoVS = `
attribute vec4 position;
uniform mat4 matrix;
void main() {
gl_Position = matrix * position;
}
`;
var geoFS = `
precision mediump float;
void main() {
gl_FragColor = vec4(1);
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D colorTex;
uniform sampler2D maskTex;
void main() {
vec4 color = texture2D(colorTex, v_texcoord);
vec4 mask = texture2D(maskTex, v_texcoord);
gl_FragColor = color * mask;
}
`;
const m4 = twgl.m4;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const tex = twgl.createTexture(gl, {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
});
// with no options creates a framebuffer with an RGBA8 texture
// and depth buffer
const fbi = twgl.createFramebufferInfo(gl);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// with no argument will resize to the canvas size
twgl.resizeFramebufferInfo(gl, fbi);
}
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
// first draw the geometry to the texture
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// the texture is now black (0,0,0,0) where there's nothing and white (1,1,1,1)
// where our geometry was drawn
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// draw image using our texture as a mask
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
colorTex: tex,
maskTex: fbi.attachments[0],
});
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
Personally I'd probably use the last one as it's more flexible. You can use any technique to generate the mask. The mask will have levels (as in you can set it to 0.5 to get a 50/50 blend). That means you can get an antialiased edge if you want. You can mask each color by a separate amount. You could easily blend between 2 images, etc. You could add other effects like displacement maps to the final pass.
Here's an example of rendering a cube in shades of gray and using the result to blend 2 images.
var geoVS = `
attribute vec4 position;
attribute vec3 normal;
uniform mat4 matrix;
varying vec3 v_normal;
void main() {
gl_Position = matrix * position;
v_normal = (matrix * vec4(normal, 0)).xyz;
}
`;
var geoFS = `
precision mediump float;
uniform vec3 u_lightDir;
varying vec3 v_normal;
void main() {
gl_FragColor = vec4(dot(normalize(v_normal), u_lightDir) * .5 + .5);
}
`;
var imgVS = `
attribute vec4 position;
varying vec2 v_texcoord;
void main() {
gl_Position = position;
v_texcoord = position.xy * .5 + .5; // only works if position is -1 <-> +1 quad
}
`;
var imgFS = `
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D color1Tex;
uniform sampler2D color2Tex;
uniform sampler2D maskTex;
void main() {
// it probably doesn't make sense to use the same
// texcoords for all 3 textures but I'm lazy
vec4 color1 = texture2D(color1Tex, v_texcoord);
vec4 color2 = texture2D(color2Tex, v_texcoord);
vec4 mask = texture2D(maskTex, v_texcoord);
gl_FragColor = mix(color1, color2, mask);
}
`;
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector("canvas").getContext("webgl", {stencil: true});
const geoPrgInfo = twgl.createProgramInfo(gl, [geoVS, geoFS]);
const imgPrgInfo = twgl.createProgramInfo(gl, [imgVS, imgFS]);
const geoBufferInfo = twgl.primitives.createCubeBufferInfo(gl, 1);
const quadBufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const textures = twgl.createTextures(gl, {
tex1: {
src: "https://farm9.staticflickr.com/8873/18598400202_3af67ef38f_z_d.jpg",
crossOrigin: "",
flipY: true,
},
tex2: {
src: "https://farm1.staticflickr.com/339/18414821420_e3d0a8ec5f_z_d.jpg",
crossOrigin: "",
flipY: true,
},
});
// with no options creates a framebuffer with an RGBA8 texture
// and depth buffer
const fbi = twgl.createFramebufferInfo(gl);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// with no argument will resize to the canvas size
twgl.resizeFramebufferInfo(gl, fbi);
}
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, fbi);
// first draw the geometry to the texture
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.enable(gl.DEPTH_TEST);
var fov = Math.PI * .25;
var aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
var zNear = 0.1;
var zFar = 10;
var mat = m4.perspective(fov, aspect, zNear, zFar);
mat = m4.translate(mat, [0, 0, -3]);
mat = m4.rotateX(mat, time * 0.81);
mat = m4.rotateZ(mat, time * 0.77);
gl.useProgram(geoPrgInfo.program);
twgl.setBuffersAndAttributes(gl, geoPrgInfo, geoBufferInfo);
twgl.setUniforms(geoPrgInfo, {
matrix: mat,
u_lightDir: v3.normalize([1, 2, 3]),
});
gl.drawElements(gl.TRIANGLES, geoBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
// the texture is now black (0,0,0,0) where there's nothing and shaded
// where our geometry was drawn
// calls gl.bindFramebuffer and gl.viewport
twgl.bindFramebufferInfo(gl, null);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
// draw image using our texture as a mask
gl.useProgram(imgPrgInfo.program);
twgl.setBuffersAndAttributes(gl, imgPrgInfo, quadBufferInfo);
twgl.setUniforms(imgPrgInfo, {
color1Tex: textures.tex1,
color2Tex: textures.tex2,
maskTex: fbi.attachments[0],
});
gl.drawElements(gl.TRIANGLES, quadBufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/3.x/twgl-full.min.js"></script>
<canvas></canvas>
In general you can probably achieve many more effects using the texture mask but it really depends on your goal.
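For example, here is a small variation on the final mask pass, as a sketch only: the displacement amount and the per-channel use are made up for illustration, while colorTex/maskTex match the uniforms of the "Generate a texture mask" example above.
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D colorTex;
uniform sampler2D maskTex;
void main() {
  vec4 mask = texture2D(maskTex, v_texcoord);
  // nudge the image lookup a little where the mask is set (displacement-like effect)
  vec2 offset = mask.rg * 0.01;
  vec4 color = texture2D(colorTex, v_texcoord + offset);
  // each channel can be masked by a different amount
  gl_FragColor = vec4(color.rgb * mask.rgb, 1.0);
}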

Both approaches can work.
The 'projection' one is probably more efficient and straightforward. It works in a single pass. You just need to substitute the classic UV coordinates with the vertices' screen coordinates in your vertex shader.
varying vec2 vTexCoord;
void main( void ){
// however gl_Position is computed
gl_Position = uMVP * vec4(aPosition, 1.0);
// vTexCoord = aTexCoord;
// replace the standard UVs by the vertex screen position
vTexCoord = .5 * ( gl_Position.xy / gl_Position.w ) + .5;
}
You still need to tweak those texture coordinates to respect the screen/texture aspect ratio, scale, etc.
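For instance, a minimal sketch of such a tweak (the uScreenAspect, uTexAspect and uScale uniforms are assumptions, not part of the snippet above) could look like this:
varying vec2 vTexCoord;
// assumed uniforms: canvas width/height ratio, texture width/height ratio, user zoom
uniform float uScreenAspect;
uniform float uTexAspect;
uniform float uScale;
void main( void ){
  gl_Position = uMVP * vec4(aPosition, 1.0);
  vTexCoord = .5 * ( gl_Position.xy / gl_Position.w ) + .5;
  // re-center, compensate for the aspect ratio mismatch, apply the scale, re-offset
  vTexCoord = (vTexCoord - .5) * vec2(uScreenAspect / uTexAspect, 1.0) / uScale + .5;
}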

Related

How to get the position of overlay geometry on hover?

In my Forge Viewer project I am trying to add point-cloud markup to mark selected items in some saved viewpoint. I can create the markers using the code below:
createPointCloud(points, overlayName) {
if (points.length > 0) {
try {
const vertexShader = `
attribute vec4 color;
varying vec4 vColor;
void main() {
vec4 vPosition = modelViewMatrix * vec4(position, 1.0);
gl_Position = projectionMatrix * vPosition;
gl_PointSize = 20.0;
vColor = color;
}
`;
// Fragment Shader code
const fragmentShader = `
#ifdef GL_ES
precision highp float;
#endif
varying vec4 vColor;
void main() {
gl_FragColor = vColor;
}
`
// Shader material parameters
this.shader = {
side: THREE.DoubleSide,
depthWrite: this.occlusion,
depthTest: this.occlusion,
fragmentShader,
vertexShader,
attributes: {
color: {
type: 'v4',
value: []
}
}
}
// Initialize geometry vertices
// and shader attribute colors
this.geometry = new THREE.Geometry()
for (const point of points) {
this.geometry.vertices.push(
new THREE.Vector3(point.position.x, point.position.y, point.position.z))
this.shader.attributes.color.value.push(
new THREE.Vector4(
point.color.r/255.00,
point.color.g / 255.00,
point.color.b / 255.00,
1.0)
)
}
// creates shader material
let shaderMaterial = new THREE.ShaderMaterial(this.shader);
// creates THREE.PointCloud
let pointCloud = new THREE.PointCloud(
this.geometry, shaderMaterial)
this.overlayPointCloudMap[overlayName] = pointCloud;
this.viewer.impl.createOverlayScene(overlayName);
this.viewer.impl.addOverlay(overlayName, pointCloud);
} catch (ex) {
alert('Can\'t show points, please try again!');
}
}
}
I also want to show a modal on hover over the markup. To do that I need to know the position of the markup that is currently hovered. I've tried a solution based on this, but I'm not able to get the correct item; sometimes it returns an empty list. The hit-test code is below:
updateHitTest(event) {
const pointer = event.pointers ? event.pointers[0] : event;
const pointerVector = new THREE.Vector3();
const pointerDir = new THREE.Vector3();
const ray = new THREE.Raycaster();
ray.params.PointCloud.threshold = 20; // hit-test markup size = 20
const camera = this.viewer.impl.camera;
const rect = this.viewer.impl.canvas.getBoundingClientRect();
const x = ((pointer.clientX - rect.left) / rect.width) * 2 - 1;
const y = - ((pointer.clientY - rect.top) / rect.height) * 2 + 1;
if (camera.isPerspective) {
pointerVector.set(x, y, 0.5);
pointerVector.unproject(camera);
ray.set(camera.position, pointerVector.sub(camera.position).normalize());
} else {
pointerVector.set(x, y, -1);
pointerVector.unproject(camera);
pointerDir.set(0, 0, -1);
ray.set(pointerVector, pointerDir.transformDirection(camera.matrixWorld));
}
let nodes = [];
// loop through all the overlays and intersect with the respected point-cloud
for (const overlay of this.overlayNames) {
nodes = nodes.concat( ray.intersectObject(this.overlayPointCloudMap[overlay]));
}
if (nodes.length > 0) {
if (this.lastClickedIndex != nodes[0].index) {
this.lastClickedIndex = nodes[0].index;
console.log(this.lastClickedIndex);
}
}
}
Could you please help me figure out what to change or how I can get the job done? TIA
The code seems alright, but you may need to adjust the ray.params.PointCloud.threshold value. Also, try debugging the three.js code by adding a single point to the point cloud and stepping into the ray.intersectObject method to see why it can't find it.
Alternatively, if the number of points/markers you expect to have on screen is "reasonable", consider the approach of adding them as HTML elements overlaying the 3D view as explained in https://github.com/autodesk-forge/forge-extensions/tree/master/public/extensions/IconMarkupExtension.
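If you do keep the point cloud, a rough sketch of turning the hit-test result into a screen position for a modal/tooltip could look like this; the tooltip element is hypothetical, while node.point and project() come from three.js:
// sketch: position a hypothetical tooltip <div> over the hovered marker
showTooltipAt(node) {
  const camera = this.viewer.impl.camera;
  const rect = this.viewer.impl.canvas.getBoundingClientRect();
  // project the world-space intersection point into normalized device coordinates
  const p = node.point.clone().project(camera);
  // map NDC (-1..+1) back to client coordinates
  const x = rect.left + (p.x + 1) / 2 * rect.width;
  const y = rect.top + (1 - p.y) / 2 * rect.height;
  const tooltip = document.getElementById('marker-tooltip'); // hypothetical element
  tooltip.style.left = x + 'px';
  tooltip.style.top = y + 'px';
  tooltip.style.display = 'block';
}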

webgl performance differences between firefox and chrome

I am currently developing an image processing tool for a web application. I need to take two PNG images of the same size and combine them pixel by pixel. So far, I have set up a prototype (very much inspired by the tutorials on webglfundamentals.org) that takes two images and just multiplies their pixels. I am using the twgl helper library for WebGL from http://twgljs.org/ (which I could unfortunately not put into the fiddle).
I have the following question: Can anyone explain or give hints why Firefox 78 is so much slower at this than a recent Chrome? FF averages about 34ms per render (complete refresh and wiped cache between samples) while Chrome averages 0.27ms per render. That is a two-orders-of-magnitude difference which I just can't explain. I have tried WebGL2; it is slightly faster for both but keeps the insane difference between the two.
If I need to provide more info, pls let me know, I will be back in the office on thursday. Thank you for your support and ideas.
function main() {
// Get A WebGL context
var canvas = document.getElementById("webgl");
var gl = canvas.getContext("webgl");
if (!gl) {
return;
}
var canvas1 = document.getElementById("canvas1");
var canvas2 = document.getElementById("canvas2");
// setup GLSL program
var program = twgl.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
gl.useProgram(program);
var time0 = performance.now();
// look up where the vertex data needs to go.
var positionLocation = gl.getAttribLocation(program, "a_position");
var texCoordLocation = gl.getAttribLocation(program, "a_texCoord");
// provide texture coordinates for the rectangle.
var texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0]), gl.STATIC_DRAW);
// vertex attributes need to be turned on explicitly
gl.enableVertexAttribArray(texCoordLocation);
gl.vertexAttribPointer(texCoordLocation, 2, gl.FLOAT, false, 0, 0);
// lookup uniforms
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
// set the resolution
gl.uniform2f(resolutionLocation, canvas1.width, canvas1.height);
// Create a buffer for the position of the rectangle corners.
var buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.enableVertexAttribArray(positionLocation);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, 0, 0);
// Set a rectangle the same size as the image.
setRectangle(gl, 0, 0, canvas.width, canvas.height);
// setRectangle(gl, 0, 0, 1000, 1000);
function setupTexture(canvas, textureUnit, program, uniformName) {
var tex = gl.createTexture();
updateTextureFromCanvas(tex, canvas, textureUnit);
// Set the parameters so we can render any size image.
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
// try this out
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var location = gl.getUniformLocation(program, uniformName);
gl.uniform1i(location, textureUnit);
}
function updateTextureFromCanvas(tex, canvas, textureUnit) {
gl.activeTexture(gl.TEXTURE0 + textureUnit);
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, canvas);
}
var tex1 = setupTexture(canvas1, 0, program, "u_canvas1");
var tex2 = setupTexture(canvas2, 1, program, "u_canvas2");
// Draw the rectangle.
gl.drawArrays(gl.TRIANGLES, 0, 6);
var time1 = performance.now();
console.log("Processing image took " + (time1 - time0) + " ms.");
document.getElementById("performance").innerHTML = "Processing image took " + (time1 - time0) + " ms.";
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2]), gl.STATIC_DRAW);
}
const WIDTH = 1600;
const HEIGHT = 900;
addNewImage = function (path, id, width, height) {
console.log(path)
let newElement = document.createElement("canvas");
document.body.appendChild(newElement);
newElement.id = id;
let ctx = newElement.getContext("2d");
ctx.canvas.width = width;
ctx.canvas.height = height;
let input = new Image();
input.crossOrigin = "anonymous";
input.onload = function () {
ctx.drawImage(input, 0, 0);
}
input.src = path;
}
addNewImage("https://i.imgur.com/KjUybBD.png", "canvas1", WIDTH, HEIGHT);
addNewImage("https://i.imgur.com/ZKMnXce.png", "canvas2", WIDTH, HEIGHT);
canvas {
border: 2px solid black;
display: inline-block;
width: 100%;
}
<script src="twgl.js"></script>
<link rel="stylesheet" type="text/css" href="style.css" />
<button onclick="main()">click</button>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord;
void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our 2 canvases
uniform sampler2D u_canvas1;
uniform sampler2D u_canvas2;
// the texCoords passed in from the vertex shader.
// note: we're only using 1 set of texCoords which means
// we're assuming the canvases are the same size.
varying vec2 v_texCoord;
void main() {
// Look up a pixel from first canvas
vec4 color1 = texture2D(u_canvas1, v_texCoord);
// Look up a pixel from second canvas
vec4 color2 = texture2D(u_canvas2, v_texCoord);
// return the 2 colors multiplied
gl_FragColor = color1 * color2;
}
</script>
<!-- <canvas id="canvas1"></canvas>
<canvas id="canvas2"></canvas> -->
<div id="performance"></div>
<canvas id="webgl" width="1600" height="900"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
It is not really possible to count on browsers having similar performance. There are plenty of tests where one browser is 2x to 40x faster than another.
In this particular case I don't know why modern Firefox is slower than Chrome. Chrome is multi-process (I thought Firefox was too at this point but maybe not), so in Chrome the timing is only measuring how long it takes to insert commands into a command buffer that goes from the process running the webpage to the separate process that talks to the GPU. It's not timing how long it actually takes to run those commands, which run in parallel with the webpage.
If I add this after your draw call
// Force the webpage to wait for the GPU process
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(4));
Then I get comparable times for Chrome (27ms) vs Firefox (32ms)
function main() {
// Get A WebGL context
var canvas = document.getElementById("webgl");
var gl = canvas.getContext("webgl");
if (!gl) {
return;
}
var canvas1 = document.getElementById("canvas1");
var canvas2 = document.getElementById("canvas2");
// setup GLSL program
var program = twgl.createProgramFromScripts(gl, ["2d-vertex-shader", "2d-fragment-shader"]);
gl.useProgram(program);
var time0 = performance.now();
// look up where the vertex data needs to go.
var positionLocation = gl.getAttribLocation(program, "a_position");
var texCoordLocation = gl.getAttribLocation(program, "a_texCoord");
// provide texture coordinates for the rectangle.
var texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0]), gl.STATIC_DRAW);
// vertex attributes need to be turned on explicitly
gl.enableVertexAttribArray(texCoordLocation);
gl.vertexAttribPointer(texCoordLocation, 2, gl.FLOAT, false, 0, 0);
// lookup uniforms
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
// set the resolution
gl.uniform2f(resolutionLocation, canvas1.width, canvas1.height);
// Create a buffer for the position of the rectangle corners.
var buffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.enableVertexAttribArray(positionLocation);
gl.vertexAttribPointer(positionLocation, 2, gl.FLOAT, false, 0, 0);
// Set a rectangle the same size as the image.
setRectangle(gl, 0, 0, canvas.width, canvas.height);
// setRectangle(gl, 0, 0, 1000, 1000);
function setupTexture(canvas, textureUnit, program, uniformName) {
var tex = gl.createTexture();
updateTextureFromCanvas(tex, canvas, textureUnit);
// Set the parameters so we can render any size image.
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
// gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
// try this out
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
var location = gl.getUniformLocation(program, uniformName);
gl.uniform1i(location, textureUnit);
}
function updateTextureFromCanvas(tex, canvas, textureUnit) {
gl.activeTexture(gl.TEXTURE0 + textureUnit);
gl.bindTexture(gl.TEXTURE_2D, tex);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, canvas);
}
var tex1 = setupTexture(canvas1, 0, program, "u_canvas1");
var tex2 = setupTexture(canvas2, 1, program, "u_canvas2");
// Draw the rectangle.
gl.drawArrays(gl.TRIANGLES, 0, 6);
// Force the webpage to wait for the GPU process
gl.readPixels(0, 0, 1, 1, gl.RGBA, gl.UNSIGNED_BYTE, new Uint8Array(4));
var time1 = performance.now();
console.log("Processing image took " + (time1 - time0) + " ms.");
document.getElementById("performance").innerHTML = "Processing image took " + (time1 - time0) + " ms.";
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2]), gl.STATIC_DRAW);
}
const WIDTH = 1600;
const HEIGHT = 900;
addNewImage = function (path, id, width, height) {
console.log(path)
let newElement = document.createElement("canvas");
document.body.appendChild(newElement);
newElement.id = id;
let ctx = newElement.getContext("2d");
ctx.canvas.width = width;
ctx.canvas.height = height;
let input = new Image();
input.crossOrigin = "anonymous";
input.onload = function () {
ctx.drawImage(input, 0, 0);
}
input.src = path;
}
addNewImage("https://i.imgur.com/KjUybBD.png", "canvas1", WIDTH, HEIGHT);
addNewImage("https://i.imgur.com/ZKMnXce.png", "canvas2", WIDTH, HEIGHT);
canvas {
border: 2px solid black;
display: inline-block;
width: 100%;
}
<script src="twgl.js"></script>
<link rel="stylesheet" type="text/css" href="style.css" />
<button onclick="main()">click</button>
<!-- vertex shader -->
<script id="2d-vertex-shader" type="x-shader/x-vertex">
attribute vec2 a_position;
attribute vec2 a_texCoord;
uniform vec2 u_resolution;
varying vec2 v_texCoord;
void main() {
// convert the rectangle from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
</script>
<!-- fragment shader -->
<script id="2d-fragment-shader" type="x-shader/x-fragment">
precision mediump float;
// our 2 canvases
uniform sampler2D u_canvas1;
uniform sampler2D u_canvas2;
// the texCoords passed in from the vertex shader.
// note: we're only using 1 set of texCoords which means
// we're assuming the canvases are the same size.
varying vec2 v_texCoord;
void main() {
// Look up a pixel from first canvas
vec4 color1 = texture2D(u_canvas1, v_texCoord);
// Look up a pixel from second canvas
vec4 color2 = texture2D(u_canvas2, v_texCoord);
// return the 2 colors multiplied
gl_FragColor = color1 * color2;
}
</script>
<!-- <canvas id="canvas1"></canvas>
<canvas id="canvas2"></canvas> -->
<div id="performance"></div>
<canvas id="webgl" width="1600" height="900"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Of course the fact that Chrome is running the commands in another process means you get some parallel processing for free. I think most of the time this is a win for Chrome WebGL performance over Firefox WebGL performance but not always.
The only other thing that comes to mind for the remaining difference in speed is how the browsers transfer the canvas to a texture. There are many possibilities:
First off, 2D canvases keep their data as premultiplied alpha but WebGL defaults to wanting un-premultiplied alpha, so:
The browser has the 2D canvas in ram. It has to convert that data to unpremultiplied alpha then upload it via glTexImage2D. (Slow)
The browser has the 2D canvas in vram. It downloads it to ram, converts it to unpremultiplied alpha then uploads it via glTexImage2D (Even Slower)
The browser has the 2D canvas in vram. It attaches your texture to a framebuffer and renders the canvas into it using a shader that unpremultiplies the alpha (fast).
I'm pretty positive Chrome does that last method. I know the code for it exists. I don't know all the conditions required to make sure that code is used, but I'm pretty confident a 1600x900 canvas will take that path (at one point canvases below a certain size like 256x256 were done on the CPU, not the GPU, but I have no idea if that is still true).
Firefox may or may not do the same thing but if it doesn't that could be why Chrome does this in 27ms and Firefox in 32ms when we stall the GPU process by calling gl.readPixels.
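One thing worth trying, as a guess at a possible optimization rather than a guaranteed fix, is telling WebGL you want premultiplied data at upload time, which removes the need for the un-premultiply conversion; whether a given browser actually takes a faster path for it is up to the browser, and note that your shader math then operates on premultiplied colors:
// ask WebGL to accept premultiplied alpha when uploading the canvas so the
// browser doesn't have to convert the (already premultiplied) canvas data back
gl.pixelStorei(gl.UNPACK_PREMULTIPLY_ALPHA_WEBGL, true);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, canvas);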
The larger point though is that browsers can optimize in many different ways and there is no guarantee which ways they will or won't optimize.

How to create 2D shapes with n-sides in WebGL using keyboard input?

I'm trying to create a program in WebGL that allows you to draw or create shapes of n-size via keyboard input. The user enters in the number of sides to generate a shape with that many sides. So, if you press '3', you will get a triangle, if you press '4', you will get a square, if you press '5', you will get a pentagon, etc.
So far, I've been able to create separate pieces of code that create triangles, squares, pentagons, etc. without keyboard input, but I'm not sure how to go about generating shapes with n sides within the same program via user/keyboard input. How would I go about doing this?
Examples of my code so far:
Drawing a triangle:
var VSHADER_SOURCE =
'attribute vec4 a_Position;\n' +
'void main() {\n' +
' gl_Position = a_Position;\n' +
'}\n';
var FSHADER_SOURCE =
'void main() {\n' +
' gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n' +
'}\n';
function main() {
var canvas = document.getElementById('webgl');
var gl = getWebGLContext(canvas);
if (!gl) {
console.log('Failed to get the rendering context for WebGL');
return;
}
if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
console.log('Failed to initialize shaders.');
return;
}
var n = initVertexBuffers(gl);
if (n < 0) {
console.log('Failed to set the positions of the vertices');
return;
}
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLES, 0, n);
}
function initVertexBuffers(gl) {
var vertices = new Float32Array([
0, 0.5, -0.5, -0.5, 0.5, -0.5
]);
var n = 3; // The number of vertices
var vertexBuffer = gl.createBuffer();
if (!vertexBuffer) {
console.log('Failed to create the buffer object');
return -1;
}
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
var a_Position = gl.getAttribLocation(gl.program, 'a_Position');
if (a_Position < 0) {
console.log('Failed to get the storage location of a_Position');
return -1;
}
gl.vertexAttribPointer(a_Position, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(a_Position);
return n;
}
Drawing a square:
var VSHADER_SOURCE =
'attribute vec4 a_Position;\n' +
'void main() {\n' +
' gl_Position = a_Position;\n' +
'}\n';
var FSHADER_SOURCE =
'void main() {\n' +
' gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n' +
'}\n';
function main() {
var canvas = document.getElementById('webgl');
var gl = getWebGLContext(canvas);
if (!gl) {
console.log('Failed to get the rendering context for WebGL');
return;
}
if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
console.log('Failed to initialize shaders.');
return;
}
var n = initVertexBuffers(gl);
if (n < 0) {
console.log('Failed to set the positions of the vertices');
return;
}
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, n);
}
function initVertexBuffers(gl) {
var vertices = new Float32Array([
-1, -1, -1, 1, 1, 1, 1, -1, -1, -1,
]);
var n = 5; // The number of vertices
var vertexBuffer = gl.createBuffer();
if (!vertexBuffer) {
console.log('Failed to create the buffer object');
return -1;
}
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
var a_Position = gl.getAttribLocation(gl.program, 'a_Position');
if (a_Position < 0) {
console.log('Failed to get the storage location of a_Position');
return -1;
}
gl.vertexAttribPointer(a_Position, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(a_Position);
return n;
}
You can start by writing a function that computes vertex positions for a polygon, taking the number of sides as a parameter.
For example, this one places the polygon's vertices on a circle of a given radius using polar angles. You can write your own.
function computePolygonPositions(sides, radius)
{
  let positions = []
  for (let i = 0; i < sides; i++)
  {
    let i0 = i
    let i1 = (i + 1) % sides
    let theta0 = 2.0 * Math.PI * i0 / sides
    let theta1 = 2.0 * Math.PI * i1 / sides
    let x0 = radius * Math.cos(theta0)
    let y0 = radius * Math.sin(theta0)
    let x1 = radius * Math.cos(theta1)
    let y1 = radius * Math.sin(theta1)
    // one triangle per side: center, current vertex, next vertex
    positions.push(0, 0)
    positions.push(x0, y0)
    positions.push(x1, y1)
  }
  return positions
}
Of course, you can upgrade this function to add indices, texture coordinates, colors or anything you need.
Once you're done with it, just call it to create a new vertex buffer that you'll bind to ARRAY_BUFFER, set the layout and enable the position attribute, as sketched below.
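Here is a rough sketch of wiring that up to keyboard input, reusing the question's initVertexBuffers-style setup (gl and vertexBuffer are assumed to be in scope; the radius of 0.5 is arbitrary):
// sketch: rebuild the vertex buffer and redraw whenever a digit key is pressed
window.addEventListener('keydown', function (e) {
  var sides = parseInt(e.key, 10);        // '3' -> 3, '4' -> 4, ...
  if (isNaN(sides) || sides < 3) return;  // ignore non-digits and degenerate shapes
  var positions = computePolygonPositions(sides, 0.5);
  gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
  gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
  gl.clear(gl.COLOR_BUFFER_BIT);
  // each side contributes one triangle (center, current vertex, next vertex)
  gl.drawArrays(gl.TRIANGLES, 0, sides * 3);
});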

CreateJS Radial gradient with matrix

I'm converting a Flash application to HTML5 Canvas. Most of the development is finished, but for handling the colors there is code like this in the Flash application:
matrix = new Matrix ();
matrix.createGradientBox (600, ColorHeight * 1200, 0, 80, ColorHeight * -600);
Animation_gradient_mc.clear ();
Animation_gradient_mc.beginGradientFill (fillType, colors, alphas, ratios, matrix, spreadMethod, interpolationMethod, focalPointRatio);
The declaration for a radial gradient in CreateJS is the following:
beginRadialGradientFill(colors, ratios, x0, y0, r0, x1, y1, r1 )
Does anyone know a method to apply a Matrix to a gradient fill?
Any help would be appreciated.
Thanks in advance
Edit
Here are some examples of the gradient I'm trying to reproduce:
As you can see it starts off as a standard radial gradient.
However, it can also appear stretched; I think this is where the matrix helps.
I've attempted to create the same effect by creating a createjs.Graphics.Fill with a matrix but it doesn't seem to be doing anything:
var matrix = new VacpMatrix();
matrix.createGradientBox(
600,
discharge_gradient.color_height * 1200,
0,
80,
discharge_gradient.color_height * -600
);
// test_graphics.append(new createjs.Graphics.Fill('#0000ff', matrix));
console.log('matrix', matrix);
test_graphics.append(new createjs.Graphics.Fill('#ff0000', matrix).radialGradient(
discharge_gradient.colors,
discharge_gradient.ratios,
discharge_gradient.x0,
discharge_gradient.y0,
discharge_gradient.r0,
discharge_gradient.x1,
discharge_gradient.y1,
discharge_gradient.r1
));
var discharge_shape = new createjs.Shape(test_graphics);
I extended the Matrix2d class to add a createGradientBox method using code from the openfl project:
p.createGradientBox = function (width, height, rotation, tx, ty) {
if (_.isUndefined(rotation) || _.isNull(rotation)) {
rotation = 0;
}
if (_.isUndefined(tx) || _.isNull(tx)) {
tx = 0;
}
if (_.isUndefined(ty) || _.isNull(ty)) {
ty = 0;
}
var a = width / 1638.4,
d = height / 1638.4;
// Rotation is clockwise
if (rotation != 0) {
var cos = Math.cos(rotation),
sin = Math.sin(rotation);
this.b = sin * d;
this.c = -sin * a;
this.a = a * cos;
this.d = d * cos;
} else {
this.b = 0;
this.c = 0;
}
this.tx = tx + width / 2;
this.ty = ty + height / 2;
}
I hope the extra information is useful.
I don't know createJS well enough, nor the Flash Matrix object, but to make this kind of oval gradient with the native Canvas2D API, you will need to transform the context's matrix.
var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
var horizontalScale = .3;
var verticalScale = 1;
var gradient = ctx.createRadialGradient(100/horizontalScale, 100/verticalScale, 100, 100/horizontalScale,100/verticalScale,0);
gradient.addColorStop(0,"green");
gradient.addColorStop(1,"red");
// shrink the context's matrix
ctx.scale(horizontalScale, verticalScale)
// draw your gradient
ctx.fillStyle = gradient;
// stretch the rectangle which contains the gradient accordingly
ctx.fillRect(0,0, 200/horizontalScale, 200/verticalScale);
// reset the context's matrix
ctx.setTransform(1,0,0,1,0,0);
canvas{ background-color: ivory;}
<canvas id="canvas" width="200" height="200"></canvas>
So if you are planning to write some kind of a function to reproduce it, have a look at ctx.scale(), ctx.transform() and ctx.setTransform().
EDIT
As you noticed, this will also shrink your drawn shapes, so you will have to calculate how much you should "unshrink" those when drawing, just like I did with the fillRect (agreed, that one was an easy case).
Here is a function that could help you with more complicated shapes. I didn't really test it (only with the given example), so it may fail somehow, but it can also give you an idea of how to deal with it:
var canvas = document.getElementById("canvas");
var ctx = canvas.getContext("2d");
function shrinkedRadial(ctx, shapeArray, xScale, yScale, gradientOpts) {
// scaling by 0 is like not drawing
if (!xScale || !yScale) return;
var gO = gradientOpts;
// apply our scale on the gradient options we passed
var gradient = ctx.createRadialGradient(gO.x0 / xScale, gO.y0 / yScale, gO.r0, gO.x1 / xScale, gO.y1 / yScale, gO.r1);
gradient.addColorStop(gO.c1_pos, gO.c1_fill);
gradient.addColorStop(gO.c2_pos, gO.c2_fill);
// shrink the context's matrix
ctx.scale(xScale, yScale);
ctx.fillStyle = gradient;
// execute the drawing operations' string
shapeArray.forEach(function(str) {
var val = str.split(' ');
var op = shapesRef[val[0]];
if (val[1]) {
var pos = val[1].split(',').map(function(v, i) {
// if even, it should be an y axis, otherwise an x one
return i % 2 ? v / yScale : v / xScale;
});
ctx[op].apply(ctx, pos);
} else {
// no parameters
ctx[op]();
}
});
// apply our gradient
ctx.fill();
// reset the transform matrix
ctx.setTransform(1, 0, 0, 1, 0, 0);
}
// just for shortening our shape drawing operations
// notice how arc operations are omitted, it could be implemented but...
var shapesRef = {
b: 'beginPath',
fR: 'fillRect',
m: 'moveTo',
l: 'lineTo',
bC: 'bezierCurveTo',
qC: 'quadraticCurveTo',
r: 'rect',
c: 'closePath'
};
var gradientOpts = {
x0: 232,
y0: 55,
r0: 70,
x1: 232,
y1: 55,
r1: 0,
c1_fill: 'red',
c1_pos: 0,
c2_fill: 'green',
c2_pos: 1
}
var shapes = ['b', 'm 228,133', 'bC 209,121,154,76,183,43', 'bC 199,28,225,34,233,59', 'bC 239,34,270,29,280,39', 'bC 317,76,248,124,230,133']
// our shape is drawn at 150px from the right so we do move the context accordingly, but you won't have to.
ctx.translate(-150, 0);
shrinkedRadial(ctx, shapes, .3, 1, gradientOpts);
ctx.font = '15px sans-serif';
ctx.fillStyle = 'black';
ctx.fillText('shrinked radialGradient', 3, 20);
// how it looks like without scaling :
ctx.translate(50, 0)
var gO = gradientOpts;
var gradient = ctx.createRadialGradient(gO.x0, gO.y0, gO.r0, gO.x1, gO.y1, gO.r1);
gradient.addColorStop(gO.c1_pos, gO.c1_fill);
gradient.addColorStop(gO.c2_pos, gO.c2_fill);
ctx.fillStyle = gradient;
shapes.forEach(function(str) {
var val = str.split(' ');
var op = shapesRef[val[0]];
if (val[1]) {
var pos = val[1].split(',');
ctx[op].apply(ctx, pos);
} else {
ctx[op]();
}
});
ctx.fill();
ctx.font = '15px sans-serif';
ctx.fillStyle = 'black';
ctx.fillText('normal radialGradient', 160, 20);
<canvas id="canvas" width="400" height="150"></canvas>
A standard matrix would adjust the inputs Width, angle Horizontal, angle Vertical, Height, pos X, pos Y, in that order.
Here you are using gradientBox, which is not the usual type of AS3 matrix. Its expected input is: Width, Height, Rotation, pos X, pos Y.
I don't use createJS so I'm gonna guess this (you can build on it)...
Your usual beginRadialGradientFill(colors, ratios, x0, y0, r0, x1, y1, r1)
becomes something like below (as though a gradientBox matrix were involved):
beginRadialGradientFill(colors, ratios, posX, posY, Rotation, Width, Height, Rotation)

webgl lighting shader working with firefox but not with chrome

I am currently developing a 3D engine using WebGL, and I encountered a problem with the lighting shader. I am using an array of structs in order to pass the light parameters to the vertex and fragment shaders (with the same array passed as a "uniform"). It actually works very well with Firefox 35, but not at all with Google Chrome, and I have no idea what is wrong with this code.
Here is the code for the vertex shader:
precision mediump float;
precision mediump int;
struct light{
vec3 ambient;
vec3 specular;
vec3 diffuse;
vec3 vector;
};
attribute vec3 aVertexPosition;
attribute vec4 aVertexColor;
attribute vec3 aVertexNormal;
uniform mat4 uMVMatrix;
uniform mat4 uPMatrix;
uniform mat4 uNormalMatrix;
varying vec4 vMaterialColor;
varying vec3 N;
varying vec3 V;
const int MAX_LIGHT = 3;
uniform light lights[MAX_LIGHT];
varying vec3 L[MAX_LIGHT];
uniform int lightCount;
void main(void) {
vec4 position4 = uMVMatrix * vec4(aVertexPosition, 1.0);
vec3 position = position4.xyz / position4.w;
V = normalize(-position);
for(int i = 0 ; i < MAX_LIGHT ; i++){
if (i < lightCount){
L[i] = normalize(lights[i].vector - position);
}
}
N = normalize(vec3(uNormalMatrix * vec4(aVertexNormal,0.0)));
gl_Position = uPMatrix * position4;
vMaterialColor = aVertexColor;
}
And the code for the fragment shader is:
precision mediump float;
precision mediump int;
varying vec3 N;
varying vec3 V;
struct materialProperties {
vec3 ambient;
vec3 diffuse;
vec3 specular;
vec3 emissive;
float shininess;
};
struct light{
vec3 ambient;
vec3 specular;
vec3 diffuse;
vec3 vector;
};
// Material
varying vec4 vMaterialColor;
uniform materialProperties uMaterial;
// Light
const int MAX_LIGHT = 3;
uniform light lights[MAX_LIGHT];
varying vec3 L[MAX_LIGHT];
uniform int lightCount;
void main(void) {
vec3 color = vec3(0.0);
for(int i = 0; i < MAX_LIGHT ; i++){
if(i < lightCount){
float dot_LN = max(dot(N,L[i]), 0.0);
float specular = 0.0;
if(dot_LN > 0.0){
vec3 H = normalize(L[i]+V);
float dot_HN = max(dot(H, N), 0.0);
specular = pow(dot_HN, uMaterial.shininess);
}
vec3 ambiantComponent = uMaterial.ambient * lights[i].ambient;
vec3 diffuseComponent = uMaterial.diffuse * vMaterialColor.xyz * dot_LN * lights[i].diffuse;
vec3 specularComponent = uMaterial.specular * specular * lights[i].specular;
color += ambiantComponent + diffuseComponent + specularComponent;
}
}
gl_FragColor = vec4(color, vMaterialColor.w);
}
I am sending the light information with gl.uniformXX calls:
for(var i = 0 ; i < light.length ; i++){
gl.uniform3fv(shaderProgram.light[i].vector, light[i]._lightVector);
gl.uniform3fv(shaderProgram.light[i].ambient, light[i].ambient);
gl.uniform3fv(shaderProgram.light[i].specular, light[i].specular);
gl.uniform3fv(shaderProgram.light[i].diffuse, light[i].diffuse);
}
gl.uniform1i(shaderProgram.lightCount, light.length);
And shaderProgram is declared with the following code:
shaderProgram.light = [];
for(var i = 0 ; i < 3 ; i++){
var l = {};
l.ambient = gl.getUniformLocation(shaderProgram,"lights["+i+"].ambient");
l.specular = gl.getUniformLocation(shaderProgram,"lights["+i+"].specular");
l.vector = gl.getUniformLocation(shaderProgram,"lights["+i+"].vector");
l.diffuse = gl.getUniformLocation(shaderProgram,"lights["+i+"].diffuse");
shaderProgram.light.push(l);
}
shaderProgram.lightCount = gl.getUniformLocation(shaderProgram,"lightCount");
The scene rendered with Google Chrome is a black image, whereas Firefox renders the scene as expected.
Do you know where is the problem in this code?
Edit:
The following minimal code presents the same bug:
<!doctype html>
<html>
<head>
<meta charset="UTF-8">
<title>Fail</title>
</head>
<script id="shader-fs" type="x-shader/x-fragment">
precision mediump float;
precision mediump int;
struct test_t{
vec3 a;
vec3 b;
vec3 c;
vec3 d;
};
uniform test_t test;
varying vec3 sum;
void main(void) {
gl_FragColor = vec4(test.a+test.b+test.c-sum, 1.0);
}
</script>
<script id="shader-vs" type="x-shader/x-vertex">
precision mediump float;
precision mediump int;
struct test_t{
vec3 a;
vec3 b;
vec3 c;
vec3 d;
};
attribute vec3 aVertexPosition;
uniform test_t test;
varying vec3 sum;
void main(void) {
sum = test.a + test.b;
gl_Position = vec4(aVertexPosition,1.0);
}
</script>
<script type="application/javascript">
var canvas, gl, shaderProgram, triangleVertexPositionBuffer;
function start() {
canvas = document.getElementById("webgl-canvas");
initWebGL(canvas); // Initialize the GL context
if (gl) {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clearDepth(1.0); // Clear everything
gl.enable(gl.DEPTH_TEST); // Enable depth testing
gl.depthFunc(gl.LEQUAL); // Near things obscure far things
initShaders();
triangleVertexPositionBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
var vertices = [
0.0, 0.5, -0.5,
-0.5, -0.5, -0.5,
0.5, -0.5, -0.5
];
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
triangleVertexPositionBuffer.itemSize = 3;
triangleVertexPositionBuffer.numItems = 3;
setInterval(drawScene, 15);
}
}
function initWebGL() {
gl = null;
try {
gl = canvas.getContext("experimental-webgl");
}
catch(e) {
}
if (!gl)
alert("Unable to initialize WebGL");
}
function initShaders() {
var fragmentShader = getShader(gl, "shader-fs");
var vertexShader = getShader(gl, "shader-vs");
shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert("Unable to initialize the shader program.");
}
gl.useProgram(shaderProgram);
vertexPositionAttribute = gl.getAttribLocation(shaderProgram, "aVertexPosition");
gl.enableVertexAttribArray(vertexPositionAttribute);
shaderProgram.test = {}
shaderProgram.test.a = gl.getUniformLocation(shaderProgram,"test.a");
shaderProgram.test.b = gl.getUniformLocation(shaderProgram,"test.b");
shaderProgram.test.c = gl.getUniformLocation(shaderProgram,"test.c");
shaderProgram.test.d = gl.getUniformLocation(shaderProgram,"test.d");
}
function getShader(gl, id) {
var shaderScript = document.getElementById(id);
if (!shaderScript) {
return null;
}
var theSource = "";
var currentChild = shaderScript.firstChild;
while(currentChild) {
if (currentChild.nodeType == 3) {
theSource += currentChild.textContent;
}
currentChild = currentChild.nextSibling;
}
var shader;
if (shaderScript.type == "x-shader/x-fragment") {
shader = gl.createShader(gl.FRAGMENT_SHADER);
} else if (shaderScript.type == "x-shader/x-vertex") {
shader = gl.createShader(gl.VERTEX_SHADER);
} else {
return null; // Unknown shader type
}
gl.shaderSource(shader, theSource);
gl.compileShader(shader);
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert("An error occurred compiling the shaders: " + gl.getShaderInfoLog(shader));
return null;
}
return shader;
}
function drawScene() {
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.uniform3fv(shaderProgram.test.a, [1.0,0.0,0.0]);
gl.uniform3fv(shaderProgram.test.b, [0.0,1.0,0.0]);
gl.uniform3fv(shaderProgram.test.c, [0.0,0.0,1.0]);
gl.uniform3fv(shaderProgram.test.d, [1.0,1.0,1.0]);
gl.bindBuffer(gl.ARRAY_BUFFER, triangleVertexPositionBuffer);
gl.vertexAttribPointer(shaderProgram.vertexPositionAttribute, triangleVertexPositionBuffer.itemSize, gl.FLOAT, false, 0, 0);
gl.drawArrays(gl.TRIANGLES, 0, triangleVertexPositionBuffer.numItems);
}
</script>
<body onload="start()">
<canvas id="webgl-canvas" width="500" height="500"></canvas>
</body>
</html>
The triangle is yellow/white and blinking in Google Chrome, and blue in Mozilla Firefox. The problem seems to be specific to the Windows platform (tested with Windows 8.1 x64).
Edit: The issue has been reported to the Chromium team; the fix should be developed soon.
I tested your minimal code with win7 Chrome and IE11 and win8 Chrome and IE11. And as you said, it is not working with win8 Chrome (but works with win7 Chrome).
I did a few modifications to find out what's wrong. In both the vertex and fragment shader I see the uniform test_t test has only property a = [1,0,0], but properties b, c, d seem to be [0,0,0] (or not initialized at all, so whatever values happen to be there).
There are two tests:
https://www.khronos.org/registry/webgl/sdk/tests/conformance/glsl/misc/shader-with-array-of-structs-uniform.html
https://www.khronos.org/registry/webgl/sdk/tests/conformance/glsl/misc/shader-with-array-of-structs-containing-arrays.html
but both pass in my Chrome. It might be because they don't cover every possible setup and their target is vec4 only.
A temporary solution (which works for me) is to use vec4 instead of vec3, or wait until the Chrome team fixes it.
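For reference, a minimal sketch of that workaround against the test case above (only the changed parts shown):
// in both shaders: declare the struct members as vec4 instead of vec3
struct test_t{
  vec4 a;
  vec4 b;
  vec4 c;
  vec4 d;
};
// in JavaScript: upload four components per uniform, padding the old vec3 values
gl.uniform4fv(shaderProgram.test.a, [1.0, 0.0, 0.0, 0.0]);
gl.uniform4fv(shaderProgram.test.b, [0.0, 1.0, 0.0, 0.0]);
gl.uniform4fv(shaderProgram.test.c, [0.0, 0.0, 1.0, 0.0]);
gl.uniform4fv(shaderProgram.test.d, [1.0, 1.0, 1.0, 0.0]);
In the shader code you can then use test.a.xyz (and so on) wherever the vec3 values were used before.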