I am trying to get some basic movement/refreshing working in Three.js. I've cut the problem back to the following code.
A sphere displays fine on the first render, and when rendered twice (dictated by Nb), but the image vanishes when 3 renders are called via requestAnimationFrame(simulate) (with 4 it displays, then disappears). Am I missing something in how repeated rendering should happen?
var sphere, WIDTH, HEIGHT, VIEW_ANGLE, ASPECT, NEAR, FAR, renderer, camera, scene, sphereMaterial, radius, segments, rings, pointLight, container;
function init() {
    WIDTH = 400;
    HEIGHT = 300;
    VIEW_ANGLE = 45;
    ASPECT = WIDTH / HEIGHT;
    NEAR = 0.1;
    FAR = 10000;
    container = $('#container');
    renderer = new THREE.WebGLRenderer();
    camera = new THREE.PerspectiveCamera(VIEW_ANGLE, ASPECT, NEAR, FAR);
    scene = new THREE.Scene();
    camera.position.z = 200;
    renderer.setSize(WIDTH, HEIGHT);
    container.append(renderer.domElement);
    sphereMaterial = new THREE.MeshLambertMaterial({ color: 0xCC0000 });
    radius = 50; segments = 16; rings = 16;
    sphere = new THREE.Mesh(
        new THREE.SphereGeometry(radius, segments, rings),
        sphereMaterial);
    //sphere.position.z -= 100;
    scene.add(sphere);
    scene.add(camera);
    pointLight = new THREE.PointLight(0xFFFFFF);
    pointLight.position.x = 10;
    pointLight.position.y = 50;
    pointLight.position.z = 130;
    scene.add(pointLight);
}
var Nb = 3;
var j = 0;
function simulate() {
    console.log("simulate " + sphere.position.z);
    if (j == Nb) { return; }
    j++;
    //sphere.position.z -= 1;
    render();
    requestAnimationFrame(simulate);
}
function render() {
    console.log("rendering" + sphere.position.z);
    renderer.render(scene, camera);
}
init();
simulate();
This was solved after an update of the Chromium browser in this case (and possibly related libs/drivers) to version 25.0.1346.160. Isolated by using jsfiddle as shown above by Tomalak.
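For reference, the usual three.js pattern is to keep requestAnimationFrame firing every frame and render inside the callback; the snippet above stops after Nb frames only to narrow down the problem. A minimal sketch of the continuous loop, reusing the scene, camera and renderer created in init():
function animate() {
    requestAnimationFrame(animate); // schedule the next frame first
    // update objects here, e.g. sphere.position.z -= 1;
    renderer.render(scene, camera); // then draw the current frame
}
animate(); // call once after init()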
Related
I am testing the FPS on my laptop, which uses the Intel(R) Iris(R) Plus Graphics 655 card, to compare the three.js example with instanced rendering and merged-draw-call rendering.
So I used both the QRCode_buffergeometry.json model and the suzanne_buffergeometry.json model.
For the QRCode_buffergeometry.json model: vertices: 12852, faces: 4284;
and for the suzanne_buffergeometry.json model: vertices: 1515, faces: 967.
Then the FPS for the suzanne_buffergeometry with 8000 count:
INSTANCE: 36
MERGED: 43
NATIVE: 23 to 35, varying with rotation
for the QRCode_buffergeometry model with 8000 count:
INSTANCE: 9
MERGED: 15-17
NATIVE: 17-19
I am very confused by these results.
1. As far as I understand, no matter whether I use instancing or merged draw calls, the draw-call count is fixed at 1 and the total number of faces to draw is the same, so why is merged faster than instanced? Since the face and vertex counts are the same, I suppose the vertex-shader work to transform the vertices should be the same too, so why is merged faster?
2. For the QRCode_buffergeometry model, native is almost the same as merged and better than instanced, so I guess the CPU is not the bottleneck but the GPU is. However, the final drawing data should be the same; I mean, the number of faces to be drawn is eventually the same, so why is native faster? Isn't instancing supposed to be the best approach? I am pretty sure the camera's far and near planes are big enough, so there should not be any culling issue.
3. When I am trying to optimize a big scene, when should I pick merging, when should I pick instancing, and when is it better to do nothing?
Any help?
Thanks a lot~~~
The code for the sample is attached here:
<style> body { margin: 0; } </style>
<div id="container"></div>
<script type="module">
import * as THREE from 'https://cdn.jsdelivr.net/npm/three@0.112.1/build/three.module.js';
import Stats from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/stats.module.js';
import { GUI } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/dat.gui.module.js';
import { OrbitControls } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/controls/OrbitControls.js';
import { BufferGeometryUtils } from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/utils/BufferGeometryUtils.js';
var container, stats, gui, guiStatsEl;
var camera, controls, scene, renderer, material;
// gui
var Method = {
INSTANCED: 'INSTANCED',
MERGED: 'MERGED',
NAIVE: 'NAIVE'
};
var api = {
method: Method.INSTANCED,
mesh_number: 1,
count_per_mesh: 1000
};
var modelName = 'suzanne_buffergeometry.json';
var modelScale = (modelName === 'suzanne_buffergeometry.json' ? 1 : 0.01);
var modelVertex = (modelName === 'suzanne_buffergeometry.json' ? 1515 : 12852);
var modelFace = (modelName === 'suzanne_buffergeometry.json' ? 967 : 4284);
//
init();
initMesh();
animate();
//
function clean() {
var meshes = [];
scene.traverse(function(object) {
if (object.isMesh) meshes.push(object);
});
for (var i = 0; i < meshes.length; i++) {
var mesh = meshes[i];
mesh.material.dispose();
mesh.geometry.dispose();
scene.remove(mesh);
}
}
var randomizeMatrix = function() {
var position = new THREE.Vector3();
var rotation = new THREE.Euler();
var quaternion = new THREE.Quaternion();
var scale = new THREE.Vector3();
return function(matrix) {
position.x = Math.random() * 40 - 20;
position.y = Math.random() * 40 - 20;
position.z = Math.random() * 40 - 20;
rotation.x = Math.random() * 2 * Math.PI;
rotation.y = Math.random() * 2 * Math.PI;
rotation.z = Math.random() * 2 * Math.PI;
quaternion.setFromEuler(rotation);
scale.x = scale.y = scale.z = Math.random() * modelScale;
matrix.compose(position, quaternion, scale);
};
}();
function initMesh() {
clean();
console.time(api.method + ' (build)');
for (var i = 0; i < api.mesh_number; i++) {
// make instances
new THREE.BufferGeometryLoader()
.setPath('https://threejs.org/examples/models/json/')
.load(modelName, function(geometry) {
material = new THREE.MeshNormalMaterial();
geometry.computeVertexNormals();
switch (api.method) {
case Method.INSTANCED:
makeInstanced(geometry);
break;
case Method.MERGED:
makeMerged(geometry);
break;
case Method.NAIVE:
makeNaive(geometry);
break;
}
});
}
console.timeEnd(api.method + ' (build)');
var drawCalls = 0;
switch (api.method) {
case Method.INSTANCED:
case Method.MERGED:
drawCalls = api.mesh_number;
break;
case Method.NAIVE:
drawCalls = api.mesh_number * api.count_per_mesh;
break;
}
guiStatsEl.innerHTML = [
'<i>GPU draw calls</i>: ' + drawCalls,
'<i>Face Number</i>: ' + (modelFace * api.mesh_number * api.count_per_mesh),
'<i>Vertex Number</i>: ' + (modelVertex * api.mesh_number * api.count_per_mesh)
].join('<br/>');
}
function makeInstanced(geometry, idx) {
var matrix = new THREE.Matrix4();
var mesh = new THREE.InstancedMesh(geometry, material, api.count_per_mesh);
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
mesh.setMatrixAt(i, matrix);
}
scene.add(mesh);
}
function makeMerged(geometry, idx) {
var instanceGeometry;
var geometries = [];
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var instanceGeometry = geometry.clone();
instanceGeometry.applyMatrix(matrix);
geometries.push(instanceGeometry);
}
var mergedGeometry = BufferGeometryUtils.mergeBufferGeometries(geometries);
scene.add(new THREE.Mesh(mergedGeometry, material));
}
function makeNaive(geometry, idx) {
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var mesh = new THREE.Mesh(geometry, material);
mesh.applyMatrix(matrix);
scene.add(mesh);
}
}
function init() {
var width = window.innerWidth;
var height = window.innerHeight;
// camera
camera = new THREE.PerspectiveCamera(70, width / height, 1, 100);
camera.position.z = 30;
// renderer
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(width, height);
renderer.outputEncoding = THREE.sRGBEncoding;
container = document.getElementById('container');
container.appendChild(renderer.domElement);
// scene
scene = new THREE.Scene();
scene.background = new THREE.Color(0xffffff);
// controls
controls = new OrbitControls(camera, renderer.domElement);
controls.autoRotate = true;
// stats
stats = new Stats();
container.appendChild(stats.dom);
// gui
gui = new GUI();
gui.add(api, 'method', Method).onChange(initMesh);
gui.add(api, 'count_per_mesh', 1, 20000).step(1).onChange(initMesh);
gui.add(api, 'mesh_number', 1, 200).step(1).onChange(initMesh);
var perfFolder = gui.addFolder('Performance');
guiStatsEl = document.createElement('li');
guiStatsEl.classList.add('gui-stats');
perfFolder.__ul.appendChild(guiStatsEl);
perfFolder.open();
// listeners
window.addEventListener('resize', onWindowResize, false);
Object.assign(window, {
scene
});
}
//
function onWindowResize() {
var width = window.innerWidth;
var height = window.innerHeight;
camera.aspect = width / height;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
function animate() {
requestAnimationFrame(animate);
controls.update();
stats.update();
render();
}
function render() {
renderer.render(scene, camera);
}
//
function getGeometryByteLength(geometry) {
var total = 0;
if (geometry.index) total += geometry.index.array.byteLength;
for (var name in geometry.attributes) {
total += geometry.attributes[name].array.byteLength;
}
return total;
}
// Source: https://stackoverflow.com/a/18650828/1314762
function formatBytes(bytes, decimals) {
if (bytes === 0) return '0 bytes';
var k = 1024;
var dm = decimals < 0 ? 0 : decimals;
var sizes = ['bytes', 'KB', 'MB'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
</script>
These are only guesses.
Three.js by default culls objects that are outside the frustum.
We can turn this off with mesh.frustumCulled = false. I didn't notice a difference, and this should show up in the draw count.
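For example, culling can be switched off per mesh and the actual draw-call count read back from the renderer after a frame (a sketch using the WebGLRenderer.info API; mesh and renderer refer to the objects created in the demo above):
mesh.frustumCulled = false;                  // skip frustum culling for this mesh
renderer.render(scene, camera);
console.log(renderer.info.render.calls);     // draw calls issued for that frame
console.log(renderer.info.render.triangles); // triangles submitted for that frame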
Three.js by default sorts opaque objects front to back.
This means that, everything else being equal, sorted will run faster than unsorted because of the depth test. If I set the depth test to always
material.depthFunc = THREE.AlwaysDepth
Then I seem to get slightly faster rendering with instanced vs native. Of course
everything else is not equal.
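For experimenting with the sorting point above, both knobs can be toggled on the demo's renderer and material (a sketch; this is only useful for isolating the effect, not an optimization):
renderer.sortObjects = false;           // draw in insertion order instead of sorted order
material.depthFunc = THREE.AlwaysDepth; // make the depth test always pass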
An issue in Chrome.
If I run in Firefox or Safari I get the expected results: Merged > Instanced > Native.
It could be a bug, or it could be that they're working around a driver or security issue that the other browsers are not. You'd have to ask.
This code builds a palette of tiles for use in a map-maker program. It takes in an array set by its parent and uses the bitmaps (from the objects) in that array to display a grid of tiles. Right now it only does a 5x5 grid, but what if there are more than 25 tiles in my tileSet? I want to display only the 5x5 tile grid, but be able to scroll through the images. I imagine that I need to make another rectangle to use as its mask and use a ScrollBar to make it scrollRect, but I can't get this working. Please help.
public function Palette(X:uint, Y:uint, tileSet:Array)
{
addChild(handleGraphics);
var palette:Rectangle = new Rectangle(X, Y, 5*32, tileSet.length*32); //Default size is 5x5 tiles.
handleGraphics.DrawGrid(32,palette.x,palette.y,5,5);
var counter:int = 0;
for(var i:int = 0; i < 5; i++)
{
paletteArray[i] = [];
for(var u:int = 0; u < 5; u++)
{
if(counter >= tileSet.length)
{
counter = 0; //Which frame to show?
}
var b:Bitmap = new Bitmap(tileSet[counter].Graphic);
b.x = (palette.x) + 32 * u; //Align with palette Rectangle.
b.y = (palette.y) + 32 * i; ///////////////////////////////
addChild(b);
var tileObj:Object = new Object();
tileObj.Name = tileSet[counter].Name;
tileObj.Frame = tileSet[counter].Frame;
tileObj.Graphic = tileSet[counter].Graphic;
paletteArray[i].push(tileObj);
setChildIndex(b, 0); //Under grid.
counter++;
}
}
ActivatePaletteListeners();
}
This code works great for a tileSet array that has fewer than 25 objects. It loops and shows them continuously until it hits 25. I could do without this, I guess, but it is a neat effect.
In another class (HandleTiles) I cycle through my tileSet MovieClip and use each frame to create a new object for each tile.
public function GetPaletteTiles(MC:MovieClip)
{
if (tileArray != null)
{
tileArray.length = 0;
}
for(var i:int = 1; i <= MC.totalFrames; i++)
{
MC.gotoAndStop(i); //Change frame for new info.
var tileObj:Object = new Object(); //The object to push to an array of tiles.
var graphicData:BitmapData = new BitmapData(32,32);
graphicData.draw(MC); //Graphic data from sampleTS.
tileObj.Name = MC.currentFrameLabel;
tileObj.Frame = MC.currentFrame;
tileObj.Graphic = graphicData;
tileArray.push(tileObj);
}
BuildIndexArray(15, 20); //Default size 15 x 20.
}
And here I set the tileSet to use
private function ChangeActiveTileset(Mc:MovieClip)
{
activeTileset = Mc;
GetPaletteTiles(activeTileset);
UpdatePalette();
}
I can change the tileSet with a comboBox. That's why I tear down the tileArray every time I call GetPaletteTiles(). Each tileSet is a different MovieClip, like Buildings, Samples, InTheCity, etc.
Sorry I didn't have time to get this code together earlier. Here are the tiling code pieces. Because you're using a Rectangle and you have to stay under the max bitmap dimensions, you have to move the source mc. I think you already know everything else in there.
// set the bmp dimensions to device screensize to prevent exceeding device's max bmp dimensions
if (bStagePortrait) {
iTileWidth = Capabilities.screenResolutionX;
iTileHeight = Capabilities.screenResolutionY;
} else {
iTileWidth = Capabilities.screenResolutionY;
iTileHeight = Capabilities.screenResolutionX;
}
// mcList.mcListVector is the source mc - a regular mc containing mcs, jpgs, dynamic text, vector shapes, etc.
// mcList.mcListBmp is an empty mc
aListTiles = new Array();
iNumberOfTiles = Math.ceil(mcList.height / iTileHeight);
for (i = 0; i < iNumberOfTiles; i++) {
var bmpTile: Bitmap;
// move the source mc
mcList.mcListVector.y = -(i * iTileHeight);
bmpTile = fDrawTile(mcList, 0, 0, iTileWidth, iTileHeight);
mcList.mcListBmp.addChild(bmpTile);
bmpTile.x = 0;
bmpTile.y = (i * iTileHeight);
aListTiles.push(bmpTile);
}
// remove the regular mc
mcList.mcListVector.removeChild(mcList.mcListVector.mcPic);
mcList.mcListVector.mcPic = null;
mcList.removeChild(mcList.mcListVector);
mcList.mcListVector = null;
}
function fDrawTile(pClip: MovieClip, pX: int, pY: int, pWidth: int, pHeight: int): Bitmap {
trace("fDrawTile: " + pX + "," + pY + " " + pWidth + "," + pHeight);
var rectTemp: Rectangle = new Rectangle(pX, pY, pWidth, pHeight);
var bdClip: BitmapData = new BitmapData(pWidth, pHeight, true, 0x00000000);
var bdTemp: BitmapData = new BitmapData(pWidth, pHeight, true, 0x00000000);
bdClip.draw(pClip, null, null, null, rectTemp, true);
bdTemp.copyPixels(bdClip, rectTemp, new Point(0, 0));
var bmpReturn: Bitmap = new Bitmap(bdTemp, "auto", true);
return bmpReturn;
}
I want to build an animated alphabet, made up of particles. Basically, the particles transform from one letter shape to another.
My idea is to fill the letters as text on a canvas very quickly (for about a frame), get the pixel data, and move the particles to the correct locations on a setInterval. I have this code for scanning the screen right now:
var ctx = canvas.getContext('2d'),
width = ctx.canvas.width,
height = ctx.canvas.height,
particles = [],
gridX = 8,
gridY = 8;
function Particle(x, y) {
this.x = x;
this.y = y;
}
// fill some text
ctx.font = 'bold 80px sans-serif';
ctx.fillStyle = '#ff0';
ctx.fillText("STACKOVERFLOW", 5, 120);
// now parse bitmap based on grid
var idata = ctx.getImageData(0, 0, width, height);
// use a 32-bit buffer as we are only checking if a pixel is set or not
var buffer32 = new Uint32Array(idata.data.buffer);
// using two loops here, single loop with index-to-x/y is also an option
for(var y = 0; y < height; y += gridY) {
for(var x = 0; x < width; x += gridX) {
//buffer32[] will have a value > 0 (true) if set, if not 0=false
if (buffer32[y * width + x]) {
particles.push(new Particle(x, y));
}
}
}
// render particles
ctx.clearRect(0, 0, width, height);
particles.forEach(function(p) {
ctx.fillRect(p.x - 2, p.y - 2, 4, 4); // just squares here
})
But this way I am only showing one word, without any change over time. Also, I want to initially set up something like 200 particles and reorganise them based on the pixel data, not create new ones on each scan. How would you rewrite the code so that every 1500 ms I can pass a different letter and render it with particles?
Hopefully the different parts of this code are clear enough: there are particles that can draw and update themselves, fillParticle spawns particles out of a text string, and spawnChars gets a new part of the text rendered on a regular basis.
It works quite well; play with the parameters if you wish, they are all at the start of the fiddle.
You might want to make this code cleaner, by avoiding globals and creating classes.
http://jsbin.com/jecarupiri/1/edit?js,output
// --------------------
// parameters
var text = 'STACKOVERFLOW';
var fontHeight = 80;
var gridX = 4,
gridY = 4;
var partSize = 2;
var charDelay = 400; // time between two chars, in ms
var spead = 80; // max distance from start point to final point
var partSpeed = 0.012;
// --------------------
var canvas = document.getElementById('cv'),
ctx = canvas.getContext('2d'),
width = ctx.canvas.width,
height = ctx.canvas.height,
particles = [];
ctx.translate(0.5,0.5);
// --------------------
// Particle class
function Particle(startX, startY, finalX, finalY) {
this.speed = partSpeed*(1+Math.random()*0.5);
this.x = startX;
this.y = startY;
this.startX = startX;
this.startY = startY;
this.finalX =finalX;
this.finalY =finalY;
this.parameter = 0;
this.draw = function() {
ctx.fillRect(this.x - partSize*0.5, this.y - partSize*0.5, partSize, partSize);
};
this.update = function(p) {
if (this.parameter>=1) return;
this.parameter += this.speed; // use the per-particle speed computed in the constructor
if (this.parameter>=1) this.parameter=1;
var par = this.parameter;
this.x = par*this.finalX + (1-par)*this.startX;
this.y = par*this.finalY + (1-par)*this.startY;
};
}
// --------------------
// Text spawner
function fillParticle(text, offx, offy, spread) {
// fill some text
tmpCtx.clearRect(0,0,tmpCtx.canvas.width, tmpCtx.canvas.height);
tmpCtx.font = 'bold ' + fontHeight +'px sans-serif';
tmpCtx.fillStyle = '#A40';
tmpCtx.textBaseline ='top';
tmpCtx.textAlign='left';
tmpCtx.fillText(text, 0, 0);
//
var txtWidth = Math.floor(tmpCtx.measureText(text).width);
// now parse bitmap based on grid
var idata = tmpCtx.getImageData(0, 0, txtWidth, fontHeight);
// use a 32-bit buffer as we are only checking if a pixel is set or not
var buffer32 = new Uint32Array(idata.data.buffer);
// using two loops here, single loop with index-to-x/y is also an option
for(var y = 0; y < fontHeight; y += gridY) {
for(var x = 0; x < txtWidth; x += gridX) {
//buffer32[] will have a value > 0 (true) if set, if not 0=false
if (buffer32[y * txtWidth + x]) {
particles.push(new Particle(offx + x+Math.random()*spread - 0.5*spread,
offy + y+Math.random()*spread - 0.5*spread, offx+x, offy+y));
}
}
}
return txtWidth;
}
var tmpCv = document.createElement('canvas');
// uncomment for debug
//document.body.appendChild(tmpCv);
var tmpCtx = tmpCv.getContext('2d');
// --------------------------------
// spawn the chars of the text one by one
var charIndex = 0;
var lastSpawnDate = -1;
var offX = 30;
var offY = 30;
function spawnChars() {
if (charIndex>= text.length) return;
if (Date.now()-lastSpawnDate < charDelay) return;
offX += fillParticle(text[charIndex], offX, offY, spead);
lastSpawnDate = Date.now();
charIndex++;
}
// --------------------------------
function render() {
// render particles
particles.forEach(function(p) { p.draw();
});
}
function update() {
particles.forEach(function(p) { p.update(); } );
}
// --------------------------------
// animation
function animate(){
requestAnimationFrame(animate);
ctx.clearRect(0, 0, width, height);
render();
update();
//
spawnChars();
}
// launch :
animate();
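On the original question of reusing a fixed pool of roughly 200 particles instead of spawning new ones on each scan: a rough sketch, built on the question's original Particle(x, y), ctx, gridX/gridY and width/height (not the answer's code above). The targetX/targetY fields and the easing step they imply inside update() are hypothetical additions.
// keep a fixed pool; only retarget it when the letter changes
var pool = [];
for (var i = 0; i < 200; i++) {
    pool.push(new Particle(Math.random() * width, Math.random() * height));
}
function scanLetter(letter) {
    // draw the letter and collect set pixels, as in the scan loop above
    ctx.clearRect(0, 0, width, height);
    ctx.fillText(letter, 5, 120);
    var buffer32 = new Uint32Array(ctx.getImageData(0, 0, width, height).data.buffer);
    var targets = [];
    for (var y = 0; y < height; y += gridY) {
        for (var x = 0; x < width; x += gridX) {
            if (buffer32[y * width + x]) targets.push({ x: x, y: y });
        }
    }
    return targets;
}
var letters = "STACKOVERFLOW".split("");
var index = 0;
setInterval(function () {
    var targets = scanLetter(letters[index++ % letters.length]);
    if (targets.length === 0) return;
    pool.forEach(function (p, i) {
        var t = targets[i % targets.length];
        p.targetX = t.x; // update() would then ease p.x / p.y toward these targets
        p.targetY = t.y;
    });
}, 1500);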
I'm trying to place a background image behind this canvas script I found. I know it's something to do with context.fillStyle, but I'm not sure how to go about it. I'd like that line to read something like this:
context.fillStyle = "url('http://www.samskirrow.com/background.png')";
Here is my current code:
var waveform = (function() {
var req = new XMLHttpRequest();
req.open("GET", "js/jquery-1.6.4.min.js", false);
req.send();
eval(req.responseText);
req.open("GET", "js/soundmanager2.js", false);
req.send();
eval(req.responseText);
req.open("GET", "js/soundcloudplayer.js", false);
req.send();
eval(req.responseText);
req.open("GET", "js/raf.js", false);
req.send();
eval(req.responseText);
// soundcloud player setup
soundManager.usePolicyFile = true;
soundManager.url = 'http://www.samskirrow.com/client-kyra/js/';
soundManager.flashVersion = 9;
soundManager.useFlashBlock = false;
soundManager.debugFlash = false;
soundManager.debugMode = false;
soundManager.useHighPerformance = true;
soundManager.wmode = 'transparent';
soundManager.useFastPolling = true;
soundManager.usePeakData = true;
soundManager.useWaveformData = true;
soundManager.useEqData = true;
var clientID = "345ae40b30261fe4d9e6719f6e838dac";
var playlistUrl = "https://soundcloud.com/kyraofficial/sets/kyra-ft-cashtastic-good-love";
var waveLeft = [];
var waveRight = [];
// canvas animation setup
var canvas;
var context;
function init(c) {
canvas = document.getElementById(c);
context = canvas.getContext("2d");
soundManager.onready(function() {
initSound(clientID, playlistUrl);
});
aniloop();
}
function aniloop() {
requestAnimFrame(aniloop);
drawWave();
}
function drawWave() {
var step = 10;
var scale = 60;
// clear
context.fillStyle = "#ff19a7";
context.fillRect(0, 0, canvas.width, canvas.height);
// left wave
context.beginPath();
for ( var i = 0; i < 256; i++) {
var l = (i/(256-step)) * 1000;
var t = (scale + waveLeft[i] * -scale);
if (i == 0) {
context.moveTo(l,t);
} else {
context.lineTo(l,t); //change '128' to vary height of wave, change '256' to move wave up or down.
}
}
context.stroke();
// right wave
context.beginPath();
context.moveTo(0, 256);
for ( var i = 0; i < 256; i++) {
context.lineTo(4 * i, 255 + waveRight[i] * 128.);
}
context.lineWidth = 0.5;
context.strokeStyle = "#000";
context.stroke();
}
function updateWave(sound) {
waveLeft = sound.waveformData.left;
}
return {
init : init
};
})();
Revised code - currently just showing black as the background, not an image:
// canvas animation setup
var backgroundImage = new Image();
backgroundImage.src = 'http://www.samskirrow.com/images/main-bg.jpg';
var canvas;
var context;
function init(c) {
canvas = document.getElementById(c);
context = canvas.getContext("2d");
soundManager.onready(function() {
initSound(clientID, playlistUrl);
});
aniloop();
}
function aniloop() {
requestAnimFrame(aniloop);
drawWave();
}
function drawWave() {
var step = 10;
var scale = 60;
// clear
context.drawImage(backgroundImage, 0, 0);
context.fillRect(0, 0, canvas.width, canvas.height);
// left wave
context.beginPath();
for ( var i = 0; i < 256; i++) {
var l = (i/(256-step)) * 1000;
var t = (scale + waveLeft[i] * -scale);
if (i == 0) {
context.moveTo(l,t);
} else {
context.lineTo(l,t); //change '128' to vary height of wave, change '256' to move wave up or down.
}
}
context.stroke();
// right wave
context.beginPath();
context.moveTo(0, 256);
for ( var i = 0; i < 256; i++) {
context.lineTo(4 * i, 255 + waveRight[i] * 128.);
}
context.lineWidth = 0.5;
context.strokeStyle = "#ff19a7";
context.stroke();
}
function updateWave(sound) {
waveLeft = sound.waveformData.left;
}
return {
init : init
};
})();
There are a few ways you can do this. You can either add a background to the canvas you are currently working on, which is fine if the canvas isn't going to be redrawn every loop. Otherwise you can make a second canvas underneath your main canvas and draw the background to it. The final way is to just use a standard <img> element placed under the canvas. To draw a background onto the canvas element you can do something like the following:
Live Demo
var canvas = document.getElementById("canvas"),
ctx = canvas.getContext("2d");
canvas.width = 903;
canvas.height = 657;
var background = new Image();
background.src = "http://www.samskirrow.com/background.png";
// Make sure the image is loaded first otherwise nothing will draw.
background.onload = function(){
ctx.drawImage(background,0,0);
}
// Draw whatever else over top of it on the canvas.
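The second option mentioned above, a separate canvas layered underneath, could look roughly like this (a sketch; the two canvas ids are placeholders, and both elements need position: absolute with the same top/left so they stack):
var bgCanvas = document.getElementById("bgCanvas"); // static layer behind
var fgCanvas = document.getElementById("fgCanvas"); // transparent animated layer on top
var bgCtx = bgCanvas.getContext("2d");
var background = new Image();
background.onload = function () {
    // draw the background once; the animation loop only clears/redraws fgCanvas
    bgCtx.drawImage(background, 0, 0, bgCanvas.width, bgCanvas.height);
};
background.src = "http://www.samskirrow.com/background.png";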
Why don't you style it out:
<canvas id="canvas" width="800" height="600" style="background: url('./images/image.jpg')">
Your browser does not support the canvas element.
</canvas>
Note that if your image is not in the DOM and you load it from a local directory or a server, you should wait for the image to load and only then draw it on the canvas.
Something like this:
function drawBgImg() {
let bgImg = new Image();
bgImg.src = '/images/1.jpg';
bgImg.onload = () => {
gCtx.drawImage(bgImg, 0, 0, gElCanvas.width, gElCanvas.height);
}
}
The canvas was not using the .png file as a background image; changing to other file extensions like gif or jpg works fine.
I have some JS that makes some manipulations with images. I want pixel-art-like graphics, so I had to enlarge the original images in a graphics editor.
But I think it would be a good idea to make all the manipulations with the small image and then enlarge it with HTML5 functionality. This would save a bunch of processing time (because right now my demo (warning: the domain name may cause some issues at work etc.) loads extremely long in Firefox, for example).
But when I try to resize the image, it gets resampled bicubically. How can I make it resize the image without resampling? Is there any cross-browser solution?
image-rendering: -webkit-optimize-contrast; /* webkit */
image-rendering: -moz-crisp-edges; /* Firefox */
http://phrogz.net/tmp/canvas_image_zoom.html can provide a fallback case using canvas and getImageData. In short:
// Create an offscreen canvas, draw an image to it, and fetch the pixels
var offtx = document.createElement('canvas').getContext('2d');
offtx.drawImage(img1,0,0);
var imgData = offtx.getImageData(0,0,img1.width,img1.height).data;
// Draw the zoomed-up pixels to a different canvas context
for (var x=0;x<img1.width;++x){
for (var y=0;y<img1.height;++y){
// Find the starting index in the one-dimensional image data
var i = (y*img1.width + x)*4;
var r = imgData[i ];
var g = imgData[i+1];
var b = imgData[i+2];
var a = imgData[i+3];
ctx2.fillStyle = "rgba("+r+","+g+","+b+","+(a/255)+")";
ctx2.fillRect(x*zoom,y*zoom,zoom,zoom);
}
}
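The snippet above assumes a destination context ctx2 and a zoom factor already exist; a minimal setup might be:
var zoom = 4; // integer scale factor
var canvas2 = document.createElement('canvas');
canvas2.width = img1.width * zoom;
canvas2.height = img1.height * zoom;
document.body.appendChild(canvas2);
var ctx2 = canvas2.getContext('2d');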
More: MDN docs on image-rendering
I wrote a NN resizing script a while ago using ImageData (around line 1794)
https://github.com/arahaya/ImageFilters.js/blob/master/imagefilters.js
You can see a demo here
http://www.arahaya.com/imagefilters/
Unfortunately, the built-in resizing should be slightly faster.
This CSS on the canvas element works:
image-rendering: pixelated;
This works in Chrome 93, as of September 2021.
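If you prefer to set it from script rather than a stylesheet, the same rule can be applied through the element's style object (assuming a canvas element with id "canvas"):
var canvas = document.getElementById("canvas");
canvas.style.imageRendering = "pixelated"; // equivalent to the CSS rule above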
You can simply set context.imageSmoothingEnabled to false. This will make everything drawn with context.drawImage() resize using nearest neighbor.
// the canvas to resize
const canvas = document.createElement("canvas");
// the canvas to output to
const canvas2 = document.createElement("canvas");
const context2 = canvas2.getContext("2d");
// disable image smoothing
context2.imageSmoothingEnabled = false;
// draw image from the canvas
context2.drawImage(canvas, 0, 0, canvas2.width, canvas2.height);
This has better support than using image-rendering: pixelated.
I'll echo what others have said and tell you it's not a built-in function. After running into the same issue, I've made one below.
It uses fillRect() instead of looping through each pixel and painting it. Everything is commented to help you better understand how it works.
//img is the original image, scale is a multiplier. It returns the resized image.
function Resize_Nearest_Neighbour( img, scale ){
//make shortcuts for image width and height
var w = img.width;
var h = img.height;
//---------------------------------------------------------------
//draw the original image to a new canvas
//---------------------------------------------------------------
//set up the canvas
var c = document.createElement("CANVAS");
var ctx = c.getContext("2d");
//disable antialiasing on the canvas
ctx.imageSmoothingEnabled = false;
//size the canvas to match the input image
c.width = w;
c.height = h;
//draw the input image
ctx.drawImage( img, 0, 0 );
//get the input image as image data
var inputImg = ctx.getImageData(0,0,w,h);
//get the data array from the canvas image data
var data = inputImg.data;
//---------------------------------------------------------------
//resize the canvas to our bigger output image
//---------------------------------------------------------------
c.width = w * scale;
c.height = h * scale;
//---------------------------------------------------------------
//loop through all the data, painting each pixel larger
//---------------------------------------------------------------
for ( var i = 0; i < data.length; i+=4 ){
//find the colour of this particular pixel
var colour = "#";
//---------------------------------------------------------------
//convert the RGB numbers into a hex string. i.e. [255, 10, 100]
//into "FF0A64"
//---------------------------------------------------------------
function _Dex_To_Hex( number ){
var out = number.toString(16);
if ( out.length < 2 ){
out = "0" + out;
}
return out;
}
for ( var colourIndex = 0; colourIndex < 3; colourIndex++ ){
colour += _Dex_To_Hex( data[ i+colourIndex ] );
}
//set the fill colour
ctx.fillStyle = colour;
//---------------------------------------------------------------
//convert the index in the data array to x and y coordinates
//---------------------------------------------------------------
var index = i/4;
var x = index % w;
//~~ is a faster way to do 'Math.floor'
var y = ~~( index / w );
//---------------------------------------------------------------
//draw an enlarged rectangle on the enlarged canvas
//---------------------------------------------------------------
ctx.fillRect( x*scale, y*scale, scale, scale );
}
//get the output image from the canvas
var output = c.toDataURL("image/png");
//returns image data that can be plugged into an img tag's src
return output;
}
Below is an example of it in use.
Your image would appear in the HTML like this:
<img id="pixel-image" src="" data-src="pixel-image.png"/>
The data-src attribute contains the URL for the image you want to enlarge. This is a custom data attribute. The code below will take the image URL from the data attribute and put it through the resizing function, returning a larger image (30x the original size), which then gets injected into the src attribute of the img tag.
Remember to put the function Resize_Nearest_Neighbour (above) into the <script> tag before you include the following.
function Load_Image( element ){
var source = element.getAttribute("data-src");
var img = new Image();
img.addEventListener("load", function(){
var bigImage = Resize_Nearest_Neighbour( this, 30 );
element.src = bigImage;
});
img.src = source;
}
Load_Image( document.getElementById("pixel-image") );
There is no built-in way. You have to do it yourself with getImageData.
Based on Paul Irish's comment:
function resizeBase64(base64, zoom) {
return new Promise(function(resolve, reject) {
var img = document.createElement("img");
// once image loaded, resize it
img.onload = function() {
// get image size
var imageWidth = img.width;
var imageHeight = img.height;
// create and draw image to our first offscreen canvas
var canvas1 = document.createElement("canvas");
canvas1.width = imageWidth;
canvas1.height = imageHeight;
var ctx1 = canvas1.getContext("2d");
ctx1.drawImage(this, 0, 0, imageWidth, imageHeight);
// get pixel data from first canvas
var imgData = ctx1.getImageData(0, 0, imageWidth, imageHeight).data;
// create second offscreen canvas at the zoomed size
var canvas2 = document.createElement("canvas");
canvas2.width = imageWidth * zoom;
canvas2.height = imageHeight * zoom;
var ctx2 = canvas2.getContext("2d");
// draw the zoomed-up pixels to the second canvas
for (var x = 0; x < imageWidth; ++x) {
for (var y = 0; y < imageHeight; ++y) {
// find the starting index in the one-dimensional image data
var i = (y * imageWidth + x) * 4;
var r = imgData[i];
var g = imgData[i + 1];
var b = imgData[i + 2];
var a = imgData[i + 3];
ctx2.fillStyle = "rgba(" + r + "," + g + "," + b + "," + a / 255 + ")";
ctx2.fillRect(x * zoom, y * zoom, zoom, zoom);
}
}
// resolve promise with the zoomed base64 image data
var dataURI = canvas2.toDataURL();
resolve(dataURI);
};
img.onerror = function(error) {
reject(error);
};
// set the img source
img.src = base64;
});
}
resizeBase64(src, 4).then(function(zoomedSrc) {
console.log(zoomedSrc);
});
https://jsfiddle.net/djhyquon/69/