Emscripten pass uint8_t array to JavaScript?

Trying to display a uint8_t* RGB image data buffer, processed in C via WASM, on an HTML canvas.
In C I have the following external method:
extern void JS_DisplayRenderData(uint8_t* data, int dataLength);
Then I call the external method like so:
int size = 1280 * 720 * 3;
uint8_t data[size];
memset(data, 255, size);
JS_DisplayRenderData(data, size);
In JavaScript I then try to display the buffer like so:
if (typeof mergeInto !== 'undefined') mergeInto(LibraryManager.library,
{
JS_DisplayRenderData: function(data, dataLength)
{
alert("Data Length: " + dataLength);
var c = document.getElementById("canvas");
var ctx = c.getContext("2d");
var imgdata = ctx.getImageData(0, 0, c.width, c.height);
var imgdatalen = imgdata.data.length;
var i2 = 0;
for (var i = 0; i < (imgdatalen / 4); i++)
{
imgdata.data[4*i] = data[i2]; // RED (0-255)
imgdata.data[4*i+1] = data[i2+1]; // GREEN (0-255)
imgdata.data[4*i+2] = data[i2+2]; // BLUE (0-255)
imgdata.data[4*i+3] = 255; // ALPHA (0-255)
i2 += 3;
}
ctx.putImageData(imgdata, 0, 0);
}
});
However all I get are black pixels even though it should all be white.

Found the answer thanks to: Struct operations in Javascript through Emscripten
Just needed to state what the buffer type was via: "var a = HEAPU8.subarray(data);"
The method I'm using to invoke JS from C can be found here: Webassembly calling JavaScript methods from wasm, i.e. within C++ code
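Putting it together, the library function below is a minimal sketch of the fix. In a mergeInto() library the data argument arrives as a byte offset into the Emscripten heap, not as a typed array, so indexing it directly reads off a plain number (hence the black pixels). HEAPU8 is in scope in a default Emscripten build; passing data + dataLength as the end offset is my addition, since subarray(data) alone would view the heap all the way to its end.
JS_DisplayRenderData: function(data, dataLength)
{
    // 'data' is a byte offset into the WASM heap; wrap it in a typed-array view first
    var rgb = HEAPU8.subarray(data, data + dataLength);
    var c = document.getElementById("canvas");
    var ctx = c.getContext("2d");
    var imgdata = ctx.getImageData(0, 0, c.width, c.height);
    var i2 = 0;
    for (var i = 0; i < imgdata.data.length / 4; i++)
    {
        imgdata.data[4*i]   = rgb[i2];   // RED (0-255)
        imgdata.data[4*i+1] = rgb[i2+1]; // GREEN (0-255)
        imgdata.data[4*i+2] = rgb[i2+2]; // BLUE (0-255)
        imgdata.data[4*i+3] = 255;       // ALPHA (0-255)
        i2 += 3;
    }
    ctx.putImageData(imgdata, 0, 0);
}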

Related

Is it possible to create a multichannel `MediaElementAudioSourceNode` from a multichannel `HTMLAudioElement`?

It's possible to load a multichannel WAV file (e.g. first-order ambisonics) using an HTML <audio> element, and create a MediaElementAudioSourceNode from that element.
Regarding the channelCount property of that MediaElementAudioSourceNode, the documentation states that it will depend on the HTMLMediaElement the node was created from, but when trying this the channel count is always 2.
Is it possible to create a node whose channel count corresponds to the number of channels of the loaded file?
Best,
N
To answer my own question: it seems like all tracks are loaded and played back, you just need to adjust some values. I set the channel count to 4 and the channel interpretation to discrete, then used a channel splitter to route the channels individually to an analyser, et voilà, I can see all the different waveforms. Example below.
<html>
<body>
<h1> Multitrack Experiment </h1>
<p>This is a little experiment to see if all channels of a multichannel audio file are played back,
which they are.</p>
<p>The audio file contains four different waveforms on four channels. (0 -> sine, 1 -> saw, 2 -> square, 3 -> noise)</p>
<audio src="tracktest.wav"></audio>
<button>
<span>Play/Pause</span>
</button>
<br/>
<br/>
<canvas id="oscilloscope"></canvas>
</body>
<script>
// for legacy browsers
//const AudioContext = window.AudioContext || window.webkitAudioContext;
var playing = false;
const audioContext = new AudioContext();
// get the audio element
const audioElement = document.querySelector('audio');
// pass it into the audio context
// configure the mediaelement source correctly
// (otherwise it still shows 2 channels)
// also change channel interpretation while you're at it ...
const track = audioContext.createMediaElementSource(audioElement);
track.channelCount = 4;
track.channelInterpretation = 'discrete';
// just for monitoring purposes
track.connect(audioContext.destination);
// split channels to be able to
const splitter = audioContext.createChannelSplitter(4);
track.connect(splitter);
const analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;
// uncomment the different options to see the different results ...
splitter.connect(analyser, 0, 0);
//splitter.connect(analyser, 1, 0);
//splitter.connect(analyser, 2, 0);
//splitter.connect(analyser, 3, 0);
// select our play button
const playButton = document.querySelector('button');
playButton.addEventListener('click', function() {
// check if context is in suspended state (autoplay policy)
if (audioContext.state === 'suspended') {
audioContext.resume();
}
console.log(track)
// play or pause track depending on state
if (!playing) {
console.log("play");
audioElement.play();
playing = true;
} else {
console.log("stop");
audioElement.pause();
playing = false;
}
}, false);
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
analyser.getByteTimeDomainData(dataArray);
// Get a canvas defined with ID "oscilloscope"
const canvas = document.getElementById("oscilloscope");
const canvasCtx = canvas.getContext("2d");
// draw an oscilloscope of the current audio source
function draw() {
requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
canvasCtx.fillStyle = "rgb(200, 200, 200)";
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "rgb(0, 0, 0)";
canvasCtx.beginPath();
const sliceWidth = canvas.width * 1.0 / bufferLength;
let x = 0;
for (let i = 0; i < bufferLength; i++) {
const v = dataArray[i] / 128.0;
const y = v * canvas.height / 2;
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
}
draw();
</script>
</html>
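As a side note: if you want to watch all four channels at once instead of re-connecting the single analyser by hand, a sketch like the one below (variable names are mine) gives every splitter output its own AnalyserNode:
// Hypothetical variant: one analyser per channel.
const analysers = [];
for (let ch = 0; ch < 4; ch++) {
    const a = audioContext.createAnalyser();
    a.fftSize = 2048;
    splitter.connect(a, ch, 0); // route splitter output ch into this analyser
    analysers.push(a);
}
// each analysers[ch] can now fill its own Uint8Array via getByteTimeDomainData()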

Displaying rgb8 pixel data in-browser

I have found similar questions to mine on SO, but have not yet come across an answer to this problem. I have an rgb8-encoded image that I am trying to display in-browser, either in an img or canvas element. I am unsure how to convert this pixel data into an image properly, and was looking for any insight.
For context, the source of this rgb8 data is from a ROS topic with type sensor_msgs/Image. When subscribing to this topic using roslibjs, I am given the following object:
{
data: "MT4+CR…", (of length 1228800)
encoding: "rgb8",
header: {
frame_id: "camera_color_optical_frame",
seq: 1455,
stamp: ...timestamp info
},
height: 480,
is_bigendian: 0,
step: 1920,
width: 640
}
With the data string, I have tried displaying it on canvas, converting it to base64, etc. but have not been able to. I know about web_video_server in ROS to help send these images over a port, but that is not an option for me unfortunately - I need to work directly with the data.
Is there a way I can go about displaying this rgb8 data in the browser? Based on the documentation on here, data should be represented as a uint8[] (if that helps).
Thank you so much!
First create a canvas of the correct size and obtain a CanvasRenderingContext2D
// Assuming that imgMes is the image message as linked in question
const can = document.createElement("canvas");
can.width = imgMes.width;
can.height = imgMes.height;
const ctx = can.getContext("2d");
Then create an image buffer to hold the pixels
const imgData = ctx.createImageData(imgMes.width, imgMes.height);
const data = imgData.data;
const inData = imgMes.data;
Then read the data from the image message, making sure to use the correct byte order as defined by the is_bigendian flag.
var i = 0, j, y = 0, x;
while (y < imgMes.height) {
j = y * imgMes.step;
for (x = 0; x < imgMes.width; x ++) {
if (imgMes.is_bigendian) {
data[i] = inData[j]; // red
data[i + 1] = inData[j + 1]; // green
data[i + 2] = inData[j + 2]; // blue
} else {
data[i + 2] = inData[j]; // blue
data[i + 1] = inData[j + 1]; // green
data[i] = inData[j + 2]; // red
}
data[i + 3] = 255; // alpha
i += 4;
j += 3;
}
y++;
}
Then put the pixel data into the canvas:
ctx.putImageData(imgData, 0, 0);
And add the canvas to your HTML
document.body.appendChild(can);
And you are done.
Note that I may have is_bigendian the wrong way around. If so just change the line if (imgMes.is_bigendian) { to if (!imgMes.is_bigendian) {
UPDATE
With more information regarding the data format I was able to extract the image.
I used atob to decode the Base64 string, which returns another string. I then iterated over each character in that string, getting the character code to add to each pixel.
It is unclear where the endianness comes in. My guess is that it is in the decoded string, and thus the code swaps bytes for each char code, as it makes no sense to have endianness on multiples of 3 bytes.
const can = document.createElement("canvas");
can.width = imgMes.width;
can.height = imgMes.height;
const ctx = can.getContext("2d");
const imgData = ctx.createImageData(imgMes.width, imgMes.height);
const data = imgData.data;
const inData = atob(imgMes.data);
var j = 0, i = 0; // j = data in, i = data out
while( j < inData.length) {
const w1 = inData.charCodeAt(j++); // read 3 16-bit words that represent 1 pixel
const w2 = inData.charCodeAt(j++);
const w3 = inData.charCodeAt(j++);
if (!imgMes.is_bigendian) {
data[i++] = w1; // red
data[i++] = w2; // green
data[i++] = w3; // blue
} else {
data[i++] = (w1 >> 8) + ((w1 & 0xFF) << 8);
data[i++] = (w2 >> 8) + ((w2 & 0xFF) << 8);
data[i++] = (w3 >> 8) + ((w3 & 0xFF) << 8);
}
data[i++] = 255; // alpha
}
ctx.putImageData(imgData, 0, 0);
document.body.appendChild(can);
From the example data I got an image of some paving near a road.
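For reference, the whole decode can be wrapped in one helper. This is a sketch under the assumption that step equals width * 3 (true for the sample message: 1920 = 640 * 3) and that the data is little-endian; the function name is mine:
// Hypothetical helper: base64 rgb8 message -> canvas
function rgb8ToCanvas(imgMes) {
    const bin = atob(imgMes.data); // base64 -> binary string, 3 bytes per pixel
    const can = document.createElement("canvas");
    can.width = imgMes.width;
    can.height = imgMes.height;
    const ctx = can.getContext("2d");
    const imgData = ctx.createImageData(imgMes.width, imgMes.height);
    const out = imgData.data;
    for (let j = 0, i = 0; j < bin.length; j += 3, i += 4) {
        out[i]     = bin.charCodeAt(j);     // red
        out[i + 1] = bin.charCodeAt(j + 1); // green
        out[i + 2] = bin.charCodeAt(j + 2); // blue
        out[i + 3] = 255;                   // alpha
    }
    ctx.putImageData(imgData, 0, 0);
    return can;
}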

Check if image A exists in image B

I need to check if an image exists in another image using JavaScript. I need to know the best approaches (algorithms) and solutions (e.g. libraries) for this operation.
I explained what I need to do in this image:
Using the GPU to help with image processing.
Using the 2D API and some simple tricks you can exploit the GPU's power to speed up JavaScript.
Difference
To find an image you need to compare the pixels you are looking for (A) against the pixels in the image (B). If Math.abs(A - B) === 0 then the pixels are the same.
A function to do this may look like the following
function findDif(imageDataSource, imageDataDest, xx, yy) {
const ds = imageDataSource.data;
const dd = imageDataDest.data;
const w = imageDataSource.width;
const h = imageDataSource.height;
var x,y;
var dif = 0;
for(y = 0; y < h; y += 1){
for(x = 0; x < w; x += 1){
var indexS = (x + y * w) * 4;
var indexD = (x + xx + (y + yy) * imageDataDest.width) * 4;
dif += Math.abs(ds[indexS]-dd[indexD]);
dif += Math.abs(ds[indexS + 1]-dd[indexD + 1]);
dif += Math.abs(ds[indexS + 2]-dd[indexD + 2]);
}
}
return dif;
}
var source = sourceCanvas.getContext("2d").getImageData(0,0,sourceCanvas.width,sourceCanvas.height);
var dest = destinationCanvas.getContext("2d").getImageData(0,0,destinationCanvas.width,destinationCanvas.height);
if(findDif(source,dest,100,100) < threshold){ // is the image at 100,100? (threshold = max allowed difference)
// Yes, the image is very similar
}
Where the source is the image we are looking for and the dest is the image we want to find it in. We run the function for every location where the image may be, and if the result is under a threshold then there's a good chance we have found it.
But this is very very slow in JS. This is where the GPU can help.
Using the ctx.globalCompositeOperation = "difference"; operation we can speed up the process, as it will do the difference calculation for us.
When you render with the comp operation "difference", the resulting pixels are the difference between the pixels you are drawing and those already on the canvas. Thus if you draw onto something identical, the result is all black pixels (no difference).
To find a similar image within the image, you render the image you are testing for at each canvas location you want to test. Then you get the sum of all the pixels you just rendered on; if the result is under a threshold that you have set, the image under that area is very similar to the image you are testing for.
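In code, testing one location looks roughly like this (a sketch; workCanvas, imageA, imageB, testX and testY are placeholders for your own canvases and coordinates):
// draw the search image with comp mode "difference" at one candidate spot
const wctx = workCanvas.getContext("2d");
wctx.drawImage(imageB, 0, 0);                  // the image to search in
wctx.globalCompositeOperation = "difference";
wctx.drawImage(imageA, testX, testY);          // the image we are looking for
wctx.globalCompositeOperation = "source-over"; // restore the default
// the area under (testX, testY) now holds |A - B|; near-black means a match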
But we still need to count all the pixels one by one.
A GPU mean function
The comp op "difference" already does the pixel difference calculation for you, but to get the sum you can use the inbuilt image smoothing.
After you have rendered to find the difference, you take that area and render it at a smaller scale with ctx.imageSmoothingEnabled = true (the default setting). The GPU will do something similar to an average and can reduce the amount of work JS has to do by several orders of magnitude.
Now instead of 100s or 1000s of pixels you can reduce it down to as little as 4 or 16, depending on the accuracy you need.
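A minimal sketch of that reduction step (the 64x64 source area and 4x4 target are example sizes): draw the difference area into a tiny canvas, let the smoothing average it down, and read back only a handful of pixels.
// reduce a 64x64 difference area to a 4x4 mean using the GPU
const small = document.createElement("canvas");
small.width = small.height = 4;
const sctx = small.getContext("2d");
sctx.imageSmoothingEnabled = true; // the default, shown here for clarity
sctx.drawImage(workCanvas, testX, testY, 64, 64, 0, 0, 4, 4);
const mean = sctx.getImageData(0, 0, 4, 4).data; // only 16 pixels left to sum in JS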
An example.
Using these methods you can get a near realtime image in image search with just the basic numerical analysis.
Click to start a test. Results are shown plus the time it took. The image that is being searched for is in the top right.
//------------------------------------------------------------------------
// Some helper functions
var imageTools = (function () {
var tools = {
canvas(width, height) { // create a blank image (canvas)
var c = document.createElement("canvas");
c.width = width;
c.height = height;
return c;
},
createImage : function (width, height) {
var i = this.canvas(width, height);
i.ctx = i.getContext("2d");
return i;
},
image2Canvas(img) {
var i = this.canvas(img.width, img.height);
i.ctx = i.getContext("2d");
i.ctx.drawImage(img, 0, 0);
return i;
},
copyImage(img){ // just a named stub
return this.image2Canvas(img);
},
};
return tools;
})();
const U = undefined;
const doFor = (count, callback) => {var i = 0; while (i < count && callback(i ++) !== true ); };
const setOf = (count, callback) => {var a = [],i = 0; while (i < count) { a.push(callback(i ++)) } return a };
const randI = (min, max = min + (min = 0)) => (Math.random() * (max - min) + min) | 0;
const rand = (min, max = min + (min = 0)) => Math.random() * (max - min) + min;
const randA = (array) => array[(Math.random() * array.length) | 0];
const randG = (min, max = min + (min = 0)) => Math.random() * Math.random() * Math.random() * Math.random() * (max - min) + min;
// end of helper functions
//------------------------------------------------------------------------
function doit(){
document.body.innerHTML = ""; // clear the page;
var canvas = document.createElement("canvas");
document.body.appendChild(canvas);
var ctx = canvas.getContext("2d");
// a grid of 36 images
canvas.width = 6 * 64;
canvas.height = 6 * 64;
console.log("test");
// get a random character to look for
const digit = String.fromCharCode("A".charCodeAt(0) + randI(26));
// get some characters we don't want
const randomDigits = setOf(6,i=>{
return String.fromCharCode("A".charCodeAt(0) + randI(26));
})
randomDigits.push(digit); // add the image we are looking for
var w = canvas.width;
var h = canvas.height;
// create a canvas for the image we are looking for
const imageToFind = imageTools.createImage(64,64);
// and a larger one to cover pixels on the sides
const imageToFindExtend = imageTools.createImage(128,128);
// Draw the character onto the image with a white background and scaled to fit
imageToFindExtend.ctx.fillStyle = imageToFind.ctx.fillStyle = "White";
imageToFind.ctx.fillRect(0,0,64,64);
imageToFindExtend.ctx.fillRect(0,0,128,128);
ctx.font = imageToFind.ctx.font = "64px arial black";
ctx.textAlign = imageToFind.ctx.textAlign = "center";
ctx.textBaseline = imageToFind.ctx.textBaseline = "middle";
const digWidth = imageToFind.ctx.measureText(digit).width+8;
const scale = Math.min(1,64/digWidth);
imageToFind.ctx.fillStyle = "black";
imageToFind.ctx.setTransform(scale,0,0,scale,32,32);
imageToFind.ctx.fillText(digit,0,0);
imageToFind.ctx.setTransform(1,0,0,1,0,0);
imageToFindExtend.ctx.drawImage(imageToFind,32,32);
imageToFind.extendedImage = imageToFindExtend;
// Now fill the canvas with images of other characters
ctx.fillStyle = "white";
ctx.setTransform(1,0,0,1,0,0);
ctx.fillRect(0,0,w,h);
ctx.fillStyle = "black";
ctx.strokeStyle = "white";
ctx.lineJoin = "round";
ctx.lineWidth = 12;
// some characters will be rotated 90,180,-90 deg
const dirs = [
[1,0,0,1,0,0],
[0,1,-1,0,1,0],
[-1,0,0,-1,1,1],
[0,-1,1,0,0,1],
]
// draw random characters at random directions
doFor(h / 64, y => {
doFor(w / 64, x => {
const dir = randA(dirs)
ctx.setTransform(dir[0] * scale,dir[1] * scale,dir[2] * scale,dir[3] * scale,x * 64 + 32, y * 64 + 32);
const d = randA(randomDigits);
ctx.strokeText(d,0,0);
ctx.fillText(d,0,0);
});
});
ctx.setTransform(1,0,0,1,0,0);
// get a copy of the canvas
const saveCan = imageTools.copyImage(ctx.canvas);
// function that finds the images
// image is the image to find
// dir is the matrix direction to find
// sampleSize is the mean sampling size; smaller numbers are quicker
function checkFor(image,dir,sampleSize){
const can = imageTools.copyImage(saveCan);
const c = can.ctx;
const stepx = 64;
const stepy = 64;
// the image that will contain the reduced means of the differences
const results = imageTools.createImage(Math.ceil(w / stepx) * sampleSize,Math.ceil(h / stepy) * sampleSize);
const e = image.extendedImage;
// for each potential image location
// set a clip area and draw the source image on it with
// comp mode "difference";
for(var y = 0 ; y < h; y += stepy ){
for(var x = 0 ; x < w; x += stepx ){
c.save();
c.beginPath();
c.rect(x,y,stepx,stepy);
c.clip();
c.globalCompositeOperation = "difference";
c.setTransform(dir[0],dir[1],dir[2],dir[3],x +32 ,y +32 );
c.drawImage(e,-64,-64);
c.restore();
}
}
// Apply the mean (reducing the number of pixels to check)
results.ctx.drawImage(can,0,0,results.width,results.height);
// get the pixel data
var dat = new Uint32Array(results.ctx.getImageData(0,0,results.width,results.height).data.buffer);
// for each area get the sum of the difference
for(var y = 0; y < results.height; y += sampleSize){
for(var x = 0; x < results.width; x += sampleSize){
var val = 0;
for(var yy = 0; yy < sampleSize && y+yy < results.height; yy += 1){
var i = x + (y+yy)*results.width;
for(var xx = 0; xx < sampleSize && x + xx < results.width ; xx += 1){
val += dat[i++] & 0xFF;
}
}
// if the sum is under the threshold we have found an image
// and we mark it
if(val < sampleSize * sampleSize * 5){
ctx.strokeStyle = "red";
ctx.fillStyle = "rgba(255,0,0,0.5)";
ctx.lineWidth = 2;
ctx.strokeRect(x * (64/sampleSize),y * (64/sampleSize),64,64);
ctx.fillRect(x * (64/sampleSize),y * (64/sampleSize),64,64);
foundCount += 1;
}
}
}
}
var foundCount = 0;
// find the images at different orientations
var now = performance.now();
checkFor(imageToFind,dirs[0],4);
checkFor(imageToFind,dirs[1],6); // rotated images need larger sample size
checkFor(imageToFind,dirs[2],6);
checkFor(imageToFind,dirs[3],6);
var time = performance.now() - now;
var result = document.createElement("div");
result.textContent = "Found "+foundCount +" matching images in "+time.toFixed(3)+"ms. Click to try again.";
document.body.appendChild(result);
// show the image we are looking for
imageToFind.style.left = (64*6 + 16) + "px";
imageToFind.id = "lookingFor";
document.body.appendChild(imageToFind);
}
document.addEventListener("click",doit);
canvas {
border : 2px solid black;
position : absolute;
top : 28px;
left : 2px;
}
#lookingFor {
border : 4px solid red;
}
div {
border : 2px solid black;
position : absolute;
top : 2px;
left : 2px;
}
Click to start test.
Not perfect
The example is not perfect and will sometimes make mistakes. There is a huge amount of room for improving both the accuracy and the speed. This is just something I threw together as an example to show how to use the GPU via the 2D API. Some further maths will be needed to find the statistically good results.
This method can also work for different scales and rotations; you can even use some of the other comp modes to remove colour and normalize contrast. I have used a very similar approach to stabilize webcam footage by tracking points from one frame to the next, and for a variety of other image-tracking uses.

How to show a spectrum while music is playing?

I need to show a spectrum analyser while music is playing.
Right now, the spectrum drawing is separate from the audio player.
Once the file is ready (after loading with XHR) and the song is playing, the spectrum should be drawn in sync with it.
var audioBuffer;
var sourceNode;
var analyser;
var javascriptNode;
var actx = new(AudioContext || webkitAudioContext)(), tid,
url = "https://cdn.rawgit.com/epistemex/free-music-for-test-and-demo/master/music/kf_colibris.mp3";
var ctx = $("#canvas").get()[0].getContext("2d");
var gradient = ctx.createLinearGradient(0,0,0,300);
gradient.addColorStop(1,'#000000');
gradient.addColorStop(0.75,'#ff0000');
gradient.addColorStop(0.25,'#ffff00');
gradient.addColorStop(0,'#ffffff');
setupAudioNodes();
function setupAudioNodes() {
// setup a javascript node
javascriptNode = actx.createScriptProcessor(2048, 1, 1);
// connect to destination, else it isn't called
javascriptNode.connect(actx.destination);
// set up an analyser
analyser = actx.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
// create a buffer source node
sourceNode = actx.createBufferSource();
sourceNode.connect(analyser);
analyser.connect(javascriptNode);
sourceNode.connect(actx.destination);
}
// load the specified sound
function playSound(buffer) {
sourceNode.buffer = buffer;
sourceNode.start(0);
}
// log if an error occurs
function onError(e) {
console.log(e);
}
// when the javascript node is called
// we use information from the analyzer node
// to draw the volume
javascriptNode.onaudioprocess = function() {
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
// set the fill style
ctx.fillStyle=gradient;
drawSpectrum(array);
}
function drawSpectrum(array) {
for ( var i = 0; i < (array.length); i++ ){
var value = array[i];
ctx.fillRect(i*5,325-value,3,325);
// console.log([i,value])
}
};
// old draw --------------------------------------------------------
// STEP 1: Load audio file using AJAX ----------------------------------
loadXHR(url, decode);
tid = setInterval(function() {document.querySelector("div").innerHTML += "."}, 500);
function loadXHR(url, callback) {
try {
var xhr = new XMLHttpRequest();
xhr.open("GET", url);
xhr.responseType = "arraybuffer";
xhr.onerror = function() {console.log("Network error.")};
xhr.onload = function() {
if (xhr.status === 200) callback(xhr.response);
else console.log("Loading error:" + xhr.statusText);
};
xhr.send();
} catch (err) {console.log(err.message)}
}
// STEP 2: Decode the audio file ---------------------------------------
function decode(buffer) {
clearInterval(tid);
document.querySelector("div").innerHTML = "Decoding file...";
actx.decodeAudioData(buffer, split);
}
// STEP 3: Split the buffer --------------------------------------------
function split(abuffer) {
document.querySelector("div").innerHTML = "Splitting...";
setTimeout(function() { // to allow DOM to update status-text
// calc number of segments and segment length
var channels = abuffer.numberOfChannels,
duration = abuffer.duration,
rate = abuffer.sampleRate,
segmentLen = 10,
count = Math.floor(duration / segmentLen),
offset = 0,
// block = 10 * rate;
block = abuffer.length;
// while(count--) {
var url = URL.createObjectURL(bufferToWave(abuffer, offset, block));
var audio = new Audio(url);
audio.controls = true;
audio.volume = 0.5;
audio.autoplay = true;
document.body.appendChild(audio);
//offset += block;
// }
document.querySelector("div").innerHTML = "Ready!";
}, 60)
playSound(abuffer);
}
// Convert an audio-buffer segment to a Blob using WAVE representation
function bufferToWave(abuffer, offset, len) {
var numOfChan = abuffer.numberOfChannels,
length = len * numOfChan * 2 + 44,
buffer = new ArrayBuffer(length),
view = new DataView(buffer),
channels = [], i, sample,
pos = 0;
// write WAVE header
setUint32(0x46464952); // "RIFF"
setUint32(length - 8); // file length - 8
setUint32(0x45564157); // "WAVE"
setUint32(0x20746d66); // "fmt " chunk
setUint32(16); // length = 16
setUint16(1); // PCM (uncompressed)
setUint16(numOfChan);
setUint32(abuffer.sampleRate);
setUint32(abuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
setUint16(numOfChan * 2); // block-align
setUint16(16); // 16-bit (hardcoded in this demo)
setUint32(0x61746164); // "data" - chunk
setUint32(length - pos - 4); // chunk length
// write interleaved data
for(i = 0; i < abuffer.numberOfChannels; i++)
channels.push(abuffer.getChannelData(i));
while(pos < length) {
for(i = 0; i < numOfChan; i++) { // interleave channels
sample = Math.max(-1, Math.min(1, channels[i][offset])); // clamp
sample = (0.5 + sample < 0 ? sample * 32768 : sample * 32767)|0; // scale to 16-bit signed int
view.setInt16(pos, sample, true); // update data chunk
pos += 2;
}
offset++ // next source sample
}
// create Blob
return new Blob([buffer], {type: "audio/wav"});
function setUint16(data) {
view.setUint16(pos, data, true);
pos += 2;
}
function setUint32(data) {
view.setUint32(pos, data, true);
pos += 4;
}
}
<canvas id="canvas" width="1000" height="325" style="display: block;"></canvas>
<div>Loading.</div>
Do NOT use a ScriptProcessorNode's onaudioprocess to do visual updates - skip the ScriptProcessor altogether, and use requestAnimationFrame to call getByteFrequencyData and do visual updates.
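A minimal sketch of that approach, reusing analyser, ctx, gradient and drawSpectrum from the code above:
// no ScriptProcessorNode needed; poll the analyser once per display frame
var freqData = new Uint8Array(analyser.frequencyBinCount);
function drawLoop() {
    requestAnimationFrame(drawLoop);
    analyser.getByteFrequencyData(freqData);
    ctx.clearRect(0, 0, 1000, 325);
    ctx.fillStyle = gradient;
    drawSpectrum(freqData);
}
drawLoop();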

Getting pixel data on setInterval with canvas

I want to build an animated alphabet, made up of particles. Basically, the particles transform from one letter shape to another.
My idea is to draw the letters as text on the canvas very quickly (for a single frame), get the pixel data, and move the particles to the correct locations on a setInterval. I have this code for scanning the screen right now:
var ctx = canvas.getContext('2d'),
width = ctx.canvas.width,
height = ctx.canvas.height,
particles = [],
gridX = 8,
gridY = 8;
function Particle(x, y) {
this.x = x;
this.y = y;
}
// fill some text
ctx.font = 'bold 80px sans-serif';
ctx.fillStyle = '#ff0';
ctx.fillText("STACKOVERFLOW", 5, 120);
// now parse bitmap based on grid
var idata = ctx.getImageData(0, 0, width, height);
// use a 32-bit buffer as we are only checking if a pixel is set or not
var buffer32 = new Uint32Array(idata.data.buffer);
// using two loops here, single loop with index-to-x/y is also an option
for(var y = 0; y < height; y += gridY) {
for(var x = 0; x < width; x += gridX) {
//buffer32[] will have a value > 0 (true) if set, if not 0=false
if (buffer32[y * width + x]) {
particles.push(new Particle(x, y));
}
}
}
// render particles
ctx.clearRect(0, 0, width, height);
particles.forEach(function(p) {
ctx.fillRect(p.x - 2, p.y - 2, 4, 4); // just squares here
})
But this way I am only showing one word, without any changes over time. Also, I want to initially set up something like 200 particles and reorganise them based on the pixel data, not create them on each scan. How would you rewrite the code so that every 1500 ms I can pass a different letter and render it with particles?
Hopefully the different parts of this code are clear enough: there are particles that can draw and update, fillParticle spawns particles out of a text string, and spawnChars gets a new part of the text rendered on a regular basis.
It is working quite well, play with the parameters if you wish, they are all at the start of the fiddle.
You might want to make this code cleaner, by avoiding globals and creating classes.
http://jsbin.com/jecarupiri/1/edit?js,output
// --------------------
// parameters
var text = 'STACKOVERFLOW';
var fontHeight = 80;
var gridX = 4,
gridY = 4;
var partSize = 2;
var charDelay = 400; // time between two chars, in ms
var spread = 80; // max distance from start point to final point
var partSpeed = 0.012;
// --------------------
var canvas = document.getElementById('cv'),
ctx = canvas.getContext('2d'),
width = ctx.canvas.width,
height = ctx.canvas.height,
particles = [];
ctx.translate(0.5,0.5);
// --------------------
// Particle class
function Particle(startX, startY, finalX, finalY) {
this.speed = partSpeed*(1+Math.random()*0.5);
this.x = startX;
this.y = startY;
this.startX = startX;
this.startY = startY;
this.finalX =finalX;
this.finalY =finalY;
this.parameter = 0;
this.draw = function() {
ctx.fillRect(this.x - partSize*0.5, this.y - partSize*0.5, partSize, partSize);
};
this.update = function(p) {
if (this.parameter>=1) return;
this.parameter += this.speed;
if (this.parameter>=1) this.parameter=1;
var par = this.parameter;
this.x = par*this.finalX + (1-par)*this.startX;
this.y = par*this.finalY + (1-par)*this.startY;
};
}
// --------------------
// Text spawner
function fillParticle(text, offx, offy, spread) {
// fill some text
tmpCtx.clearRect(0,0,tmpCtx.canvas.width, tmpCtx.canvas.height);
tmpCtx.font = 'bold ' + fontHeight +'px sans-serif';
tmpCtx.fillStyle = '#A40';
tmpCtx.textBaseline ='top';
tmpCtx.textAlign='left';
tmpCtx.fillText(text, 0, 0);
//
var txtWidth = Math.floor(tmpCtx.measureText(text).width);
// now parse bitmap based on grid
var idata = tmpCtx.getImageData(0, 0, txtWidth, fontHeight);
// use a 32-bit buffer as we are only checking if a pixel is set or not
var buffer32 = new Uint32Array(idata.data.buffer);
// using two loops here, single loop with index-to-x/y is also an option
for(var y = 0; y < fontHeight; y += gridY) {
for(var x = 0; x < txtWidth; x += gridX) {
//buffer32[] will have a value > 0 (true) if set, if not 0=false
if (buffer32[y * txtWidth + x]) {
particles.push(new Particle(offx + x+Math.random()*spread - 0.5*spread,
offy + y+Math.random()*spread - 0.5*spread, offx+x, offy+y));
}
}
}
return txtWidth;
}
var tmpCv = document.createElement('canvas');
// uncomment for debug
//document.body.appendChild(tmpCv);
var tmpCtx = tmpCv.getContext('2d');
// --------------------------------
// spawn the chars of the text one by one
var charIndex = 0;
var lastSpawnDate = -1;
var offX = 30;
var offY = 30;
function spawnChars() {
if (charIndex>= text.length) return;
if (Date.now()-lastSpawnDate < charDelay) return;
offX += fillParticle(text[charIndex], offX, offY, spread);
lastSpawnDate = Date.now();
charIndex++;
}
// --------------------------------
function render() {
// render particles
particles.forEach(function(p) { p.draw();
});
}
function update() {
particles.forEach(function(p) { p.update(); } );
}
// --------------------------------
// animation
function animate(){
requestAnimationFrame(animate);
ctx.clearRect(0, 0, width, height);
render();
update();
//
spawnChars();
}
// launch :
animate();
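To come back to the asker's fixed-pool idea (roughly 200 particles reassigned every 1500 ms): instead of pushing new particles in fillParticle, you could scan the next letter into a target list and retarget the existing pool. A hedged sketch; scanNextLetter is a hypothetical function that returns the {x, y} cells found by the bitmap scan above:
// reuse the existing particles instead of spawning new ones
function retarget(targets) {
    particles.forEach(function(p, i) {
        var t = targets[i % targets.length]; // wrap around if counts differ
        p.startX = p.x;                      // depart from the current position
        p.startY = p.y;
        p.finalX = t.x;
        p.finalY = t.y;
        p.parameter = 0;                     // restart the interpolation
    });
}
// setInterval(function() { retarget(scanNextLetter()); }, 1500);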