In my app I have multiple open peer connections, and I want to be able to mute the microphone at the peer-connection level, not globally (as is done here).
Chrome is straightforward:
Call removeStream when muting
Call addStream when unmuting
Downside: I understand that we are moving towards an addTrack/removeTrack world, so this solution is not compatible with other browsers or with the future.
Firefox does not work at all:
removeTrack/addTrack requires renegotiation, which is not acceptable because it takes time.
replaceTrack does not require renegotiation, and my idea would be to have an empty MediaStreamTrack for mute that I could use to replace the former MediaStreamTrack (sketched below). Any idea how to do that?
Alternatively, any ideas for a viable Firefox solution, a nicer Chrome solution, or a unified approach?
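For reference, a minimal, untested sketch of that replaceTrack idea (pc and originalMicTrack are placeholders for your RTCPeerConnection and the real microphone track; the silent track comes from an AudioContext destination node with nothing connected to it):
function silentAudioTrack() {
  var ctx = new AudioContext();
  var dst = ctx.createMediaStreamDestination();
  return dst.stream.getAudioTracks()[0]; // carries only silence
}
var sender = pc.getSenders().find(function (s) {
  return s.track && s.track.kind === "audio";
});
sender.replaceTrack(silentAudioTrack()); // mute this connection only
// later: sender.replaceTrack(originalMicTrack); // unmute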
The way to do it in Firefox (and Chrome, and the future) is to clone the tracks, which gives you independent track.enabled controls:
var track1, track2;
navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
var clone = stream.clone();
track1 = stream.getAudioTracks()[0];
track2 = clone.getAudioTracks()[0];
})
var toggle = track => track.enabled = !track.enabled;
Try it below (use https fiddle in Chrome):
var track1, track2;
navigator.mediaDevices.getUserMedia({audio: true}).then(stream => {
var clone = stream.clone();
track1 = stream.getAudioTracks()[0];
track2 = clone.getAudioTracks()[0];
return Promise.all([spectrum(stream), spectrum(clone)]);
}).catch(e => console.log(e));
var toggle = track => track && (track.enabled = !track.enabled);
var spectrum = stream => {
var audioCtx = new AudioContext();
var analyser = audioCtx.createAnalyser();
var source = audioCtx.createMediaStreamSource(stream);
source.connect(analyser);
var canvas = document.createElement("canvas");
var canvasCtx = canvas.getContext("2d");
canvas.width = window.innerWidth/2 - 20;
canvas.height = window.innerHeight/2 - 20;
document.body.appendChild(canvas);
var data = new Uint8Array(canvas.width);
canvasCtx.strokeStyle = 'rgb(0, 125, 0)';
setInterval(() => {
canvasCtx.fillStyle = "#a0a0a0";
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
analyser.getByteFrequencyData(data);
canvasCtx.lineWidth = 2;
data.forEach((y, x) => {
y = canvas.height - (y / 128) * canvas.height / 4;
var c = Math.floor((x*255)/canvas.width);
canvasCtx.fillStyle = "rgb("+c+",0,"+(255-x)+")";
canvasCtx.fillRect(x, y, 2, canvas.height - y)
});
analyser.getByteTimeDomainData(data);
canvasCtx.lineWidth = 5;
canvasCtx.beginPath();
data.forEach((y, x) => {
y = canvas.height - (y / 128) * canvas.height / 2;
x ? canvasCtx.lineTo(x, y) : canvasCtx.moveTo(x, y);
});
canvasCtx.stroke();
var bogus = source; // avoid GC or the whole thing stops
}, 1000 * canvas.width / audioCtx.sampleRate);
};
<button onclick="toggle(track1)">Mute A</button>
<button onclick="toggle(track2)">Mute B</button><br>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
Then feed the two tracks to different peer connections. This works with video mute too.
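For example, a minimal sketch of that last step (pc1 and pc2 are placeholders for your two RTCPeerConnection instances; stream, clone, track1 and track2 come from the snippet above):
pc1.addTrack(track1, stream);
pc2.addTrack(track2, clone);
// mute only the first connection; the second keeps sending audio
track1.enabled = false;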
It's possible to load a multichannel WAV file (i.e. a first-order ambisonics file) using an HTML <audio> element, and create a MediaElementAudioSourceNode from that element.
Regarding the channelCount property of that MediaElementAudioSourceNode, the documentation states that it will depend on the HTMLMediaElement the node was created from, but when I try this the channel count is always 2.
Is it possible to create a node whose channel count corresponds to the number of channels of the loaded file?
Best,
N
To answer my own question: it seems all channels are loaded and played back, you just need to adjust some values. I set the channel count to 4 and the channel interpretation to discrete, then used a channel splitter to route the channels individually to an analyzer, et voilà, I can see all the different waveforms. Example below.
<html>
<body>
<h1> Multitrack Experiment </h1>
<p>This is a little experiment to see if all channels of a multichannel audio file are played back,
which they are.</p>
<p>The audio file contains four different waveforms on four channels. (0 -> sine, 1 -> saw, 2 -> square, 3 -> noise)</p>
<audio src="tracktest.wav"></audio>
<button>
<span>Play/Pause</span>
</button>
<br/>
<br/>
<canvas id="oscilloscope"></canvas>
</body>
<script>
// for legacy browsers
//const AudioContext = window.AudioContext || window.webkitAudioContext;
var playing = false;
const audioContext = new AudioContext();
// get the audio element
const audioElement = document.querySelector('audio');
// pass it into the audio context
// configure the mediaelement source correctly
// (otherwise it still shows 2 channels)
// also change channel interpretation while you're at it ...
const track = audioContext.createMediaElementSource(audioElement);
track.channelCount = 4;
track.channelInterpretation = 'discrete';
// just for monitoring purposes
track.connect(audioContext.destination);
// split the channels so each one can be routed individually
const splitter = audioContext.createChannelSplitter(4);
track.connect(splitter);
const analyser = audioContext.createAnalyser();
analyser.fftSize = 2048;
// uncomment the different options to see the different results ...
splitter.connect(analyser, 0, 0);
//splitter.connect(analyser, 1, 0);
//splitter.connect(analyser, 2, 0);
//splitter.connect(analyser, 3, 0);
// select our play button
const playButton = document.querySelector('button');
playButton.addEventListener('click', function() {
// check if context is in suspended state (autoplay policy)
if (audioContext.state === 'suspended') {
audioContext.resume();
}
console.log(track)
// play or pause track depending on state
if (!playing) {
console.log("play");
audioElement.play();
playing = true;
} else {
console.log("stop");
audioElement.pause();
playing = false;
}
}, false);
const bufferLength = analyser.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
analyser.getByteTimeDomainData(dataArray);
// Get a canvas defined with ID "oscilloscope"
const canvas = document.getElementById("oscilloscope");
const canvasCtx = canvas.getContext("2d");
// draw an oscilloscope of the current audio source
function draw() {
requestAnimationFrame(draw);
analyser.getByteTimeDomainData(dataArray);
canvasCtx.fillStyle = "rgb(200, 200, 200)";
canvasCtx.fillRect(0, 0, canvas.width, canvas.height);
canvasCtx.lineWidth = 2;
canvasCtx.strokeStyle = "rgb(0, 0, 0)";
canvasCtx.beginPath();
const sliceWidth = canvas.width * 1.0 / bufferLength;
let x = 0;
for (let i = 0; i < bufferLength; i++) {
const v = dataArray[i] / 128.0;
const y = v * canvas.height / 2;
if (i === 0) {
canvasCtx.moveTo(x, y);
} else {
canvasCtx.lineTo(x, y);
}
x += sliceWidth;
}
canvasCtx.lineTo(canvas.width, canvas.height / 2);
canvasCtx.stroke();
}
draw();
</script>
</html>
I am testing FPS on my laptop, which has an Intel(R) Iris(R) Plus Graphics 655 card.
I am testing the three.js example with instanced rendering and merged-draw-call rendering, using both the QRCode_buffergeometry.json model and the suzanne_buffergeometry.json model.
QRCode_buffergeometry.json: 12852 vertices, 4284 faces
suzanne_buffergeometry.json: 1515 vertices, 967 faces
Then the FPS for suzanne_buffergeometry with a count of 8000:
INSTANCE: 36
MERGED: 43
NATIVE: 23 to 35, depending on rotation
For the QRCode_buffergeometry model with a count of 8000:
INSTANCE: 9
MERGED: 15-17
NATIVE: 17-19
I am very confused with this performance.
1. As far as I understand, no matter whether I use instancing or merged draw calls, the number of draw calls is fixed at 1 and the total number of faces to draw is the same, so why is merging better than instancing? Since the face and vertex counts are the same, I assume the vertex-shader work to transform the vertices should be the same too, so why is merged faster?
2. For the QRCode_buffergeometry model, native is almost the same as merged and better than instanced, so I guess the bottleneck is the GPU rather than the CPU; however, the final drawing data should be the same, I mean the number of faces to draw should eventually be the same, so why is native faster? Isn't instancing supposed to be the best approach? I am pretty sure the camera's far and near planes are big enough, so there should not be any culling issue.
3. When I am trying to optimize a big scene, when should I pick merging, when should I pick instancing, and when is it better to do nothing at all?
Any help?
Thanks a lot~~~
The code for the sample is attached below:
body { margin: 0; }
<div id="container"></div>
<script type="module">
import * as THREE from 'https://cdn.jsdelivr.net/npm/three@0.112.1/build/three.module.js';
import Stats from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/stats.module.js';
import {
GUI
} from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/libs/dat.gui.module.js';
import {
OrbitControls
} from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/controls/OrbitControls.js';
import {
BufferGeometryUtils
} from 'https://cdn.jsdelivr.net/npm/three@0.112.1/examples/jsm/utils/BufferGeometryUtils.js';
var container, stats, gui, guiStatsEl;
var camera, controls, scene, renderer, material;
// gui
var Method = {
INSTANCED: 'INSTANCED',
MERGED: 'MERGED',
NAIVE: 'NAIVE'
};
var api = {
method: Method.INSTANCED,
mesh_number: 1,
count_per_mesh: 1000
};
var modelName = 'suzanne_buffergeometry.json';
var modelScale = (modelName === 'suzanne_buffergeometry.json' ? 1 : 0.01);
var modelVertex = (modelName === 'suzanne_buffergeometry.json' ? 1515 : 12852);
var modelFace = (modelName === 'suzanne_buffergeometry.json' ? 967 : 4284);
//
init();
initMesh();
animate();
//
function clean() {
var meshes = [];
scene.traverse(function(object) {
if (object.isMesh) meshes.push(object);
});
for (var i = 0; i < meshes.length; i++) {
var mesh = meshes[i];
mesh.material.dispose();
mesh.geometry.dispose();
scene.remove(mesh);
}
}
var randomizeMatrix = function() {
var position = new THREE.Vector3();
var rotation = new THREE.Euler();
var quaternion = new THREE.Quaternion();
var scale = new THREE.Vector3();
return function(matrix) {
position.x = Math.random() * 40 - 20;
position.y = Math.random() * 40 - 20;
position.z = Math.random() * 40 - 20;
rotation.x = Math.random() * 2 * Math.PI;
rotation.y = Math.random() * 2 * Math.PI;
rotation.z = Math.random() * 2 * Math.PI;
quaternion.setFromEuler(rotation);
scale.x = scale.y = scale.z = Math.random() * modelScale;
matrix.compose(position, quaternion, scale);
};
}();
function initMesh() {
clean();
console.time(api.method + ' (build)');
for (var i = 0; i < api.mesh_number; i++) {
// make instances
new THREE.BufferGeometryLoader()
.setPath('https://threejs.org/examples/models/json/')
.load(modelName, function(geometry) {
material = new THREE.MeshNormalMaterial();
geometry.computeVertexNormals();
switch (api.method) {
case Method.INSTANCED:
makeInstanced(geometry);
break;
case Method.MERGED:
makeMerged(geometry);
break;
case Method.NAIVE:
makeNaive(geometry);
break;
}
});
}
console.timeEnd(api.method + ' (build)');
var drawCalls = 0;
switch (api.method) {
case Method.INSTANCED:
case Method.MERGED:
drawCalls = api.mesh_number;
break;
case Method.NAIVE:
drawCalls = api.mesh_number * api.count_per_mesh;
break;
}
guiStatsEl.innerHTML = [
'<i>GPU draw calls</i>: ' + drawCalls,
'<i>Face Number</i>: ' + (modelFace * api.mesh_number * api.count_per_mesh),
'<i>Vertex Number</i>: ' + (modelVertex * api.mesh_number * api.count_per_mesh)
].join('<br/>');
}
function makeInstanced(geometry, idx) {
var matrix = new THREE.Matrix4();
var mesh = new THREE.InstancedMesh(geometry, material, api.count_per_mesh);
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
mesh.setMatrixAt(i, matrix);
}
scene.add(mesh);
}
function makeMerged(geometry, idx) {
var instanceGeometry;
var geometries = [];
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var instanceGeometry = geometry.clone();
instanceGeometry.applyMatrix(matrix);
geometries.push(instanceGeometry);
}
var mergedGeometry = BufferGeometryUtils.mergeBufferGeometries(geometries);
scene.add(new THREE.Mesh(mergedGeometry, material));
}
function makeNaive(geometry, idx) {
var matrix = new THREE.Matrix4();
for (var i = 0; i < api.count_per_mesh; i++) {
randomizeMatrix(matrix);
var mesh = new THREE.Mesh(geometry, material);
mesh.applyMatrix(matrix);
scene.add(mesh);
}
}
function init() {
var width = window.innerWidth;
var height = window.innerHeight;
// camera
camera = new THREE.PerspectiveCamera(70, width / height, 1, 100);
camera.position.z = 30;
// renderer
renderer = new THREE.WebGLRenderer({
antialias: true
});
renderer.setPixelRatio(window.devicePixelRatio);
renderer.setSize(width, height);
renderer.outputEncoding = THREE.sRGBEncoding;
container = document.getElementById('container');
container.appendChild(renderer.domElement);
// scene
scene = new THREE.Scene();
scene.background = new THREE.Color(0xffffff);
// controls
controls = new OrbitControls(camera, renderer.domElement);
controls.autoRotate = true;
// stats
stats = new Stats();
container.appendChild(stats.dom);
// gui
gui = new GUI();
gui.add(api, 'method', Method).onChange(initMesh);
gui.add(api, 'count_per_mesh', 1, 20000).step(1).onChange(initMesh);
gui.add(api, 'mesh_number', 1, 200).step(1).onChange(initMesh);
var perfFolder = gui.addFolder('Performance');
guiStatsEl = document.createElement('li');
guiStatsEl.classList.add('gui-stats');
perfFolder.__ul.appendChild(guiStatsEl);
perfFolder.open();
// listeners
window.addEventListener('resize', onWindowResize, false);
Object.assign(window, {
scene
});
}
//
function onWindowResize() {
var width = window.innerWidth;
var height = window.innerHeight;
camera.aspect = width / height;
camera.updateProjectionMatrix();
renderer.setSize(width, height);
}
function animate() {
requestAnimationFrame(animate);
controls.update();
stats.update();
render();
}
function render() {
renderer.render(scene, camera);
}
//
function getGeometryByteLength(geometry) {
var total = 0;
if (geometry.index) total += geometry.index.array.byteLength;
for (var name in geometry.attributes) {
total += geometry.attributes[name].array.byteLength;
}
return total;
}
// Source: https://stackoverflow.com/a/18650828/1314762
function formatBytes(bytes, decimals) {
if (bytes === 0) return '0 bytes';
var k = 1024;
var dm = decimals < 0 ? 0 : decimals;
var sizes = ['bytes', 'KB', 'MB'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
</script>
These are only guesses.
Three.js by default culls if things are outside the frustum.
We can turn this off with mesh.frustumCulled = false. I didn't notice a difference and this should show up in the draw count.
Three.js by default sorts opaque objects back to front.
This means everything else being equal, sorted will run faster
than unsorted because of the depth test. If I set the depth test
to always
material.depthFunc = THREE.AlwaysDepth
Then I seem to get slightly faster rendering with instanced vs native. Of course
everything else is not equal.
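A rough sketch of how one might probe those two guesses in the sample above (assumed to run after initMesh() has created material; this is an experiment, not a fix):
// guess 1: rule out frustum culling
scene.traverse(function (object) {
  if (object.isMesh) object.frustumCulled = false;
});
// guess 2: take away any early-z advantage that sorting gives
material.depthFunc = THREE.AlwaysDepth;
material.needsUpdate = true;
// or, alternatively, leave the depth test alone and disable sorting:
// renderer.sortObjects = false;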
There may be an issue in Chrome.
If I run in Firefox or Safari I get the expected results: Merged > Instanced > Native.
It could be a bug, or it could be that they're working around a driver or
security issue that the other browsers are not. You'd have to ask.
I have this HTML code to allow a user to upload a picture on my hybrid iOS app:
<input type="file" accept="image/*">
When selected, the web view gives the user two options:
1. Take a new picture 2. Choose an existing picture.
When the user selects 'Choose an existing picture', everything works fine. But if the user selects 'Take a new picture', the image somehow ends up sideways when it uploads to Firebase Storage.
How can I fix this? It's not ideal, but is there a way to only allow the 'existing picture' option?
Here is my code that processes the image and shows it in an <img>.
function fileUploaded(files) {
  var file = files[0];
  var reader = new FileReader();
  reader.onload = function (e) {
    var img = document.createElement("img");
    img.src = e.target.result;
    img.onload = function () {
      var canvas = document.createElement("canvas");
      // size the canvas first; changing width/height also clears it
      canvas.width = 200;
      canvas.height = 200;
      var ctx = canvas.getContext("2d");
      var xStart = 0;
      var yStart = 0;
      var newHeight = 0;
      var newWidth = 0;
      var aspectRatio = img.height / img.width;
      if (img.height < img.width) {
        // landscape: fill the height, centre horizontally
        aspectRatio = img.width / img.height;
        newHeight = 200;
        newWidth = aspectRatio * 200;
        xStart = -(newWidth - 200) / 2;
      } else {
        // portrait (or square): fill the width, centre vertically
        newWidth = 200;
        newHeight = aspectRatio * 200;
        yStart = -(newHeight - 200) / 2;
      }
      ctx.drawImage(img, xStart, yStart, newWidth, newHeight);
      var dataurl = canvas.toDataURL(file.type);
      document.getElementById('img_element').src = dataurl;
    };
  };
  reader.readAsDataURL(file);
}
Are you displaying the image before uploading? You might want to do that to see if it is already rotated by iOS (similar issue here). As for a fix, this might be what you will want to use.
Edit:
My previous response was somewhat vague. To be more precise, you're having a problem with the EXIF orientation. Here is a JSFiddle that will show you the orientation of the image and how it looks as an <img/> vs. as a <canvas>. The fix previously mentioned is still the path you will probably want to take. You should be able to use the library to correctly orient the image in a <canvas>. Here is an example of that library in use. Hope this helps!
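As an aside, in browsers that support the option, createImageBitmap can apply the EXIF orientation for you before you draw to the canvas; a rough sketch (untested on older iOS WebKit; file is the File from the input, and the 200x200 draw is left uncropped for brevity):
createImageBitmap(file, { imageOrientation: "from-image" }).then(function (bitmap) {
  var canvas = document.createElement("canvas");
  canvas.width = 200;
  canvas.height = 200;
  // the bitmap is already rotated according to the EXIF data, so drawing it
  // gives an upright image (stretched here; reuse the cropping math from the
  // question for a centred crop)
  canvas.getContext("2d").drawImage(bitmap, 0, 0, 200, 200);
  document.getElementById("img_element").src = canvas.toDataURL();
});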
Hi, can you help me set up this code? I'm not so good at HTML5.
<canvas id="canvas">This text is displayed if your browser does not support HTML5 Canvas.</canvas>
$(document).ready(function() {
// initialize some variables for the chart
var canvas = $("#canvas")[0];
var ctx = canvas.getContext('2d');
var data = [75,68,32,95,20,51];
var colors = ["#7E3817", "#C35817", "#EE9A4D", "#A0C544", "#348017", "#307D7E"];
var center = [canvas.width / 2, canvas.height / 2];
var radius = Math.min(canvas.width, canvas.height) / 2;
var lastPosition = 0, total = 0;
var pieData = [];
// total up all the data for chart
for (var i in data) { total += data[i]; }
// populate arrays for each slice
for(var i in data) {
pieData[i] = [];
pieData[i]['value'] = data[i];
pieData[i]['krasa'] = colors[i];
pieData[i]['startAngle'] = 2 * Math.PI * lastPosition;
pieData[i]['endAngle'] = 2 * Math.PI * (lastPosition + (data[i]/total));
lastPosition += data[i]/total;
}
function drawChart()
{
for(var i = 0; i < data.length; i++)
{
var gradient = ctx.createLinearGradient( 0, 0, canvas.width, canvas.height );
gradient.addColorStop( 0, "#ddd" );
gradient.addColorStop( 1, colors[i] );
ctx.beginPath();
ctx.moveTo(center[0],center[1]);
ctx.arc(center[0],center[1],radius,pieData[i]['startAngle'],pieData[i]['endAngle'],false);
ctx.lineTo(center[0],center[1]);
ctx.closePath();
ctx.fillStyle = gradient;
ctx.fill();
ctx.lineWidth = 1;
ctx.strokeStyle = "#fff";
ctx.stroke();
}
}
drawChart(); // first render
});
How do I add a hover effect for each slice?
After you have drawn your wedges to the canvas, they become just pixels in a larger image.
You have no way to track the individual pie wedges at this point. Therefore no way to track hovers on any particular wedge.
But...You do have several options!
Option#1 --- Make your own hit-test to determine which pie wedge you clicked on.
It would look something like this (I HAVE NOT TESTED THIS !!!)
var chartStartAngle=0; // you started drawing the pie at angle 0
function handleChartClick ( clickEvent ) {
// Get the mouse cursor position at the time of the click, relative to the canvas
var mouseX = clickEvent.pageX - this.offsetLeft;
var mouseY = clickEvent.pageY - this.offsetTop;
// Was the click inside the pie chart?
var xFromCenter = mouseX - center[0];
var yFromCenter = mouseY - center[1];
var distanceFromCenter = Math.sqrt( Math.pow( Math.abs( xFromCenter ), 2 ) + Math.pow( Math.abs( yFromCenter ), 2 ) );
if ( distanceFromCenter <= radius ) {
// You clicked inside the chart.
// So get the click angle
var clickAngle = Math.atan2( yFromCenter, xFromCenter ) - chartStartAngle;
if ( clickAngle < 0 ) clickAngle = 2 * Math.PI + clickAngle;
for ( var i in pieData ) {
if ( clickAngle >= pieData[i]['startAngle'] && clickAngle <= pieData[i]['endAngle'] ) {
// You clicked on pieData[i]
// So do your effect here!
return;
}
}
}
}
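To turn that into a hover effect rather than a click, you could run the same hit-test on mousemove; a sketch (assuming the canvas element has id "canvas", as in the question):
var canvasEl = $("#canvas")[0];
// addEventListener passes the event and sets `this` to the canvas,
// which is exactly what handleChartClick expects; in the
// "You clicked on pieData[i]" branch, redraw that wedge with a highlight
// colour (and redraw the chart when the mouse leaves the wedge)
canvasEl.addEventListener("mousemove", handleChartClick);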
Option#2 --- Use a canvas library which lets you keep track of each wedge in your pie chart and therefore do your hover effect. Several good libraries are EaselJS, FabricJS, and KineticJS.
Elated.com has a great tutorial that shows what you're looking for. Check it out: http://www.elated.com/articles/snazzy-animated-pie-chart-html5-jquery/
I'm drawing text on Canvas, and am disappointed with the quality of antialiasing. As far as I've been able to determine, browsers don't do subpixel antialiasing of text on Canvas.
Is this accurate?
This is particularly noticeable on iPhone and Android, where the resulting text isn't as crisp as text rendered by other DOM elements.
Any suggestions for high-quality text output on Canvas?
Joubert
My answer came from this link, maybe it will help someone else.
http://www.html5rocks.com/en/tutorials/canvas/hidpi/
The important code is as follows.
// assumes `canvas` and its 2d `context` have already been created
// query the various pixel ratios
var devicePixelRatio = window.devicePixelRatio || 1,
    backingStoreRatio = context.webkitBackingStorePixelRatio ||
        context.mozBackingStorePixelRatio ||
        context.msBackingStorePixelRatio ||
        context.oBackingStorePixelRatio ||
        context.backingStorePixelRatio || 1,
    ratio = devicePixelRatio / backingStoreRatio;
// upscale the canvas if the two ratios don't match
if (devicePixelRatio !== backingStoreRatio) {
var oldWidth = canvas.width;
var oldHeight = canvas.height;
canvas.width = oldWidth * ratio;
canvas.height = oldHeight * ratio;
canvas.style.width = oldWidth + 'px';
canvas.style.height = oldHeight + 'px';
// now scale the context to counter
// the fact that we've manually scaled
// our canvas element
context.scale(ratio, ratio);
}
Try adding the following META tag to your page. This seems to fix anti-aliasing issues I've had on iPhone Safari:
<meta name="viewport" content="user-scalable=no, width=device-width,
initial-scale=0.5, minimum-scale=0.5, maximum-scale=0.5" />
I realise this is an old question, but I worked on this problem today and got it working nicely. I used Alix Axel's answer above and stripped the code I found there (on the web.archive.org link) down to the bare essentials.
I modified the solution a bit, using two canvases: one hidden canvas for the original text and a second canvas to actually show the anti-aliased text.
Here's what I came up with... http://jsfiddle.net/X2cKa/
The code looks like this;
function alphaBlend(gamma, c1, c2, alpha) {
c1 = c1/255.0;
c2 = c2/255.0;
var c3 = Math.pow(
Math.pow(c1, gamma) * (1 - alpha)
+ Math.pow(c2, gamma) * alpha,
1/gamma
);
return Math.round(c3 * 255);
}
function process(textPixels, destPixels, fg, bg) {
var gamma = 2.2;
for (var y = 0; y < textPixels.height; y ++) {
var history = [255, 255, 255];
var pixel_number = y * textPixels.width;
var component = 0;
for (var x = 0; x < textPixels.width; x ++) {
var alpha = textPixels.data[(y * textPixels.width + x) * 4 + 1] / 255.0;
alpha = Math.pow(alpha, gamma);
history[component] = alpha;
alpha = (history[0] + history[1] + history[2]) / 3;
var out = alphaBlend(gamma, bg[component], fg[component], alpha);
destPixels.data[pixel_number * 4 + component] = out;
/* advance to next component/pixel */
component ++;
if (component == 3) {
pixel_number ++;
component = 0;
}
}
}
}
function toColor(colorString) {
return [parseInt(colorString.substr(1, 2), 16),
parseInt(colorString.substr(3, 2), 16),
parseInt(colorString.substr(5, 2), 16)];
}
function renderOnce() {
var phrase = "Corporate GOVERNANCE"
var c1 = document.getElementById("c1"); //the hidden canvas
var c2 = document.getElementById("c2"); //the canvas
var textSize=40;
var font = textSize+"px Arial"
var fg = "#ff0000";
var bg = "#fff9e1";
var ctx1 = c1.getContext("2d");
var ctx2 = c2.getContext("2d");
ctx1.fillStyle = "rgb(255, 255, 255)";
ctx1.fillRect(0, 0, c1.width, c1.height);
ctx1.save();
ctx1.scale(3, 1);
ctx1.font = font;
ctx1.fillStyle = "rgb(255, 0, 0)";
ctx1.fillText(phrase, 0, textSize);
ctx1.restore();
var textPixels = ctx1.getImageData(0, 0, c1.width, c1.height);
var colorFg = toColor(fg);
var colorBg = toColor(bg);
var destPixels3 = ctx1.getImageData(0, 0, c1.width, c1.height);
process(textPixels, destPixels3, colorBg, colorFg);
ctx2.putImageData(destPixels3, 0, 0);
//for comparison, show the same text without anti-aliasing
ctx2.font = font;
ctx2.fillStyle = "rgb(255, 0, 0)";
ctx2.fillText(phrase, 0, textSize*2);
};
renderOnce();
I also added a comparison text object so that you can see the anti-aliasing working.
Hope this helps someone!
There is some subpixel antialiasing done, but it is up to the browser/OS.
There was a bit of an earlier discussion on this that may be of help to you.
I don't have an android or iOS device but just for kicks, try translating the context by (.5, 0) pixels before you draw and see if that makes a difference in how your text is rendered.
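As a quick, untested sketch of that last suggestion (ctx is assumed to be your 2d canvas context):
ctx.save();
ctx.translate(0.5, 0); // nudge half a pixel horizontally before drawing
ctx.font = "16px sans-serif";
ctx.fillText("Does this look any crisper?", 10, 40);
ctx.restore();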