Find distance between two selected coordinates on image using canvas - html

I want to find the length of a line and the radius of a circle drawn on an image, depending on the image width (see the image below).
var canvas = document.getElementById('loadCanvas'),
    lastPos, isDown = false;
ctx = canvas.getContext("2d");
ctx.drawImage(this, 0, 0, canvas.width, canvas.height);
ctx.lineCap = "round";
ctx.lineWidth = $('#canvasSelWidth').val();
ctx.globalCompositeOperation = "multiply";
ctx.strokeStyle = $('#canvasSelColor').val();

canvas.onmousedown = function(e) {
    isDown = true;
    SPos = getPos(e);
    lastPos = getPos(e);
};

window.onmousemove = function(e) {
    if (!isDown) return;
    var pos = getPos(e);
    ctx.beginPath();
    ctx.moveTo(lastPos.x, lastPos.y);
    ctx.lineTo(pos.x, pos.y);
    ctx.stroke();
    lastPos = pos;
};

window.onmouseup = function(e) {
    isDown = false;
    lPos = getPos(e);
    measurementOnImageCanvas();
};

function getPos(e) {
    var rect = canvas.getBoundingClientRect();
    xPosition = e.clientX - rect.left;
    yPosition = e.clientY - rect.top;
    return {x: e.clientX - rect.left, y: e.clientY - rect.top};
}
I use SPos and lPos as the first and last coordinates:
var xCorData = lPos.x - SPos.x
var yCorData = lPos.y - SPos.y
var finalPixel = Math.sqrt( xCorData*xCorData + yCorData*yCorData );
var centimeters = finalPixel * 2.54 / 96;
var mm = centimeters*10;
var inch = mm*0.0393701;
Please help me sort out this problem.

Cannot be done
I am assuming you wish to get the physical size of a pixel on the client machine. Unfortunately there is no way to get the display dimensions.
window.screen.width and window.screen.height will get you the resolution of the display but there is no way of knowing the size of the display. Even if it was possible to get the device brand and model you still do not know if it is using its own display or is plugged into another. Even worse, multi display setups may have two or more different screen sizes so that your canvas has regions where the pixel physical size is different.
All you can do is ask the client to enter the screen dimensions.
Assuming you have the screen diagonal: at the moment I am on a 17.3-inch laptop with a 1680 by 945 pixel display. To get the pixel size, assume that the pixels are square.
const mmPerInch = 25.4; // constant
var screenDiagonal = 17.3 * mmPerInch; // ??? how to get this (17.3) is the problem
var resX = window.screen.width; // nor do you know if the pixel is square
var resY = window.screen.height;
// now get the number of pixels diagonally
if (typeof Math.hypot === "function") { // use the new hypot function if available
    var pixelsDiagonal = Math.hypot(resX, resY);
} else {
    var pixelsDiagonal = Math.sqrt(resX * resX + resY * resY);
}
// then divide the screen size by the pixels to get the pixel size.
var pixelSize_mm = screenDiagonal / pixelsDiagonal;
// result pixel is 0.23 by 0.23 mm
You now have the size of a pixel in mm and can use that to get an accurate measure of the objects you render. But it is no guarantee, as the browser may be zoomed in or out.
To convert from pixels to mm, just multiply the pixel dimensions by the pixel size:
function pixel2mm(pixels) {
    return pixels * pixelSize_mm;
}
Also, asking for the diagonal is no guarantee that the correct value is entered, or even known. And not all pixels are square, which is even harder to find out.
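A minimal sketch of that "ask the client" approach, tying the entered diagonal to the calculation above (the function name and prompt wording are mine, not from the answer):
// Sketch: ask the user for their screen diagonal (in inches) and derive
// an approximate physical pixel size from it.
function askPixelSizeMM() {
    var inches = parseFloat(prompt("Enter your screen diagonal in inches (e.g. 17.3):"));
    if (!inches || inches <= 0) return null;              // unknown or invalid input
    var diagMM = inches * 25.4;                            // inches -> millimetres
    var diagPx = Math.sqrt(screen.width * screen.width +   // pixels along the diagonal
                           screen.height * screen.height);
    return diagMM / diagPx;                                // mm per (assumed square) pixel
}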

If you want to measure things on an image rather than on the screen, you already have all the necessary code; you just need to tie it together.
You have to ASSUME that you are on a 96 DPI device, or take the DPI as a parameter. Giving your canvas a fixed scale is another way to go (as if you were working on a map).
var canvas = document.getElementById('loadCanvas'),
    lastPos, isDown = false;
ctx = canvas.getContext("2d");
ctx.lineCap = "round";
ctx.lineWidth = 2;
ctx.strokeStyle = 'blue';

canvas.onmousedown = function(e) {
    isDown = true;
    SPos = getPos(e);
    lastPos = SPos;
};

window.onmousemove = function(e) {
    if (!isDown) return;
    var pos = getPos(e);
    ctx.clearRect(0, 0, 500, 500);
    ctx.beginPath();
    ctx.moveTo(lastPos.x, lastPos.y);
    ctx.lineTo(pos.x, pos.y);
    ctx.stroke();
    lPos = pos;
};

window.onmouseup = function(e) {
    isDown = false;
    lPos = getPos(e);
    measurementOnImageCanvas();
};

function getPos(e) {
    var rect = canvas.getBoundingClientRect();
    xPosition = e.clientX - rect.left;
    yPosition = e.clientY - rect.top;
    return {x: e.clientX - rect.left, y: e.clientY - rect.top};
}

function measurementOnImageCanvas() {
    var xCorData = lPos.x - SPos.x;
    var yCorData = lPos.y - SPos.y;
    var finalPixel = Math.sqrt(xCorData * xCorData + yCorData * yCorData);
    var inches = finalPixel / 96;
    var centimeters = inches * 2.54;
    var millimeters = centimeters * 10;
    alert('line length:\n' + inches.toFixed(2) + ' inches\n' + centimeters.toFixed(4) + ' centimeters\n' + millimeters.toFixed(2) + ' millimeters\nAssuming you are on a 96 DPI device');
}
<canvas id="loadCanvas" width=500 height=500 />

Related

HTML Canvas to WPF XAML Canvas

I have an ASP.NET application that allows users to click or tap on a Canvas to indicate pain locations on a body image. A body image is displayed on the Canvas and is the same size as the Canvas.
function drawBodyMap() {
var c = document.getElementById('myCanvas');
var ctx = c.getContext('2d');
var imageObj = new Image();
imageObj.src = 'https://.../body.jpg';
imageObj.onload = function () {
ctx.drawImage(imageObj, 0, 0, 600, 367);
};
}
<canvas id="myCanvas" width="600" height="367"></canvas>
<script>
function getMousePos(canvas, evt) {
var rect = canvas.getBoundingClientRect();
return {
x: evt.clientX - rect.left,
y: evt.clientY - rect.top
};
}
var canvas = document.getElementById('myCanvas');
var ctx = canvas.getContext('2d');
canvas.addEventListener('mouseup', function (evt) {
if (ixPos > 9)
return;
var mousePos = getMousePos(canvas, evt);
bodyX[ixPos] = mousePos.x;
bodyY[ixPos] = mousePos.y;
painType[ixPos] = pain_type;
ixPos++;
ctx.beginPath();
ctx.arc(mousePos.x, mousePos.y, 8, 0, 2 * Math.PI);
if (pain_type == 1)
ctx.fillStyle = "#DC143C";
else if (pain_type == 2)
ctx.fillStyle = "#EA728A";
else if (pain_type == 3)
ctx.fillStyle = "#DAA520";
else if (pain_type == 4)
ctx.fillStyle = "#008000";
else if (pain_type == 5)
ctx.fillStyle = "#4169E1";
ctx.fill();
}, false);
</script>
The X,Y points added to the Canvas on the body image are saved to a database. These points are then loaded into a WPF application that displays the same body image on an XAML Canvas. C# code then adds the points over the image.
WPF CODE:
private void DisplayBodyPain()
{
List<BodyPain> pain = gFunc.sws.GetBodyPain(MemberID);
foreach (BodyPain bp in pain)
{
Border b = new Border();
b.Tag = bp.PainType.ToString();
b.Cursor = Cursors.Hand;
b.Width = 16;
b.Height = 16;
b.CornerRadius = new CornerRadius(8);
b.Background = GetPainBrush((byte)bp.PainType);
cvsBody.Children.Add(b);
Canvas.SetTop(b, bp.YPos);
Canvas.SetLeft(b, bp.XPos);
}
}
The problem I have is that the points drawn on the XAML Canvas are all slightly different from the points that were drawn on the HTML Canvas. Each point is not in exactly the same location.
Is there a way I can fix this? Should I be doing it differently?
(Screenshots: HTML Canvas / WPF Canvas)
I think you need to subtract the size of the marker from the coordinate where you want to place it. For the last two lines, try this instead:
Canvas.SetTop(b, bp.YPos - (b.Height / 2));
Canvas.SetLeft(b, bp.XPos - (b.Width / 2));
By subtracting half the marker's height and width, the center of the marker is placed on the desired coordinates.

How to get bounding box coordinates for canvas content?

I have a canvas with a map, and on that canvas the user is able to draw (in red).
After the user has painted whatever he wants, I need to calculate the bounding box coordinates of all the drawn content.
I could loop through every pixel of the canvas and calculate the bounding box based on every non-empty pixel, but this is quite a heavy operation. Any idea of a better logic to achieve the intended result?
You can track what is being drawn and the diameter of the points, then min/max that for the boundary.
One way to do this is to track the position and radius (for a brush) or boundary (for an irregular shape) of whatever is being drawn, then merge that with the current min/max bounds, updating them when needed and in effect "pushing" the bounds outward so they always contain the drawing.
Example
var ctx = c.getContext("2d"),
div = document.querySelector("div > div"),
// keep track of min/max for each axis
minX = Number.MAX_SAFE_INTEGER,
minY = Number.MAX_SAFE_INTEGER,
maxX = Number.MIN_SAFE_INTEGER,
maxY = Number.MIN_SAFE_INTEGER,
// brush/draw stuff for demo
radius = 10,
rect = c.getBoundingClientRect(),
isDown = false;
ctx.fillText("Draw something here..", 10, 10);
ctx.fillStyle = "red";
c.onmousedown = function() {isDown = true};
window.onmouseup = function() {isDown = false};
window.onmousemove = function(e) {
if (isDown) {
var x = e.clientX - rect.left;
var y = e.clientY - rect.top;
// When something is drawn, calculate its impact (position and radius)
var _minX = x - radius;
var _minY = y - radius;
var _maxX = x + radius;
var _maxY = y + radius;
// calc new min/max boundary
if (_minX < minX) minX = _minX > 0 ? _minX : 0;
if (_minY < minY) minY = _minY > 0 ? _minY : 0;
if (_maxX > maxX) maxX = _maxX < c.width ? _maxX : c.width;
if (_maxY > maxY) maxY = _maxY < c.height ? _maxY : c.height;
// show new bounds
showBounds();
// draw something
ctx.beginPath();
ctx.arc(x, y, radius, 0, 6.28);
ctx.fill();
}
};
function showBounds() {
// for demo, using bounds for display purposes (inclusive bound)
div.style.cssText =
"left:" + minX + "px;top:" + minY +
"px;width:" + (maxX-minX-1) + "px;height:" + (maxY-minY-1) +
"px;border:1px solid blue";
}
div {position:relative}
div > div {position:absolute;pointer-events:none}
<div>
<canvas id=c width=600 height=600></canvas>
<div></div>
</div>
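Once the bounds are tracked you can, for example, crop the drawing out of the canvas without scanning any pixels. A rough sketch (the helper name is mine; it assumes minX/minY/maxX/maxY are maintained as in the snippet above):
// Sketch: copy just the tracked bounding box into a new canvas.
function cropToBounds(srcCanvas) {
    var w = maxX - minX, h = maxY - minY;
    if (w <= 0 || h <= 0) return null;   // nothing drawn yet
    var out = document.createElement("canvas");
    out.width = w;
    out.height = h;
    // draw only the bounded region of the source canvas
    out.getContext("2d").drawImage(srcCanvas, minX, minY, w, h, 0, 0, w, h);
    return out;
}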

How to render a d3.js map on canvas free of blurriness

Try as I might, I've been unable to render a d3.js county map without causing the map to blur significantly.
I'm using the usual tricks: My canvas style width is half that of my attribute width. I translate the context of the drawing half a pixel to offset any unwanted effects.
But it's still terribly blurry.
Can someone share the pattern for a crisp d3.js map made for canvas elements?
function drawQuintiles() {
var width = 960,
height = 500;
var projection = d3.geo.albers()
.scale(666);
var canvas = d3.select("#quintiles")
.append("canvas")
.attr("class",'canvasarea');
var context = canvas.node().getContext("2d");
var ratio = (window.devicePixelRatio / context.webkitBackingStorePixelRatio) || 1;
d3.select('.canvasarea')
.attr("width", width * ratio).attr("height", height * ratio)
.style("width", width + "px").style("height", height + "px");
context.scale(ratio, ratio);
var path = d3.geo.path()
.projection(projection)
.context(context);
d3.json("/data/us-counties.json", function(error, us) {
if (error) throw error;
context.strokeStyle = '#333';
context.beginPath();
var strokeWidth = 0.5;
var iTranslate = (strokeWidth % 2) / 2;
context.translate(iTranslate, 0);
context.lineWidth = strokeWidth;
context.lineCap = "round";
path(topojson.feature(us, us.objects.counties));
context.stroke();
});
}
This is the code I ended up with. Removing the scale and the translate hack got the map rendering properly.
function drawQuintiles() {
var width = 1600;
d3.json("/data/us-counties.json", function(error, data) {
var projection = d3.geo.albersUsa();
var path = d3.geo.path().projection(projection);
var tracts = topojson.feature(data, data.objects.counties);
projection.scale(1).translate([0, 0]);
var b = path.bounds(tracts);
var whRatio = ((b[1][0] - b[0][0]) / (b[1][1] - b[0][1]));
var height = (width / 2) * whRatio;
var s = .98 / Math.max((b[1][0] - b[0][0]) / width, (b[1][1] - b[0][1]) / height),
t = [(width - s * (b[1][0] + b[0][0])) / 2, (height - s * (b[1][1] + b[0][1])) / 2];
projection.scale(s).translate(t);
var canvas = d3.select("#quintiles")
.append("canvas")
.attr("class",'canvasarea');
var context = canvas.node().getContext("2d");
var ratio = window.devicePixelRatio || 1;
d3.select('.canvasarea')
.attr("width", width ).attr("height", height )
.style("width", ((width * ratio) ) + "px").style("height", ((height * ratio) ) + "px");
var path = d3.geo.path()
.projection(projection)
.context(context);
if (error) throw error;
context.strokeStyle = '#333';
context.beginPath();
var strokeWidth = 0.5;
context.lineWidth = strokeWidth;
context.lineCap = "round";
path(topojson.feature(data, data.objects.counties));
context.stroke();
});
}
drawQuintiles();
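For reference, the usual pattern for crisp canvas rendering on high-DPI screens (independent of d3 and of the code above) is to make the backing store larger than the CSS size and scale the context once; a sketch:
// Sketch: size the backing store by devicePixelRatio, keep the CSS size at the
// layout size, and scale the context so drawing code can keep using CSS pixels.
function setupCrispCanvas(canvas, cssWidth, cssHeight) {
    var ratio = window.devicePixelRatio || 1;
    canvas.width = cssWidth * ratio;        // backing store (device pixels)
    canvas.height = cssHeight * ratio;
    canvas.style.width = cssWidth + "px";   // layout size (CSS pixels)
    canvas.style.height = cssHeight + "px";
    var context = canvas.getContext("2d");
    context.scale(ratio, ratio);
    return context;
}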

Checking width of string in html canvas

Hello all, I've tried relentlessly to write a script that will check the width of a string and, if it is greater than a certain amount, lower the font size and draw it again. Except this is not working: once I initially draw the text, it never updates to the correct font size. It just stays what it was originally and never gets any smaller, throwing the script into an endless loop. Here is my code:
var nFontSize = 25;
var draw_name = function(text) {
    var c = document.getElementById("badgeCanvas");
    var ctx = c.getContext("2d");
    ctx.font = "nFontSize OpenSansBold";
    ctx.fillStyle = "#000000";
    ctx.fillText(text, 80, 25);
    var metrics = ctx.measureText(text);
    var width = metrics.width;
    while (width > 200) {
        nFontSize--;
        ctx.font = "nFontSize OpenSansBold";
        ctx.fillText(text, 80, 25);
        metrics = ctx.measureText(text);
        width = metrics.width;
    }
    ctx.font = "nFontSize OpenSansBold";
    ctx.fillStyle = "#000000";
    ctx.fillText(text, 80, 25);
};
"nFontSize OpenSansBold" is not a valid font. Try
ctx.font= nFontSize + "px OpenSansBold";
Also, you don't have to render the text in order to measure it, so remove the ctx.fillText(text,80,25); calls before and inside your while loop. Otherwise, it will look ugly.
var nFontSize = 25;
var draw_name = function(text) {
    var c = document.getElementById("badgeCanvas");
    var ctx = c.getContext("2d");
    ctx.font = nFontSize + "px OpenSansBold";
    ctx.fillStyle = "#000000";
    var metrics = ctx.measureText(text);
    var width = metrics.width;
    while (width > 200) {
        nFontSize--;
        ctx.font = nFontSize + "px OpenSansBold";
        metrics = ctx.measureText(text);
        width = metrics.width;
    }
    ctx.font = nFontSize + "px OpenSansBold";
    ctx.fillStyle = "#000000";
    ctx.fillText(text, 80, 25);
};
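As a side note, since measured text width grows roughly linearly with the font size, you could also skip the loop and compute the fitting size in one step (a sketch, not part of the answer above):
// Sketch: scale the font size proportionally so the text fits maxWidth.
function fitFontSize(ctx, text, startSize, maxWidth) {
    ctx.font = startSize + "px OpenSansBold";
    var width = ctx.measureText(text).width;
    if (width <= maxWidth) return startSize;
    // width is (approximately) proportional to font size
    return Math.floor(startSize * maxWidth / width);
}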

HTML5 canvas: is there a way to resize image with "nearest neighbour" resampling?

I have some JS that does some manipulations with images. I want pixel-art-like graphics, so I had to enlarge the original images in a graphics editor.
But I think it would be a good idea to make all the manipulations on the small image and then enlarge it with HTML5 functionality. This would save a bunch of processing time (because right now my demo (warning: the domain name may cause some issues at work etc.) loads extremely slowly in Firefox, for example).
But when I try to resize the image, it gets resampled bicubically. How do I make it resize the image without resampling? Is there any cross-browser solution?
image-rendering: -webkit-optimize-contrast; /* webkit */
image-rendering: -moz-crisp-edges /* Firefox */
http://phrogz.net/tmp/canvas_image_zoom.html can provide a fallback case using canvas and getImageData. In short:
// Create an offscreen canvas, draw an image to it, and fetch the pixels
var offtx = document.createElement('canvas').getContext('2d');
offtx.drawImage(img1, 0, 0);
var imgData = offtx.getImageData(0, 0, img1.width, img1.height).data;

// Draw the zoomed-up pixels to a different canvas context
for (var x = 0; x < img1.width; ++x) {
    for (var y = 0; y < img1.height; ++y) {
        // Find the starting index in the one-dimensional image data
        var i = (y * img1.width + x) * 4;
        var r = imgData[i];
        var g = imgData[i + 1];
        var b = imgData[i + 2];
        var a = imgData[i + 3];
        ctx2.fillStyle = "rgba(" + r + "," + g + "," + b + "," + (a / 255) + ")";
        ctx2.fillRect(x * zoom, y * zoom, zoom, zoom);
    }
}
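The snippet above assumes that img1 (a loaded image), zoom (an integer scale factor) and ctx2 (the destination context) already exist; a minimal setup could look like this (the element id is made up for the example):
// Sketch of the setup the snippet expects (names match the snippet).
var img1 = document.getElementById('source');   // an already-loaded <img>
var zoom = 4;                                   // integer enlargement factor
var canvas2 = document.createElement('canvas');
canvas2.width = img1.width * zoom;
canvas2.height = img1.height * zoom;
document.body.appendChild(canvas2);
var ctx2 = canvas2.getContext('2d');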
More: MDN docs on image-rendering
I wrote a NN resizing script a while ago using ImageData (around line 1794)
https://github.com/arahaya/ImageFilters.js/blob/master/imagefilters.js
You can see a demo here
http://www.arahaya.com/imagefilters/
Unfortunately, the built-in resizing should be slightly faster.
This CSS on the canvas element works:
image-rendering: pixelated;
This works in Chrome 93, as of September 2021.
You can simply set context.imageSmoothingEnabled to false. This will make everything drawn with context.drawImage() resize using nearest neighbor.
// the canvas to resize
const canvas = document.createElement("canvas");
// the canvas to output to
const canvas2 = document.createElement("canvas");
const context2 = canvas2.getContext("2d");
// disable image smoothing
context2.imageSmoothingEnabled = false;
// draw image from the canvas
context2.drawImage(canvas, 0, 0, canvas2.width, canvas2.height);
This has better support than using image-rendering: pixelated.
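On older browsers this property was vendor-prefixed, so a defensive version can set the legacy prefixed variants as well (these prefixed names are obsolete and may simply be ignored by current browsers):
// Sketch: disable smoothing, including legacy vendor-prefixed properties.
function disableSmoothing(ctx) {
    ctx.imageSmoothingEnabled = false;        // standard
    ctx.mozImageSmoothingEnabled = false;     // old Firefox
    ctx.webkitImageSmoothingEnabled = false;  // old Chrome/Safari
    ctx.msImageSmoothingEnabled = false;      // old IE/Edge
}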
I'll echo what others have said and tell you it's not a built-in function. After running into the same issue, I've made one below.
It uses fillRect() instead of looping through each pixel and painting it. Everything is commented to help you better understand how it works.
//img is the original image, scale is a multiplier. It returns the resized image.
function Resize_Nearest_Neighbour( img, scale ){
//make shortcuts for image width and height
var w = img.width;
var h = img.height;
//---------------------------------------------------------------
//draw the original image to a new canvas
//---------------------------------------------------------------
//set up the canvas
var c = document.createElement("CANVAS");
var ctx = c.getContext("2d");
//disable antialiasing on the canvas
ctx.imageSmoothingEnabled = false;
//size the canvas to match the input image
c.width = w;
c.height = h;
//draw the input image
ctx.drawImage( img, 0, 0 );
//get the input image as image data
var inputImg = ctx.getImageData(0,0,w,h);
//get the data array from the canvas image data
var data = inputImg.data;
//---------------------------------------------------------------
//resize the canvas to our bigger output image
//---------------------------------------------------------------
c.width = w * scale;
c.height = h * scale;
//---------------------------------------------------------------
//loop through all the data, painting each pixel larger
//---------------------------------------------------------------
for ( var i = 0; i < data.length; i+=4 ){
//find the colour of this particular pixel
var colour = "#";
//---------------------------------------------------------------
//convert the RGB numbers into a hex string. i.e. [255, 10, 100]
//into "FF0A64"
//---------------------------------------------------------------
function _Dex_To_Hex( number ){
var out = number.toString(16);
if ( out.length < 2 ){
out = "0" + out;
}
return out;
}
for ( var colourIndex = 0; colourIndex < 3; colourIndex++ ){
colour += _Dex_To_Hex( data[ i+colourIndex ] );
}
//set the fill colour
ctx.fillStyle = colour;
//---------------------------------------------------------------
//convert the index in the data array to x and y coordinates
//---------------------------------------------------------------
var index = i/4;
var x = index % w;
//~~ is a faster way to do 'Math.floor'
var y = ~~( index / w );
//---------------------------------------------------------------
//draw an enlarged rectangle on the enlarged canvas
//---------------------------------------------------------------
ctx.fillRect( x*scale, y*scale, scale, scale );
}
//get the output image from the canvas
var output = c.toDataURL("image/png");
//returns image data that can be plugged into an img tag's src
return output;
}
Below is an example of it in use.
Your image would appear in the HTML like this:
<img id="pixel-image" src="" data-src="pixel-image.png"/>
The data-src tag contains the URL for the image you want to enlarge. This is a custom data tag. The code below will take the image URL from the data tag and put it through the resizing function, returning a larger image (30x the original size) which then gets injected into the src attribute of the img tag.
Remember to put the function Resize_Nearest_Neighbour (above) into the <script> tag before you include the following.
function Load_Image( element ){
var source = element.getAttribute("data-src");
var img = new Image();
img.addEventListener("load", function(){
var bigImage = Resize_Nearest_Neighbour( this, 30 );
element.src = bigImage;
});
img.src = source;
}
Load_Image( document.getElementById("pixel-image") );
There is no built-in way. You have to do it yourself with getImageData.
Based on Paul Irish's comment:
function resizeBase64(base64, zoom) {
return new Promise(function(resolve, reject) {
var img = document.createElement("img");
// once image loaded, resize it
img.onload = function() {
// get image size
var imageWidth = img.width;
var imageHeight = img.height;
// create and draw image to our first offscreen canvas
var canvas1 = document.createElement("canvas");
canvas1.width = imageWidth;
canvas1.height = imageHeight;
var ctx1 = canvas1.getContext("2d");
ctx1.drawImage(this, 0, 0, imageWidth, imageHeight);
// get pixel data from first canvas
var imgData = ctx1.getImageData(0, 0, imageWidth, imageHeight).data;
// create second offscreen canvas at the zoomed size
var canvas2 = document.createElement("canvas");
canvas2.width = imageWidth * zoom;
canvas2.height = imageHeight * zoom;
var ctx2 = canvas2.getContext("2d");
// draw the zoomed-up pixels to the second canvas
for (var x = 0; x < imageWidth; ++x) {
for (var y = 0; y < imageHeight; ++y) {
// find the starting index in the one-dimensional image data
var i = (y * imageWidth + x) * 4;
var r = imgData[i];
var g = imgData[i + 1];
var b = imgData[i + 2];
var a = imgData[i + 3];
ctx2.fillStyle = "rgba(" + r + "," + g + "," + b + "," + a / 255 + ")";
ctx2.fillRect(x * zoom, y * zoom, zoom, zoom);
}
}
// resolve promise with the zoomed base64 image data
var dataURI = canvas2.toDataURL();
resolve(dataURI);
};
img.onerror = function(error) {
reject(error);
};
// set the img source
img.src = base64;
});
}
resizeBase64(src, 4).then(function(zoomedSrc) {
console.log(zoomedSrc);
});
https://jsfiddle.net/djhyquon/69/