Lock zoom of WMTS tiles in OpenLayers

I'm using tiles from the IGN Géoportail WMTS to render a background map, as outlined in this example:
// Imports for the OpenLayers (ol) modules used below
import WMTS from 'ol/source/WMTS';
import WMTSTileGrid from 'ol/tilegrid/WMTS';
import TileLayer from 'ol/layer/Tile';
import {get as getProjection} from 'ol/proj';
import {getWidth} from 'ol/extent';

// Generate resolutions and matrix IDs for zoom levels 0-17
var resolutions = [];
var matrixIds = [];
var proj3857 = getProjection('EPSG:3857');
var maxResolution = getWidth(proj3857.getExtent()) / 256;
for (var i = 0; i < 18; i++) {
  matrixIds[i] = i.toString();
  resolutions[i] = maxResolution / Math.pow(2, i);
}

var tileGrid = new WMTSTileGrid({
  origin: [-20037508, 20037508],
  resolutions: resolutions,
  matrixIds: matrixIds
});

var ign_source = new WMTS({
  url: 'https://wxs.ign.fr/pratique/geoportail/wmts',
  layer: 'GEOGRAPHICALGRIDSYSTEMS.MAPS',
  matrixSet: 'PM',
  format: 'image/jpeg',
  projection: 'EPSG:3857',
  tileGrid: tileGrid,
  style: 'normal',
  attributions: '<a href="http://www.geoportail.fr/" target="_blank">' +
    '<img src="https://api.ign.fr/geoportail/api/js/latest/' +
    'theme/geoportal/img/logo_gp.gif"></a>'
});

var ign = new TileLayer({
  source: ign_source,
  opacity: 0.7
});
At zoom level 12, the map shows the detail I need:
However, at zoom level 13 the tiles switch to a less detailed map style, with too little detail for my use case:
Is there a way to tell OpenLayers to "lock" the tile zoom to 12 while still allowing zooming past that threshold, with the extra zoom rendered by OpenLayers instead of requested from the WMTS?
In other words: let OpenLayers do all the zooming and always fetch zoom level 12 tiles from the WMTS.
Thanks for your help!

This will restrict the tilegrid to zoom level (matrix id) 13:
var tileGrid = new WMTSTileGrid({
  origin: [-20037508, 20037508],
  resolutions: resolutions.slice(13, 14),
  matrixIds: matrixIds.slice(13, 14)
});
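With only one resolution left in the tile grid, OpenLayers requests every tile from that single matrix and scales it client-side to whatever resolution the view is at. Here is a minimal sketch of how this could be wired up, assuming the ign layer from the question; the centre coordinates and zoom limits are illustrative, not from the original post:
import Map from 'ol/Map';
import View from 'ol/View';

var map = new Map({
  target: 'map',
  layers: [ign],
  view: new View({
    center: [261900, 6250800], // roughly Paris in EPSG:3857 (illustrative)
    zoom: 14,                  // the view can still zoom freely...
    minZoom: 10,
    maxZoom: 18                // ...while the source keeps serving the locked matrix
  })
});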

Related

Google map API image overlay in vector map with tilt and heading

I place the image on the map using the custom overlay example at
https://developers.google.com/maps/documentation/javascript/customoverlays
but when I rotate the map, the overlay deforms. Is it possible to fix this?
I understand that this happens because the draw method positions the overlay using only two points, the south-west and north-east corners:
draw() {
  // We use the south-west and north-east
  // coordinates of the overlay to peg it to the correct position and size.
  // To do this, we need to retrieve the projection from the overlay.
  const overlayProjection = this.getProjection();
  // Retrieve the south-west and north-east coordinates of this overlay
  // in LatLngs and convert them to pixel coordinates.
  // We'll use these coordinates to resize the div.
  const sw = overlayProjection.fromLatLngToDivPixel(
    this.bounds.getSouthWest()
  );
  const ne = overlayProjection.fromLatLngToDivPixel(
    this.bounds.getNorthEast()
  );
  // Resize the image's div to fit the indicated dimensions.
  if (this.div) {
    this.div.style.left = sw.x + "px";
    this.div.style.top = ne.y + "px";
    this.div.style.width = ne.x - sw.x + "px";
    this.div.style.height = sw.y - ne.y + "px";
    this.div.style.transform = 'rotate(' + this.angle + 'deg)';
  }
}
Because of this, the image is deformed. But I don't know how to specify four separate corner vertices for an image.
I solved it using WebGLOverlayView:
https://developers.google.com/maps/documentation/javascript/webgl/webgl-overlay-view
// scene, camera and renderer are shared between the overlay callbacks
let scene, camera, renderer;

const webGLOverlayView = new google.maps.WebGLOverlayView();

webGLOverlayView.onAdd = () => {
  scene = new THREE.Scene();
  const fov = 180;
  const aspect = 2; // the canvas default
  const near = 1;
  const far = 1;
  camera = new THREE.PerspectiveCamera(fov, aspect, near, far);
  const width = 800;
  const height = 400;
  const widthSegments = 800;
  const heightSegments = 400;
  const geometry = new THREE.PlaneBufferGeometry(width, height, widthSegments, heightSegments);
  const loader = new THREE.TextureLoader();
  const texture = loader.load('img920C.jpg');
  texture.magFilter = THREE.NearestFilter;
  texture.minFilter = THREE.NearestMipMapNearestFilter;
  texture.anisotropy = 16;
  var material = new THREE.MeshBasicMaterial({ map: texture });
  var cube = new THREE.Mesh(geometry, material);
  cube.name = 'cube';
  scene.add(cube);
};

webGLOverlayView.onContextRestored = ({gl}) => {
  renderer = new THREE.WebGLRenderer({
    canvas: gl.canvas,
    context: gl,
    antialias: false,
    ...gl.getContextAttributes(),
  });
  renderer.autoClear = false;
  renderer.antialias = false;
  const controls = new THREE.OrbitControls(camera, renderer.domElement);
};

webGLOverlayView.onDraw = ({gl, transformer}) => {
  // getPosition() returns a LatLng, whose lat/lng are methods
  const latLngAltitudeLiteral = {
    lat: marker.getPosition().lat(),
    lng: marker.getPosition().lng(),
    altitude: 0
  };
  const matrix = transformer.fromLatLngAltitude(latLngAltitudeLiteral);
  camera.projectionMatrix = new THREE.Matrix4().fromArray(matrix);
  webGLOverlayView.requestRedraw();
  renderer.render(scene, camera);
  renderer.resetState();
};

webGLOverlayView.setMap(map);
It looks like this is the correct approach, but I don't know how to handle mouse events. I would like to receive a mousedown/click event for the WebGLOverlayView.
Something similar to:
webGLOverlayView.addListener('click', function(){console.log("ok")});

image using cordova plugin looks horrible on canvas

I am using Cordova for my iOS app, which captures the image. The code looks like this:
navigator.camera.getPicture(onSuccessCamera, onFailureCamera, {
  quality: 25,
  destinationType: navigator.camera.DestinationType.DATA_URL,
  correctOrientation: true,
  allowEdit: false
});

function onSuccessCamera(imageURI) {
  var imgData = "data:image/jpeg;base64," + imageURI;
  uploadFile(imgData);
}

function uploadFile(file) {
  var c = document.getElementById("picture");
  c.width = window.innerWidth - 50; // offset to prevent image flowing out of frame
  // window.alert(window.innerWidth + ":" + window.innerHeight); 414:736
  c.height = "330"; // window.innerHeight; // this.height;
  var ctx = c.getContext("2d");
  var showImg = new Image();
  showImg.onload = function() {
    var ratio = 1;
    if (this.height > c.height) {
      ratio = c.height / this.height;
    } else if (this.width > c.width) {
      ratio = c.width / this.width;
    }
    ctx.drawImage(this, 0, 0, this.width * ratio, this.height * ratio);
    // window.alert(c.width + ':' + c.height);
  };
  showImg.src = file;
}
I don't know why the image looks so bad.
It's because of the Retina screen.
Make the canvas's width and height double the target size, but then style it back down to half that size in CSS pixels:
c.width = (window.innerWidth - 50) * 2; // offset to prevent image flowing out of frame
c.height = 330 * 2; // window.innerHeight; // this.height;
c.style.width = (c.width / 2) + "px";
c.style.height = (c.height / 2) + "px";
Also, consider using a higher value for the camera's quality option.
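For completeness, here is a minimal sketch of how the fix might look folded back into the original uploadFile function. It uses window.devicePixelRatio instead of a hard-coded factor of 2 and scales the context so the drawing code can keep working in CSS pixels; the "picture" canvas id and the 330px height come from the question, the rest is illustrative:
function uploadFile(file) {
  var c = document.getElementById("picture");
  var dpr = window.devicePixelRatio || 1;   // 2 on Retina screens
  var cssWidth = window.innerWidth - 50;    // layout size in CSS pixels
  var cssHeight = 330;
  c.width = cssWidth * dpr;                 // backing store in device pixels
  c.height = cssHeight * dpr;
  c.style.width = cssWidth + "px";          // displayed size stays the same
  c.style.height = cssHeight + "px";
  var ctx = c.getContext("2d");
  ctx.scale(dpr, dpr);                      // keep drawing coordinates in CSS pixels
  var showImg = new Image();
  showImg.onload = function() {
    // shrink to fit the canvas while preserving the aspect ratio
    var ratio = Math.min(cssWidth / this.width, cssHeight / this.height, 1);
    ctx.drawImage(this, 0, 0, this.width * ratio, this.height * ratio);
  };
  showImg.src = file;
}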

Google Maps V3 tooltip popup with dynamic auto-positioning

I'm looking for a popup that works with Google Maps V3 and auto-positions itself in relation to the marker so that the whole popup window is always visible within the map viewport. I'm trying to hack the InfoBox library to make this work, but it's proving to be a big hassle. I've also looked at qTip2, which shows some promise but has shortcomings, such as positioning that must be set manually.
EDIT: The solution must not pan the map to show the popup window.
I found SmartInfoWindow in a quick search at the V3 Demo Gallery. It seems like it does just what you want. Here's the google code project.
I was able to add on to InfoBox to get it to do what I wanted, with a little help from SmartInfoWindow. This is a partially customized solution, so you may need to tweak the positioning. You grab the div position of each corner of the InfoBox, convert those corners back to lat/lng, then grab the map's bounds and check whether the corners are inside them. If they aren't, you adjust the InfoBox position depending on which corners are off the map.
InfoBox.prototype.maybePanMap = function() {
  // if we go beyond the map, pan the map
  var map = this.getMap();
  var projection = this.getProjection();
  var bounds = map.getBounds();
  if (!bounds) return;
  // The dimensions of the infowindow
  var iwWidth = this.div_.offsetWidth;
  var iwHeight = this.div_.offsetHeight;
  // The offset position of the infowindow
  var iwOffsetX = this.pixelOffset_.width;
  var iwOffsetY = this.pixelOffset_.height;
  var anchorPixel = projection.fromLatLngToDivPixel(this.getPosition());
  var bl = new google.maps.Point(anchorPixel.x + iwOffsetX,
      anchorPixel.y + iwOffsetY);
  var br = new google.maps.Point(anchorPixel.x + iwOffsetX + iwWidth,
      anchorPixel.y + iwOffsetY);
  var tl = new google.maps.Point(anchorPixel.x + iwOffsetX,
      anchorPixel.y + iwOffsetY - iwHeight + 100);
  var tr = new google.maps.Point(anchorPixel.x + iwOffsetX + iwWidth,
      anchorPixel.y + iwOffsetY - iwHeight + 100);
  var sw = projection.fromDivPixelToLatLng(bl);
  var se = projection.fromDivPixelToLatLng(br);
  var nw = projection.fromDivPixelToLatLng(tl);
  var ne = projection.fromDivPixelToLatLng(tr);
  if (!map.getBounds().contains(nw) && !map.getBounds().contains(sw)) {
    this.div_.style.left = (anchorPixel.x + 10) + 'px';
    if (!map.getBounds().contains(ne)) {
      this.div_.style.top = (anchorPixel.y - 100) + 'px';
    }
  } else if (!map.getBounds().contains(nw) && !map.getBounds().contains(ne)) {
    this.div_.style.top = (anchorPixel.y - 100) + 'px';
    if (!map.getBounds().contains(se)) {
      this.div_.style.left = (anchorPixel.x - iwWidth - 10) + 'px';
    }
  } else if (!map.getBounds().contains(ne) && !map.getBounds().contains(se)) {
    this.div_.style.left = (anchorPixel.x - iwWidth - 10) + 'px';
    if (!map.getBounds().contains(sw)) {
      this.div_.style.top = (anchorPixel.y - iwHeight - 100) + 'px';
    }
  }
};
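The override above only repositions the box, so it still has to be called once the InfoBox has actually been rendered. A hedged sketch of one way to trigger it, assuming an existing infoBox instance and the 'domready' event the InfoBox library fires after its div is attached to the map:
google.maps.event.addListener(infoBox, 'domready', function() {
  // div_, offsetWidth and offsetHeight are only reliable once the box is in the DOM
  infoBox.maybePanMap();
});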

HTML5 canvas: is there a way to resize image with "nearest neighbour" resampling?

I have some JS that does some manipulations with images. I want pixel-art-like graphics, so I had to enlarge the original images in a graphics editor.
But I think it would be a better idea to do all the manipulations on the small image and then enlarge it with HTML5 functionality. This would save a bunch of processing time (because right now my demo (warning: the domain name may cause some issues at work, etc.) takes extremely long to load in Firefox, for example).
But when I try to resize the image, it gets resampled bicubically. How can I make it resize the image without resampling? Is there a cross-browser solution?
image-rendering: -webkit-optimize-contrast; /* webkit */
image-rendering: -moz-crisp-edges; /* Firefox */
http://phrogz.net/tmp/canvas_image_zoom.html can provide a fallback case using canvas and getImageData. In short:
// Create an offscreen canvas, draw an image to it, and fetch the pixels
var offtx = document.createElement('canvas').getContext('2d');
offtx.canvas.width = img1.width;   // size the offscreen canvas to the source image
offtx.canvas.height = img1.height;
offtx.drawImage(img1, 0, 0);
var imgData = offtx.getImageData(0, 0, img1.width, img1.height).data;

// Draw the zoomed-up pixels to a different canvas context
// (ctx2 is the destination context and zoom the integer scale factor,
//  both assumed to be defined elsewhere)
for (var x = 0; x < img1.width; ++x) {
  for (var y = 0; y < img1.height; ++y) {
    // Find the starting index in the one-dimensional image data
    var i = (y * img1.width + x) * 4;
    var r = imgData[i];
    var g = imgData[i + 1];
    var b = imgData[i + 2];
    var a = imgData[i + 3];
    ctx2.fillStyle = "rgba(" + r + "," + g + "," + b + "," + (a / 255) + ")";
    ctx2.fillRect(x * zoom, y * zoom, zoom, zoom);
  }
}
More: MDN docs on image-rendering
I wrote a NN resizing script a while ago using ImageData (around line 1794)
https://github.com/arahaya/ImageFilters.js/blob/master/imagefilters.js
You can see a demo here
http://www.arahaya.com/imagefilters/
Unfortunately, the built-in resizing should be slightly faster.
This CSS on the canvas element works:
image-rendering: pixelated;
This works in Chrome 93, as of September 2021.
You can simply set context.imageSmoothingEnabled to false. This will make everything drawn with context.drawImage() resize using nearest neighbor.
// the canvas to resize
const canvas = document.createElement("canvas");
// the canvas to output to
const canvas2 = document.createElement("canvas");
const context2 = canvas2.getContext("2d");
// disable image smoothing
context2.imageSmoothingEnabled = false;
// draw image from the canvas
context2.drawImage(canvas, 0, 0, canvas2.width, canvas2.height);
This has better support than using image-rendering: pixelated.
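If you need to cover older browser versions, the same flag historically lived behind vendor prefixes, so a defensive sketch might set all of them (the prefixed properties are legacy and harmless where unsupported):
// disable smoothing, including legacy vendor-prefixed variants
context2.imageSmoothingEnabled = false;
context2.mozImageSmoothingEnabled = false;    // older Firefox
context2.webkitImageSmoothingEnabled = false; // older Chrome/Safari
context2.msImageSmoothingEnabled = false;     // older Edge/IE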
I'll echo what others have said: it's not a built-in function. After running into the same issue, I've written one, below.
It loops through each source pixel and paints it as an enlarged square with fillRect(). Everything is commented to help you better understand how it works.
// img is the original image, scale is a multiplier. It returns the resized image.
function Resize_Nearest_Neighbour(img, scale) {
  // make shortcuts for image width and height
  var w = img.width;
  var h = img.height;

  //---------------------------------------------------------------
  // draw the original image to a new canvas
  //---------------------------------------------------------------
  // set up the canvas
  var c = document.createElement("CANVAS");
  var ctx = c.getContext("2d");
  // disable antialiasing on the canvas
  ctx.imageSmoothingEnabled = false;
  // size the canvas to match the input image
  c.width = w;
  c.height = h;
  // draw the input image
  ctx.drawImage(img, 0, 0);
  // get the input image as image data
  var inputImg = ctx.getImageData(0, 0, w, h);
  // get the data array from the canvas image data
  var data = inputImg.data;

  //---------------------------------------------------------------
  // resize the canvas to our bigger output image
  //---------------------------------------------------------------
  c.width = w * scale;
  c.height = h * scale;

  //---------------------------------------------------------------
  // convert the RGB numbers into a hex string, i.e. [255, 10, 100]
  // into "FF0A64"
  //---------------------------------------------------------------
  function _Dex_To_Hex(number) {
    var out = number.toString(16);
    if (out.length < 2) {
      out = "0" + out;
    }
    return out;
  }

  //---------------------------------------------------------------
  // loop through all the data, painting each pixel larger
  //---------------------------------------------------------------
  for (var i = 0; i < data.length; i += 4) {
    // find the colour of this particular pixel
    var colour = "#";
    for (var colourIndex = 0; colourIndex < 3; colourIndex++) {
      colour += _Dex_To_Hex(data[i + colourIndex]);
    }
    // set the fill colour
    ctx.fillStyle = colour;

    //---------------------------------------------------------------
    // convert the index in the data array to x and y coordinates
    //---------------------------------------------------------------
    var index = i / 4;
    var x = index % w;
    // ~~ is a faster way to do 'Math.floor'
    var y = ~~(index / w);

    //---------------------------------------------------------------
    // draw an enlarged rectangle on the enlarged canvas
    //---------------------------------------------------------------
    ctx.fillRect(x * scale, y * scale, scale, scale);
  }

  // get the output image from the canvas
  var output = c.toDataURL("image/png");
  // returns image data that can be plugged into an img tag's src
  return output;
}
Below is an example of it in use.
Your image would appear in the HTML like this:
<img id="pixel-image" src="" data-src="pixel-image.png"/>
The data-src attribute contains the URL of the image you want to enlarge; it is a custom data attribute. The code below takes the image URL from that attribute and puts it through the resizing function, returning a larger image (30x the original size) that is then injected into the src attribute of the img tag.
Remember to put the function Resize_Nearest_Neighbour (above) into the <script> tag before you include the following.
function Load_Image(element) {
  var source = element.getAttribute("data-src");
  var img = new Image();
  img.addEventListener("load", function() {
    var bigImage = Resize_Nearest_Neighbour(this, 30);
    element.src = bigImage;
  });
  img.src = source;
}
Load_Image( document.getElementById("pixel-image") );
There is no built-in way. You have to do it yourself with getImageData.
Based on Paul Irish's comment:
function resizeBase64(base64, zoom) {
  return new Promise(function(resolve, reject) {
    var img = document.createElement("img");
    // once the image has loaded, resize it
    img.onload = function() {
      // get image size
      var imageWidth = img.width;
      var imageHeight = img.height;
      // create and draw image to our first offscreen canvas
      var canvas1 = document.createElement("canvas");
      canvas1.width = imageWidth;
      canvas1.height = imageHeight;
      var ctx1 = canvas1.getContext("2d");
      ctx1.drawImage(this, 0, 0, imageWidth, imageHeight);
      // get pixel data from the first canvas
      var imgData = ctx1.getImageData(0, 0, imageWidth, imageHeight).data;
      // create a second offscreen canvas at the zoomed size
      var canvas2 = document.createElement("canvas");
      canvas2.width = imageWidth * zoom;
      canvas2.height = imageHeight * zoom;
      var ctx2 = canvas2.getContext("2d");
      // draw the zoomed-up pixels to the second canvas
      for (var x = 0; x < imageWidth; ++x) {
        for (var y = 0; y < imageHeight; ++y) {
          // find the starting index in the one-dimensional image data
          var i = (y * imageWidth + x) * 4;
          var r = imgData[i];
          var g = imgData[i + 1];
          var b = imgData[i + 2];
          var a = imgData[i + 3];
          ctx2.fillStyle = "rgba(" + r + "," + g + "," + b + "," + a / 255 + ")";
          ctx2.fillRect(x * zoom, y * zoom, zoom, zoom);
        }
      }
      // resolve the promise with the zoomed base64 image data
      var dataURI = canvas2.toDataURL();
      resolve(dataURI);
    };
    img.onerror = function(error) {
      reject(error);
    };
    // set the img source
    img.src = base64;
  });
}
resizeBase64(src, 4).then(function(zoomedSrc) {
console.log(zoomedSrc);
});
https://jsfiddle.net/djhyquon/69/

Setting maximum and minimum bounds using Google Maps API

I was wondering if someone could provide an example of how to set maximum and minimum extents using GMaps2. In OpenLayers, here's how it's done:
var point1 = new OpenLayers.Geometry.Point(7, 48);
point1.transform(new OpenLayers.Projection("EPSG:4326"), new OpenLayers.Projection("EPSG:900913"));
var point2 = new OpenLayers.Geometry.Point(11, 54);
point2.transform(new OpenLayers.Projection("EPSG:4326"), new OpenLayers.Projection("EPSG:900913"));
var bounds = new OpenLayers.Bounds();
bounds.extend(point1);
bounds.extend(point2);
bounds.toBBOX();
map = new OpenLayers.Map("map", {
  maxExtent: bounds,
  maxResolution: "auto",
  maxZoomLevel: 8,
  projection: "EPSG:900913",
  controls: []
});
map.addLayer(new OpenLayers.Layer.OSM.Osmarender("Osmarender"));
map.zoomToMaxExtent();
//map.zoomToExtent(bounds);
What would be the GMaps2 equivalent?
The following is the Google Maps API 2.x equivalent to limit the panning of the map to predefined bounds:
// Bounds for North America
var allowedBounds = new GLatLngBounds(new GLatLng(48.197218, -127.529297),
    new GLatLng(28.72913, -68.818359));

function checkBounds() {
  if (allowedBounds.contains(map.getCenter())) {
    return;
  }
  var c = map.getCenter();
  var x = c.lng();
  var y = c.lat();
  var maxX = allowedBounds.getNorthEast().lng();
  var maxY = allowedBounds.getNorthEast().lat();
  var minX = allowedBounds.getSouthWest().lng();
  var minY = allowedBounds.getSouthWest().lat();
  if (x < minX) { x = minX; }
  if (x > maxX) { x = maxX; }
  if (y < minY) { y = minY; }
  if (y > maxY) { y = maxY; }
  map.setCenter(new GLatLng(y, x));
}
GEvent.addListener(map, "move", function() { checkBounds(); });
Thanks for getting back to me, Chris and Daniel. What I meant by 'extents' is that when zoomed all the way out, the world map shouldn't tile: I don't want to see two, three, or even four North Americas and other continents. The world is big enough with just one. When zooming in, I want to set a limit on how far one can zoom to account for our data resolution; zooming in too far returns results outside it. I did manage to find an acceptable solution by restricting the zoom levels. Anyway, FWIW, here's what I came up with. It's a bit less verbose, but it isn't doing exactly what Daniel does in his code:
//===== Restrict Zoom Levels =====
var mapTypes = map.getMapTypes();
// Overwrite the getMinimumResolution() and getMaximumResolution() methods
for (var i = 0; i < mapTypes.length; i++) {
  mapTypes[i].getMinimumResolution = function() { return 2; };
  mapTypes[i].getMaximumResolution = function() { return 9; };
}
I still want to try Daniel's code to see what it does. Special thanks to you for taking the time to write this up Dan.
-=Al