I'm working on a project to detect objects in GeoTIFF files and return the coordinates of those objects; that output will be used by a drone to fly to those coordinates.
I use TensorFlow with YOLO v2 (an image-detection framework, via darkflow) and OpenCV to detect the objects I need in the GeoTIFF:
import cv2
from darkflow.net.build import TFNet
import math
from osgeo import gdal
# initial stage for YOLO v2
options = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolov2.weights',
    'threshold': 0.4,
}
tfnet = TFNet(options)
# OpenCV read Image
img = cv2.imread('final.tif', cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#Predict the image
result = tfnet.return_predict(img)
# Calculate the center and radius of each object
for det in result:
    tl = (det['topleft']['x'], det['topleft']['y'])
    br = (det['bottomright']['x'], det['bottomright']['y'])
    point = (int((tl[0] + br[0]) / 2), int((tl[1] + br[1]) / 2))
    radius = int(math.hypot(tl[0] - point[0], tl[1] - point[1]))
    label = det['label']
    det['pointx'] = point[0]
    det['pointy'] = point[1]
    det['radius'] = radius
print(result)
So the results come out as a list of JSON-like dicts:
[{'label': 'person', 'confidence': 0.6090355, 'topleft': {'x': 3711, 'y': 1310}, 'bottomright': {'x': 3981, 'y': 1719}, 'pointx': 3846, 'pointy': 1514, 'radius': 244}]
As you can see, the location of each object is returned in pixels (x, y), and I want to convert these x, y values to coordinates in lat/lng.
So I'm trying to use GDAL (the library for reading the geo information contained inside the image).
Here's the geo information of the image, obtained by running gdalinfo in the terminal:
Driver: GTiff/GeoTIFF
Files: final.tif
Size is 8916, 6888
Coordinate System is:
PROJCS["WGS 84 / UTM zone 47N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",99],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32647"]]
Origin = (667759.259870000067167,1546341.352920000208542)
Pixel Size = (0.032920000000000,-0.032920000000000)
Metadata:
AREA_OR_POINT=Area
TIFFTAG_SOFTWARE=pix4dmapper
Image Structure Metadata:
COMPRESSION=LZW
INTERLEAVE=PIXEL
Corner Coordinates:
Upper Left ( 667759.260, 1546341.353) (100d33'11.42"E, 13d58'57.03"N)
Lower Left ( 667759.260, 1546114.600) (100d33'11.37"E, 13d58'49.65"N)
Upper Right ( 668052.775, 1546341.353) (100d33'21.20"E, 13d58'56.97"N)
Lower Right ( 668052.775, 1546114.600) (100d33'21.15"E, 13d58'49.59"N)
Center ( 667906.017, 1546227.976) (100d33'16.29"E, 13d58'53.31"N)
Band 1 Block=8916x1 Type=Byte, ColorInterp=Red
NoData Value=-10000
Band 2 Block=8916x1 Type=Byte, ColorInterp=Green
NoData Value=-10000
Band 3 Block=8916x1 Type=Byte, ColorInterp=Blue
NoData Value=-10000
Band 4 Block=8916x1 Type=Byte, ColorInterp=Alpha
NoData Value=-10000
Anyone?
You need to transform pixel coordinates to geographic space using the GeoTransform matrix associated with your raster file. Using GDAL you could do something like the following:
# open the dataset and get the geo transform matrix
ds = gdal.Open('final.tif')
# note: GetGeoTransform returns (xoffset, px_w, rot1, yoffset, rot2, px_h)
xoffset, px_w, rot1, yoffset, rot2, px_h = ds.GetGeoTransform()
# supposing x and y are your pixel coordinates, this
# is how to get the coordinates in map space
posX = px_w * x + rot1 * y + xoffset
posY = rot2 * x + px_h * y + yoffset
# shift to the center of the pixel
posX += px_w / 2.0
posY += px_h / 2.0
Of course the position you get will be relative to the same coordinate reference system used by your raster dataset. So if you need to transform it to lat/long you will have to do some further work:
from osgeo import osr

# get CRS from dataset
crs = osr.SpatialReference()
crs.ImportFromWkt(ds.GetProjectionRef())
# create lat/long crs with WGS84 datum
crsGeo = osr.SpatialReference()
crsGeo.ImportFromEPSG(4326)  # 4326 is the EPSG id of the lat/long crs
t = osr.CoordinateTransformation(crs, crsGeo)
# note: on GDAL 3+ the EPSG:4326 axis order is (lat, long); older versions return (long, lat)
(lat, long, z) = t.TransformPoint(posX, posY)
Sorry, I'm not really fluent in Python, so you will probably have to adapt this code. Check out the documentation of GeoTransform for the C++ API to learn more about the matrix elements.
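Tying the two steps together with the detection output from the question, an end-to-end sketch could look like the following (untested; it assumes the result list produced by the YOLO code above, and the GDAL 3+ axis-order caveat applies to the final lat/lng pair):

from osgeo import gdal, osr

ds = gdal.Open('final.tif')
xoffset, px_w, rot1, yoffset, rot2, px_h = ds.GetGeoTransform()

crs = osr.SpatialReference()
crs.ImportFromWkt(ds.GetProjectionRef())
crsGeo = osr.SpatialReference()
crsGeo.ImportFromEPSG(4326)
t = osr.CoordinateTransformation(crs, crsGeo)

for det in result:
    x, y = det['pointx'], det['pointy']
    # pixel -> projected coordinates (UTM zone 47N here), shifted to the pixel center
    posX = px_w * x + rot1 * y + xoffset + px_w / 2.0
    posY = rot2 * x + px_h * y + yoffset + px_h / 2.0
    lat, lng, z = t.TransformPoint(posX, posY)  # (long, lat, z) on GDAL < 3
    det['lat'], det['lng'] = lat, lng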
Without the excellent and clear Python code posted by Gabriella, I don't know if I would have ever figured out how to do this in C. The documentation and examples for GDAL are amazingly sparse.
Here's a C version of Gabriella's code:
const char fn[] = "/path/to/geo/file.tif";
GDALDatasetH hDataset;
GDALAllRegister(); // Register all GDAL formats
hDataset = GDALOpen( fn, GA_ReadOnly ); // Open our geo file (GeoTIFF or other supported format)
if (hDataset == NULL)
{
printf("Failed to open dataset\n");
return;
}
// These are the input points to be transformed, in pixel coordinates of the source raster file
double x = 20;
double y = 20;
double adfGeoTransform[6];
GDALGetGeoTransform( hDataset, adfGeoTransform );
// Put the returned transform values into named vars for readability
double xoffset = adfGeoTransform[0];
double px_w = adfGeoTransform[1];
double rot1 = adfGeoTransform[2];
double yoffset = adfGeoTransform[3];
double rot2 = adfGeoTransform[4];
double px_h = adfGeoTransform[5];
// Apply transform to x,y. Put into posX,posY
double posX = px_w * x + rot1 * y + xoffset;
double posY = rot2 * x + px_h * y + yoffset;
// Transform to center of pixel
posX += px_w / 2.0;
posY += px_h / 2.0;
OGRErr err = 0;
// sr0 is the "from" spatial reference, pulled out of our file
OGRSpatialReferenceH sr0 = OSRNewSpatialReference(GDALGetProjectionRef(hDataset));
// sr1 is the "to" spatial reference, initialized as EPSG 4326 (lat/lon)
OGRSpatialReferenceH sr1 = OSRNewSpatialReference(NULL);
err = OSRImportFromEPSG(sr1, 4326);
double xtrans = posX;
double ytrans = posY;
double ztrans = 0;
int pabSuccess = 0;
// Make our transformation object
OGRCoordinateTransformationH trans = OCTNewCoordinateTransformation(sr0, sr1);
// Transform our point posX,posY, put it into xTrans,yTrans
OCTTransformEx(trans, 1, &xtrans, &ytrans, &ztrans, &pabSuccess);
GDALClose(hDataset);
printf("map coordinates (%f, %f)\n", xtrans, ytrans);
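To build this against GDAL's C API, something along the lines of gcc example.c $(gdal-config --cflags --libs) usually works, though the exact flags depend on your installation.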
Related
I am using pyBullet, which is a Python wrapper for the Bullet3 physics engine, and I need to create a point cloud from a virtual camera.
This engine uses a basic OpenGL renderer, and I am able to get values from the OpenGL depth buffer:
img = p.getCameraImage(imgW, imgH, renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgbBuffer = img[2]
depthBuffer = img[3]
Now I have a width*height array with depth values. How can I get world coordinates from this? I tried to save a .ply point cloud with points (width, height, depthBuffer(width, height)), but this doesn't create a point cloud that looks like the objects on the scene.
I also tried to correct the depth with the near and far plane:
depthImg = float(depthBuffer[h, w])
far = 1000.
near = 0.01
depth = far * near / (far - (far - near) * depthImg)
but the result was also a weird point cloud. How can I create a realistic point cloud from the depth buffer data? Is it even possible?
I did something similar in C++, but there I used glm::unProject:
for (size_t i = 0; i < height; i += density) {
    for (size_t j = 0; j < width; j += density) {
        glm::vec3 win(i, j, depth);
        glm::vec4 position(glm::unProject(win, identity, projection, viewport), 0.0);
        // ...
    }
}
EDIT:
Based on Rabbid76's answer I used PyGLM, which worked; I am now able to obtain XYZ world coordinates to create the point cloud, but the depth values in the point cloud look distorted. Am I getting the depth from the depth buffer correctly?
for h in range(0, imgH, stepX):
    for w in range(0, imgW, stepY):
        depthImg = float(np.array(depthBuffer)[h, w])
        far = 1000.
        near = 0.01
        depth = far * near / (far - (far - near) * depthImg)
        win = glm.vec3(h, w, depthBuffer[h][w])
        position = glm.unProject(win, model, projGLM, viewport)
        f.write(str(position[0]) + " " + str(position[1]) + " " + str(depth) + "\n")
Here is my solution. We just need to know how the view matrix and the projection matrix work. There are computeProjectionMatrixFOV and computeViewMatrix functions in pybullet.
http://www.songho.ca/opengl/gl_projectionmatrix.html and http://ksimek.github.io/2012/08/22/extrinsic/
In a word, point_in_world = inv(projection_matrix * viewMatrix) * NDC_pos
glm.unProject is another solution.
import numpy as np

stepX = 10
stepY = 10
pointCloud = np.empty([int(img_height/stepY), int(img_width/stepX), 4])
projectionMatrix = np.asarray(projection_matrix).reshape([4, 4], order='F')
viewMatrix = np.asarray(view_matrix).reshape([4, 4], order='F')
tran_pix_world = np.linalg.inv(np.matmul(projectionMatrix, viewMatrix))
for h in range(0, img_height, stepY):
    for w in range(0, img_width, stepX):
        x = (2*w - img_width)/img_width
        y = -(2*h - img_height)/img_height  # be careful! depth and its corresponding position
        z = 2*depth_np_arr[h, w] - 1
        pixPos = np.asarray([x, y, z, 1])
        position = np.matmul(tran_pix_world, pixPos)
        pointCloud[int(h/stepY), int(w/stepX), :] = position / position[3]
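Since glm.unProject is mentioned as the alternative, here is a rough sketch of that path as well. This is an illustration only: it assumes PyGLM is installed and reuses pybullet's flat column-major view_matrix/projection_matrix tuples. Note the window coordinates are (w, flipped h), not (h, w) as in the question's loop, which is a common source of the distortion described there.

import glm

# pybullet returns the matrices as flat column-major tuples of 16 floats
view = glm.mat4(*view_matrix)
proj = glm.mat4(*projection_matrix)
viewport = glm.vec4(0, 0, img_width, img_height)

points = []
for h in range(0, img_height, stepY):
    for w in range(0, img_width, stepX):
        # window coords: x along width, y flipped (OpenGL's origin is bottom-left),
        # z is the raw [0, 1] value from the depth buffer
        win = glm.vec3(w, img_height - h, depth_np_arr[h, w])
        points.append(glm.unProject(win, view, proj, viewport))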
I'm trying to get the value at a given latitude and longitude from a subdataset of an HDF file using GDAL. But I'm getting the following error:
IndexError: index -62399 is out of bounds for axis 1 with size 4800
Here's my code:
from osgeo import ogr, osr,gdal
hdf_file = gdal.Open("MOD13Q1.A2017321.h31v10.006.2017337222145.hdf")
subDatasets = hdf_file.GetSubDatasets()
val_dict = {}
#print subDatasets[0]
dataset = gdal.Open(subDatasets[1][0])
transf = dataset.GetGeoTransform()
success,transfInv = gdal.InvGeoTransform(transf)
ds = dataset.ReadAsArray()
#lon,lat = -17.586972, 139.158043
lat = -16.718853
lon = 142.645773
px, py = gdal.ApplyGeoTransform(transfInv, lon, lat)
value = ds[int(px),int(py)]
print(value)
Can anyone tell me what I'm doing wrong?
If you look at the geotransform (transf) of the dataset, you can see that the coordinates are not in degrees lat/lon (it's Sinusoidal).
Therefore you shouldn't provide lat/lon values when applying the inverse geotransform to convert to pixel coordinates. The values should be in the same projection as the dataset.
For example, if you enter the coordinates of the upper-left corner, you will get (0,0) as a result:
mapx = transf[0]
mapy = transf[3]
px, py = gdal.ApplyGeoTransform(transfInv, mapx, mapy)
Or for the lower-right corner:
mapx = transf[0] + transf[1] * ds.shape[1]
mapy = transf[3] + transf[5] * ds.shape[0]
px, py = gdal.ApplyGeoTransform(transfInv, mapx, mapy)
Which results in (4800, 4800).
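A sketch of the missing reprojection step (untested; it reuses the variables from the question, and on GDAL 3+ the EPSG:4326 axis order is lat/lon, so TransformPoint may want (lat, lon)):

from osgeo import gdal, osr

src = osr.SpatialReference()
src.ImportFromEPSG(4326)                     # WGS84 lat/lon
dst = osr.SpatialReference()
dst.ImportFromWkt(dataset.GetProjection())   # the MODIS Sinusoidal CRS
tx = osr.CoordinateTransformation(src, dst)

mapx, mapy, _ = tx.TransformPoint(lon, lat)  # (lat, lon) on GDAL 3+
px, py = gdal.ApplyGeoTransform(transfInv, mapx, mapy)
value = ds[int(py), int(px)]                 # numpy indexes row (y) first
print(value)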
I am loading an image from the Google Static Maps API; the loaded satellite image shows a place several hundred meters wide and long.
https://maps.googleapis.com/maps/api/staticmap?center=53.4055429,-2.9976502&zoom=16&size=400x400&maptype=satellite&key=YOUR_API_KEY
Additionally, the image resolution is reported as 10 meters (screenshot omitted).
My question is: given the center geolocation (53.4055429, -2.9976502) and the resolution of this static image, how can I calculate the geolocation of the upper-left or lower-right corner of the image, and ultimately of every pixel in it?
Looks like you need not a JavaScript solution but a Python one, to use on a server rather than in the browser. I've created a Python example, but it is the math that I am going to stand on; math is all you need to calculate the coordinates. Let me do it with JS as well to make the snippet work in the browser. You can see that Python and JS give the same results.
If you just need the formulae for degrees per pixel, here they are. They are simple enough and you don't need any external libraries, just Python's math. The explanation can be found further down.
#!/usr/bin/python
import math

w = 400
h = 400
zoom = 16
lat = 53.4055429
lng = -2.9976502

def getPointLatLng(x, y):
    parallelMultiplier = math.cos(lat * math.pi / 180)
    degreesPerPixelX = 360 / math.pow(2, zoom + 8)
    degreesPerPixelY = 360 / math.pow(2, zoom + 8) * parallelMultiplier
    pointLat = lat - degreesPerPixelY * (y - h / 2)
    pointLng = lng + degreesPerPixelX * (x - w / 2)
    return (pointLat, pointLng)

print('NE:', getPointLatLng(w, 0))
print('SW:', getPointLatLng(0, h))
print('NW:', getPointLatLng(0, 0))
print('SE:', getPointLatLng(w, h))
The output of the script is
$ python getcoords.py
NE: (53.40810128625675, -2.9933586655761717)
SW: (53.40298451374325, -3.001941734423828)
NW: (53.40810128625675, -3.001941734423828)
SE: (53.40298451374325, -2.9933586655761717)
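As a quick sanity check, the corner spans above match the degrees-per-pixel formulas (a small verification sketch, not part of the original script):

import math

degreesPerPixel = 360 / 2 ** (16 + 8)  # same degreesPerPixelX as above, zoom 16
print(400 * degreesPerPixel)                                       # ~0.0085831, the NE-SW longitude span
print(400 * degreesPerPixel * math.cos(math.radians(53.4055429)))  # ~0.0051168, the NE-SW latitude span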
What we have to start with
We have some parameters needed in the URL https://maps.googleapis.com/maps/api/staticmap?center=53.4055429,-2.9976502&zoom=16&size=400x400&maptype=satellite&key=YOUR_API_KEY – coordinates, zoom, and size in pixels.
Let's introduce some initial variables:
var config = {
lat: 53.4055429,
lng: -2.9976502,
zoom: 16,
size: {
x: 400,
y: 400,
}
};
The math of the Earth of 512 pixels
The math is as follows. Zoom 1 stands for a full view of the Earth's equator, 360°, when using an image size of 512 (see the docs for size and zoom). See the example at zoom 1. It is a very important point. The scale (degrees per pixel) doesn't depend on the image size: when you change the image size, you see the same scale (compare 1 and 2 – the second image is a cropped version of the bigger one). The maximum image size for googleapis is 640.
Every zoom-in doubles the resolution. Therefore the width of your image in terms of longitude is:
lngDegrees = 360 / 2**(zoom - 1); // full image width in degrees, ** for power
Then use a linear function to find the coordinates of any point of the image. It should be mentioned that this linearity works well only for highly zoomed images; you can't use it for low zooms like 5 or less. Low zooms have slightly more complex math.
lngDegreesPerPixel = lngDegrees / 512 = 360 / 2**(zoom - 1) / 2**9 = 360 / 2**(zoom + 8);
lngX = config.lng + lngDegreesPerPixel * ( point.x - config.size.x / 2);
Latitude degrees are different
A latitude degree and a longitude degree at the equator are the same size, but if we go north or south, a longitude degree becomes smaller, since the rings of parallels on the Earth have smaller radii (r = R * cos(lat) < R), and therefore the image height in degrees becomes smaller (see P.S.).
latDegrees = 360 / 2**(zoom - 1) * cos(lat); // full image height in degrees, ** for power
And respectively
latDegreesPerPixel = latDegrees / 512 = 360 / 2**(zoom - 1) * cos(lat) / 2**9 = 360 / 2**(zoom + 8) * cos(lat);
latY = config.lat - latDegreesPerPixel * ( point.y - config.size.y / 2)
The sign after config.lat differs from the sign for lngX because the Earth's longitude direction coincides with the image's x direction, while the latitude direction is opposite to the image's y direction.
So we can now write a simple function to find a pixel's coordinates from its x and y position on the picture.
var config = {
lat: 53.4055429,
lng: -2.9976502,
zoom: 16,
size: {
x: 400,
y: 400,
}
};
function getCoordinates(x, y) {
var degreesPerPixelX = 360 / Math.pow(2, config.zoom + 8);
var degreesPerPixelY = 360 / Math.pow(2, config.zoom + 8) * Math.cos(config.lat * Math.PI / 180);
return {
lat: config.lat - degreesPerPixelY * ( y - config.size.y / 2),
lng: config.lng + degreesPerPixelX * ( x - config.size.x / 2),
};
}
console.log('SW', getCoordinates(0, config.size.y));
console.log('NE', getCoordinates(config.size.x, 0));
console.log('SE', getCoordinates(config.size.x, config.size.y));
console.log('NW', getCoordinates(0, 0));
console.log('Something at 300,128', getCoordinates(300, 128));
P.S. You may ask why I put the cos(lat) multiplier on the latitude instead of using it as a divisor in the longitude formula. I found that Google chooses to keep a constant longitude scale per pixel across latitudes, so the cos goes to latitude as a multiplier.
I believe you can calculate a bounding box using Maps JavaScript API.
You have a center position and know that the distance from the center to the northeast and southwest corners is 200 pixels, because the size in your example is 400x400.
Have a look at the following code that calculates NE and SW points
var map;
function initMap() {
var latLng = new google.maps.LatLng(53.4055429,-2.9976502);
map = new google.maps.Map(document.getElementById('map'), {
center: latLng,
zoom: 16,
mapTypeId: google.maps.MapTypeId.SATELLITE
});
var marker = new google.maps.Marker({
position: latLng,
map: map
});
google.maps.event.addListener(map, "idle", function() {
//Verical and horizontal distance from center in pixels
var h = 200;
var w = 200;
var centerPixel = map.getProjection().fromLatLngToPoint(latLng);
var pixelSize = Math.pow(2, -map.getZoom());
var nePoint = new google.maps.Point(centerPixel.x + w*pixelSize, centerPixel.y - h*pixelSize);
var swPoint = new google.maps.Point(centerPixel.x - w*pixelSize, centerPixel.y + h*pixelSize);
var ne = map.getProjection().fromPointToLatLng(nePoint);
var sw = map.getProjection().fromPointToLatLng(swPoint);
var neMarker = new google.maps.Marker({
position: ne,
map: map,
title: "NE: " + ne.toString()
});
var swMarker = new google.maps.Marker({
position: sw,
map: map,
title: "SW: " + sw.toString()
});
var polygon = new google.maps.Polygon({
paths: [ne, new google.maps.LatLng(ne.lat(),sw.lng()), sw, new google.maps.LatLng(sw.lat(),ne.lng())],
map: map,
strokeColor: "green"
});
console.log("NE: " + ne.toString());
console.log("SW: " + sw.toString());
});
}
#map {
height: 100%;
}
/* Optional: Makes the sample page fill the window. */
html, body {
height: 100%;
margin: 0;
padding: 0;
}
<div id="map"></div>
<script src="https://maps.googleapis.com/maps/api/js?key=AIzaSyDztlrk_3CnzGHo7CFvLFqE_2bUKEq1JEU&libraries=geometry&callback=initMap"
async defer></script>
I hope this helps!
UPDATE
In order to solve this in Python, you should understand the Map and Tile Coordinates principles used by the Google Maps JavaScript API and implement similar projection logic in Python.
Fortunately, somebody has already done this, and you can find a project that implements methods similar to map.getProjection().fromLatLngToPoint() and map.getProjection().fromPointToLatLng() from my example in Python. Have a look at this project on GitHub:
https://github.com/hrldcpr/mercator.py
So, you can download mercator.py and use it in your project. My JavaScript API example translates into the following Python code:
#!/usr/bin/python
from mercator import *
w = 200
h = 200
zoom = 16
lat = 53.4055429
lng = -2.9976502
centerPixel = get_lat_lng_tile(lat, lng, zoom)
pixelSize = pow(2, -zoom)
nePoint = (centerPixel[0] + w*pixelSize, centerPixel[1] - h*pixelSize)
swPoint = (centerPixel[0] - w*pixelSize, centerPixel[1] + h*pixelSize)
ne = get_tile_lat_lng(zoom, nePoint[0], nePoint[1]);
sw = get_tile_lat_lng(zoom, swPoint[0], swPoint[1]);
print('NorthEast:', ne)
print('SouthWest:', sw)
I'm building a project requiring large numbers of Google Maps images. I defined these functions to be used in another function that will automatically collect images. The latitude changes nicely, but I've noticed the longitude is slightly off. Is that an artifact of the approximate Mercator projection method? I was under the impression that the conversion I've used was pretty accurate except when approaching the poles.
import math
import os
import DLMaps
#Finds the distance covered in a Static Maps image pixel
def PixDist(zoom, scale=2):
    earthCirc = 40075.0  # in km
    base = 256  # size of google maps at zoom = 0
    return earthCirc/(base*scale*(2**zoom))
#Finds the Km distance to the next google static maps image based on size of images,
# and distance per pixel
def DistNextImage(distpp, scale=2, size=640):
    return distpp*scale*size
#returns a new Lat, Lon co-ordinate given a starting point, km distance change and
# a NESW direction, values 1-4 being used to represent corresponding direction.
def NewLatLon(lat, lon, dist, direction):
    if direction == 1:
        dist = dist/110.54  # approximate km per degree of latitude
        lat = lat + dist    # heading north
    elif direction == 2:
        # note: a degree of longitude at the equator is ~111.32 km, not 110.32,
        # which may account for the slight drift described above
        dist = dist/(110.32 * math.cos(math.pi*lat/180.0))  # approx change in lon
        lon = lon + dist
    elif direction == 3:
        dist = dist/110.54  # approximate km per degree of latitude
        lat = lat - dist    # heading south
    elif direction == 4:
        dist = dist/(110.32 * math.cos(math.pi*lat/180.0))  # approx change in lon
        lon = lon - dist
    return lat, lon
The earth is not a true ellipsoid, there are a large number of coordinate systems, and passing from one system to another is far from simple. You could have a look at pyproj, a Python interface to the well-known proj.4 library, to convert from lat/lon (I assume WGS84...) to almost any other coordinate system, including of course Mercator. You could try to roll your own, but there are so many caveats, such as different origin meridians and slight differences in reference ellipsoids, that you have little hope of getting correct and accurate results.
You also have some reference material on WGS84 on Wikipedia.
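For instance, a minimal sketch of the same move-by-distance operation on top of pyproj (names are mine; Geod does the geodesic math on the WGS84 ellipsoid instead of a flat-earth approximation):

from pyproj import Geod

geod = Geod(ellps='WGS84')

# move dist_m meters from (lat, lon) along a compass azimuth in degrees
# (0 = north, 90 = east)
def new_lat_lon(lat, lon, dist_m, azimuth_deg):
    lon2, lat2, _back_azimuth = geod.fwd(lon, lat, azimuth_deg, dist_m)
    return lat2, lon2

print(new_lat_lon(53.4055429, -2.9976502, 1000.0, 90.0))  # ~1 km due east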
I made an object that does similar calculations. Maybe it will give you some inspiration.
Basically I treat the earth as an ellipsoid: earthCirc along the equator is not the same as earthCirc through the poles.
I try to make conversions between distances in meters <-> angles of lat & lng.
See if my object is more accurate than yours (mine surely has bugs if you use some extreme values).
/**
* @file: JavaScript object to help calculate with latitude and longitude, together with distances (in meters) and angles.
* The initial goal was to calculate the end point (in latitude & longitude) of a line perpendicular to another line.
*
* Planet Earth is approximately an ellipsoid. The circumference along the equator is
* somewhat greater than the circumference through both poles.
* This JavaScript object makes calculations that are useful for Google Maps.
* It allows you to use Pythagoras for coordinates, as if the earth were a flat rectangle.
* The precision of the results decreases as distances increase, and near the poles.
* Any calculation where the latitude goes beyond the poles ( > 90 or < -90 ) will probably return complete nonsense.
*
* @author: Emmanuel Delay, emmanueldelay#gmail.com
* copyleft 2014. Feel free to use, copy, share, improve
* Please send me the code, if you make improvements.
*
* Examples:
<script>
function log(message) {
document.getElementById('log').innerHTML += message + '<br>';
}
window.onload = function() {
var dLatLng = Earth.xy2LatLng(5000000, 5000000, 0.0);
latLng = [dLatLng.lat, dLatLng.lng ];
log(
'Start from 0,0 ; move 5000km to the north, 5000km to the east: ' +
latLng[0] +','+ latLng[1]
);
var eifel = {lat: 48.8582186, lng: 2.2946114};
var dLatLng = Earth.xy2LatLng(1000, 2000, eifel.lat);
latLng = [dLatLng.lat, dLatLng.lng ];
var dest = [eifel.lat + latLng[0], eifel.lng + latLng[1] ];
log(
'Move 1km to the north, 2km to the east of the Eifel Tower: ' +
dest[0] +','+ dest[1]
);
var dLatLng = Earth.setHeading(eifel.lat, eifel.lng, 10000, 30);
latLng = [dLatLng.lat, dLatLng.lng ];
log(
'Move 10km from the Eifel Tower, heading 30° (North = 0; east = 90°; ...): ' +
latLng[0] +','+ latLng[1]
);
}
</script>
<div id="log"></div>
* note:
* - all distances are in meter. all angles are in degree
* - the d in dLat and dLng stands for delta, being a change in coordinates
* - x is along the longitude, y is along latitude
*/
Earth = {
// @see http://www.space.com/17638-how-big-is-earth.html for the data
// along the equator
circumference_equator: 40075000,
// through both poles.
// Note: this is basically the original definition of the meter; they were 2km off on a distance from pole to equator ( http://en.wikipedia.org/wiki/History_of_the_metre )
circumference_poles: 40008000,
// given a change in latitude, how many meters did you move?
lat2Y: function(dLat) {
return this.circumference_poles / 360 * dLat;
},
// given a change in longitude and a given latitude, how many meters did you move?
lng2X: function(dLng, lat) {
    // east-west distances scale with the equatorial circumference
    return Math.cos( this.deg2rad(lat) ) * (this.circumference_equator / 360 * dLng);
},
// given a distance you move due North (or South), what's the new coordinates?
// returns a change in latitude
y2Lat: function(y) {
return y * 360 / this.circumference_poles;
},
// given a distance you move due East (or West) and a given latitude, what's the new coordinates?
// returns a change in longitude
x2Lng: function(x, lat) {
    // east-west distances scale with the equatorial circumference
    return x * 360 / ( Math.cos( this.deg2rad(lat) ) * this.circumference_equator);
},
// degrees (360°) to radians
deg2rad: function(deg) {
return deg * Math.PI / 180;
},
// returns a change in position
xy2LatLng: function(y, x, lat) {
return {
lat: this.y2Lat(y),
lng: this.x2Lng(x, lat)
};
},
// @param heading: North = 0; east = 90°; ...
// returns a change in position
setHeading: function(lat, lng, dist, heading) {
var latDestination = lat + this.y2Lat(dist * Math.cos(this.deg2rad(heading)));
var lngDestination = lng + this.x2Lng(dist * Math.sin(this.deg2rad(heading)), lat);
return {
lat: latDestination,
lng: lngDestination
};
},
// returns the absolute position
moveByXY: function(lat, lng, x, y) {
    var dLatLng = Earth.xy2LatLng(y, x, lat); // note: xy2LatLng expects (y, x, lat)
    latLng = [dLatLng.lat, dLatLng.lng ];
return {
lat: lat + latLng[0],
lng: lng + latLng[1]
}
}
}
I have a raster image (in TIFF format) and a polygon area in shapefile format, converted to an array. I wish to find an elegant way to create an array where every element inside the polygon border has the value 1 and every element outside the polygon has the value 0. My final goal is to mask the array derived from the image with the array derived from the shapefile.
I have the following question, and thanks for any help:
after creating an empty array using np.zeros((ds.RasterYSize, ds.RasterXSize)) and computing the pixel locations of the geospatial coordinates of my polygon's border, what is the best way to fill the inside of the polygon with 1s in that array?
from osgeo import gdal, ogr, osr
import numpy as np
def world2Pixel(geoMatrix, x, y):
    """
    Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate
    the pixel location of a geospatial coordinate
    (source http://www2.geog.ucl.ac.uk/~plewis/geogg122/vectorMask.html)
    geoMatrix
    [0] = top left x (x Origin)
    [1] = w-e pixel resolution (pixel Width)
    [2] = rotation, 0 if image is "north up"
    [3] = top left y (y Origin)
    [4] = rotation, 0 if image is "north up"
    [5] = n-s pixel resolution (pixel Height)
    """
    ulX = geoMatrix[0]
    ulY = geoMatrix[3]
    xDist = geoMatrix[1]
    yDist = geoMatrix[5]
    rtnX = geoMatrix[2]
    rtnY = geoMatrix[4]
    pixel = np.round((x - ulX) / xDist).astype(int)
    # note: uses xDist here too, i.e. assumes square pixels (|yDist| == xDist)
    line = np.round((ulY - y) / xDist).astype(int)
    return (pixel, line)
# Open the image as a read only image
ds = gdal.Open(inFile, gdal.GA_ReadOnly)
# Get image georeferencing information.
geoMatrix = ds.GetGeoTransform()
ulX = geoMatrix[0] # top left x (x Origin)
ulY = geoMatrix[3] # top left y (y Origin)
xDist = geoMatrix[1] # w-e pixel resolution (pixel Width)
yDist = geoMatrix[5] # n-s pixel resolution (pixel Height)
rtnX = geoMatrix[2] # rotation, 0 if image is "north up"
rtnY = geoMatrix[4] #rotation, 0 if image is "north up"
# open shapefile (= border of are of interest)
shp = ogr.Open(poly)
source_shp = ogr.GetDriverByName("Memory").CopyDataSource(shp, "")
# get the coordinates of the points from the boundary of the shapefile
source_layer = source_shp.GetLayer(0)
feature = source_layer.GetNextFeature()
geometry = feature.GetGeometryRef()
pts = geometry.GetGeometryRef(0)
points = []
for p in range(pts.GetPointCount()):
    points.append((pts.GetX(p), pts.GetY(p)))
pnts = np.array(points).transpose()
print(pnts)
pnts
array([[ 558470.28969598, 559495.31976318, 559548.50931402,
559362.85560495, 559493.99688721, 558958.22572622,
558529.58862305, 558575.0174293 , 558470.28969598],
[ 6362598.63707171, 6362629.15167236, 6362295.16466266,
6362022.63453845, 6361763.96246338, 6361635.8559779 ,
6361707.07684326, 6362279.69352024, 6362598.63707171]])
# calculate the pixel location of a geospatial coordinate (= define the border of my polygon)
pixels, line = world2Pixel(geoMatrix,pnts[0],pnts[1])
pixels
array([17963, 20013, 20119, 19748, 20010, 18939, 18081, 18172, 17963])
line
array([35796, 35734, 36402, 36948, 37465, 37721, 37579, 36433, 35796])
#create an empty array with value zero using
data = np.zeros((ds.RasterYSize, ds.RasterXSize))
This is essentially a point-in-polygon problem.
Here's a little library that solves this problem. It's from this page, with some modifications to make it more readable.
pip.py
#From http://www.ariel.com.au/a/python-point-int-poly.html
# Modified by Nick ODell
from collections import namedtuple

def point_in_polygon(target, poly):
    """target is the (x, y) point to test. poly is a list of tuples comprising the polygon."""
    point = namedtuple("Point", ("x", "y"))
    line = namedtuple("Line", ("p1", "p2"))
    target = point(*target)
    inside = False
    # Build a list of coordinate pairs
    # First, turn the points into named tuples
    poly = list(map(lambda p: point(*p), poly))
    # Make two lists, with list2 shifted forward by one and wrapped around
    list1 = poly
    list2 = poly[1:] + [poly[0]]
    poly = map(line, list1, list2)
    for l in poly:
        p1 = l.p1
        p2 = l.p2
        if p1.y == p2.y:
            # This line is horizontal and thus not relevant.
            continue
        if not (min(p1.y, p2.y) < target.y <= max(p1.y, p2.y)):
            # This line is too high or too low.
            continue
        if target.x > max(p1.x, p2.x):
            # This line is entirely to the left of our point, so a ray cast
            # to the right cannot cross it.
            continue
        # The line still might not cross the rightward ray. Find the x where
        # the line we are testing reaches the y value of our target point.
        rise = p1.y - p2.y
        run = p1.x - p2.x
        try:
            slope = rise / float(run)
        except ZeroDivisionError:
            slope = float('inf')
        rise_to_intercept = target.y - p1.y
        x_intercept = p1.x + rise_to_intercept / slope  # a vertical line intercepts at p1.x
        if target.x > x_intercept:
            # The crossing is to the left of our point; the rightward ray misses it.
            continue
        inside = not inside
    return inside

if __name__ == "__main__":
    poly = [(2, 2), (1, -1), (-1, -1), (-1, 1)]
    print(point_in_polygon((1.5, 0), poly))
The accepted answer doesn't work for me.
I ended up using shapely library.
sudo pip install shapely
Code:
import shapely.geometry
poly = shapely.geometry.Polygon([(2,2), (1,-1), (-1,-1), (-1, 1)])
point = shapely.geometry.Point(1.5, 0)
point.intersects(poly)
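For the full-size raster in the original question, testing every pixel in pure Python will be slow; GDAL can instead burn the polygon layer straight into an in-memory raster. A sketch (assuming poly is the shapefile path and ds is the opened image from the question):

from osgeo import gdal, ogr

shp = ogr.Open(poly)
layer = shp.GetLayer(0)

driver = gdal.GetDriverByName('MEM')  # in-memory raster, same grid as the image
mask_ds = driver.Create('', ds.RasterXSize, ds.RasterYSize, 1, gdal.GDT_Byte)
mask_ds.SetGeoTransform(ds.GetGeoTransform())
mask_ds.SetProjection(ds.GetProjection())

gdal.RasterizeLayer(mask_ds, [1], layer, burn_values=[1])
mask = mask_ds.ReadAsArray()  # 1 inside the polygon, 0 outside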