Related
I'm working on some map projection interface code in pyqtgraph. My image is an aitoff-hammer projection, with the same general shape as the following atlas example:
[image: aitoff-hammer map projection example with grid lines]
The problem is, my image doesn't have those grid lines. I've verified that my code for converting pixel values to latitude/longitude coordinates works, but I'm having trouble coming up with an approach for a custom grid. My current thought is to make a new image that just has the grid lines, then add it as a semi-opaque ImageItem. But is it possible to stack ImageItems in pyqtgraph?
As musicamante pointed out, I was able to stack ImageItems. I'll be evaluating the accuracy of the curves, but here is the relevant code:
def draw_grid(self, lon, lat, lon_int, lat_int):
    # Replace NaN (off-map) pixels with values that fall off the grid lines.
    lon[np.isnan(lon)] = lon_int + 2
    lat[np.isnan(lat)] = lat_int + 2
    # A pixel lies on a grid line if its coordinate is within 0.5 degrees
    # above a multiple of the chosen interval.
    lon_cond = lon - (lon_int * (lon // lon_int)) <= 0.5
    lat_cond = lat - (lat_int * (lat // lat_int)) <= 0.5
    inds = np.where(lon_cond | lat_cond)
    grid = np.zeros(self.map_data.shape, dtype=np.uint8)
    grid[inds] = 255
    # Stack the grid as a second, semi-transparent ImageItem above the map image.
    grid_image = pg.ImageItem(grid, axisOrder='row-major')
    grid_image.setZValue(10)
    grid_image.setOpacity(0.3)
    self.map_plot.addItem(grid_image)
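For context, here is a sketch of how draw_grid might be called from the same class. The add_graticule method, the pixel_to_lonlat helper, and the 30°/15° intervals are assumptions standing in for the pixel-to-coordinate conversion code mentioned above, not part of the original:

import numpy as np

# Sketch of a caller, assumed to live in the same class as draw_grid.
def add_graticule(self):
    rows, cols = self.map_data.shape[:2]
    yy, xx = np.mgrid[0:rows, 0:cols]
    # pixel_to_lonlat is a hypothetical stand-in for the (unshown) pixel -> lon/lat
    # conversion; it should return NaN for pixels outside the projection ellipse.
    lon, lat = self.pixel_to_lonlat(xx, yy)
    # Draw a graticule every 30 degrees of longitude and 15 degrees of latitude.
    self.draw_grid(lon, lat, lon_int=30, lat_int=15)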
I'm trying to create a function that geographically divides a region into an equal-sized grid of 50x50 meter cells. The function needs to return the upper-left and lower-right geographical coordinates of each cell. I'm using numpy:
import numpy as np
upper_right = (33.775353, -111.566165)
lower_right = (33.273915, -111.566165)
upper_left = (33.775353, -112.439578)
lower_left = (33.273915, -112.439578)
cols = np.linspace(lower_left[1], lower_right[1], num=50)
rows = np.linspace(lower_left[0], upper_left[0], num=50)
I don't have any experience with numpy; in fact, this is my first time using it, so I'm not sure whether linspace is the best method for what I'm trying to do. Some guidance would be very helpful.
Update: I've managed to remove the redundancy by calculating upper_right and lower_left at runtime. Also, I've moved everything into a function that accepts the cell_size (default 50):
import numpy as np
def calculate_grid(upper_left, lower_right, cell_size=50):
    upper_right = {'lat': upper_left['lat'], 'lon': lower_right['lon']}
    lower_left = {'lat': lower_right['lat'], 'lon': upper_left['lon']}
    # cols = np.linspace(lower_left['lon'], lower_right['lon'], num=cell_size)
    # rows = np.linspace(lower_left['lat'], upper_left['lat'], num=cell_size)
    pass
upper_left = {'lat': 33.775353, 'lon': -112.439578}
lower_right = {'lat': 33.273915, 'lon': -111.566165}
grid = calculate_grid(upper_left, lower_right)
print(grid)
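For what it's worth, here is one sketch of how the commented-out linspace calls could be turned into cell corners. Note that this splits the bounding box into a fixed number of equal steps in degrees rather than true 50 m cells, which is exactly the limitation the answers below address; n_cells here is a cell count, not a size in metres:

import numpy as np

def calculate_grid_sketch(upper_left, lower_right, n_cells=50):
    lons = np.linspace(upper_left['lon'], lower_right['lon'], num=n_cells + 1)
    lats = np.linspace(upper_left['lat'], lower_right['lat'], num=n_cells + 1)
    cells = []
    for i in range(n_cells):
        row = []
        for j in range(n_cells):
            row.append({
                'upper_left': {'lat': lats[i], 'lon': lons[j]},
                'lower_right': {'lat': lats[i + 1], 'lon': lons[j + 1]},
            })
        cells.append(row)
    return cells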
I took the approach of projecting the region to a local transverse Mercator coordinate system using pyproj. Then I laid out the 50 m grid, and finally converted from transverse Mercator back to lat/lon.
import math
import pyproj
import csv
def ll_to_xy(t, lon, lat):
    return t.transform(
        lon,
        lat,
        radians=False,
        direction=pyproj.enums.TransformDirection.FORWARD)

def xy_to_ll(t, x, y):
    lond, latd = t.transform(
        x,
        y,
        radians=False,
        direction=pyproj.enums.TransformDirection.INVERSE)
    return lond, latd
def generate_cells(xstep, ystep, upper_left, lower_right):
    # Transverse mercator coordinate reference system,
    # whose origin is in the middle of the region.
    lon_0 = upper_left['longitude'] + (lower_right['longitude'] - upper_left['longitude'])/2
    lat_0 = lower_right['latitude'] + (upper_left['latitude'] - lower_right['latitude'])/2
    geo_crs = pyproj.CRS("EPSG:4326")
    tmerc_crs = pyproj.CRS.from_proj4(f'+proj=tmerc +ellps=WGS84 +lon_0={lon_0} +lat_0={lat_0} +units=m +no_defs')
    # Lon/lat to tmerc.
    ll_to_tmerc = pyproj.Transformer.from_crs(geo_crs, tmerc_crs, always_xy=True)
    ul = ll_to_xy(ll_to_tmerc, upper_left['longitude'], upper_left['latitude'])
    ur = ll_to_xy(ll_to_tmerc, lower_right['longitude'], upper_left['latitude'])
    lr = ll_to_xy(ll_to_tmerc, lower_right['longitude'], lower_right['latitude'])
    ll = ll_to_xy(ll_to_tmerc, upper_left['longitude'], lower_right['latitude'])
    # Generate a grid, beginning with the upper left point.
    grid = []
    gx = ul[0]
    gy = ul[1]
    row = 0
    lon, lat = xy_to_ll(ll_to_tmerc, gx, gy)
    with open('grid_points.csv', 'w') as gpf:
        gpw = csv.writer(gpf)
        gpw.writerow(['longitude', 'latitude'])
        while lat > lower_right['latitude']:
            grid.append([])
            while lon <= lower_right['longitude']:
                lon, lat = xy_to_ll(ll_to_tmerc, gx, gy)
                grid[row].append((lon, lat))
                gpw.writerow([lon, lat])
                gx += xstep
            # Start the next row.
            gx = ul[0]
            row += 1
            gy -= ystep
            lon, lat = xy_to_ll(ll_to_tmerc, gx, gy)
    # Make cells from the grid points.
    cells = []
    for i in range(len(grid) - 1):
        cells.append([])
        for j in range(len(grid[0]) - 1):
            cells[i].append({'ul': grid[i][j], 'lr': grid[i+1][j+1]})
    return cells
region = {
    'upper_left': {
        'latitude': -23.6060507,
        'longitude': -46.627016 },
    'lower_right': {
        'latitude': -23.659132,
        'longitude': -46.565758 } }
print('UL', region['upper_left']['longitude'], region['upper_left']['latitude'])
print('LR', region['lower_right']['longitude'], region['lower_right']['latitude'])
cells = generate_cells(50.0, 50.0, region['upper_left'], region['lower_right'])

# Test the cell dimensions with a geodesic to verify
# they are really 50m x 50m.
g = pyproj.Geod(ellps='WGS84')
for (i, row) in enumerate(cells):
    for (j, col) in enumerate(row):
        # Measure top edge of cell[i][j].
        a12, a21, dx = g.inv(
            # From upper left corner.
            cells[i][j]['ul'][0],
            cells[i][j]['ul'][1],
            # To upper right corner.
            cells[i][j]['lr'][0],
            cells[i][j]['ul'][1],
            radians=False)
        # Measure left edge of cell[i][j].
        a12, a21, dy = g.inv(
            # From upper left corner.
            cells[i][j]['ul'][0],
            cells[i][j]['ul'][1],
            # to lower left corner.
            cells[i][j]['ul'][0],
            cells[i][j]['lr'][1],
            radians=False)
        # Measure diagonal.
        a12, a21, dd = g.inv(
            # From upper left corner.
            cells[i][j]['ul'][0],
            cells[i][j]['ul'][1],
            # to lower right corner.
            cells[i][j]['lr'][0],
            cells[i][j]['lr'][1],
            radians=False)
        h = math.sqrt(dx*dx + dy*dy)
        print('cells[{0}][{1}] '.format(i, j))
        print(' ul = {0} '.format(cells[i][j]['ul']))
        print(' lr = {0} '.format(cells[i][j]['lr']))
        print(' dx = {0} dy = {1} dd = {2} h = {3}'.format(dx, dy, dd, h))
        if j == 4: break
    if i == 4: break
What you want is, strictly speaking, impossible, at least while keeping the grid lines oriented north-south and west-east. That is also why the graticule of the geographic coordinate system does not consist of squares.
I wrote some code that creates a grid with a spacing of approximately 50 m between points. It will not work near the poles and will have problems with distances crossing the -180/+180 longitude line.
import math

meanRadius = 6371.0087714150598  # mean radius of the earth in km
colDistance = 0.05  # 50 m circle distance between the points of the grid

northWest = (33.775353, -112.439578)

# The lat angle difference between two points 50 m of circle distance apart
angleLat = (colDistance * 360) / (2 * math.pi * meanRadius)
# Radius of the circle of latitude through northWest, and the matching lon angle
latRadius = math.cos(math.radians(northWest[0])) * meanRadius
angleLon = (colDistance * 360) / (2 * math.pi * latRadius)

print("angleLat: {}".format(angleLat))
print("angleLon: {}".format(angleLon))
print("latRadius: {}".format(latRadius))

grid = []
for x in range(5):
    grid += [[]]
    for y in range(5):
        grid[x] += [(northWest[0] - x * angleLat, northWest[1] + y * angleLon)]
print(grid)
Output
angleLat: 0.00044966018387976883
angleLon: 0.0005409617010418813
latRadius: 5295.733450513392
[[(33.775353, -112.439578), (33.775353, -112.43903703829895), (33.775353, -112.43849607659791), (33.775353, -112.43795511489687), (33.775353, -112.43741415319583)], [(33.77490333981612, -112.439578), (33.77490333981612, -112.43903703829895), (33.77490333981612, -112.43849607659791), (33.77490333981612, -112.43795511489687), (33.77490333981612, -112.43741415319583)], [(33.774453679632245, -112.439578), (33.774453679632245, -112.43903703829895), (33.774453679632245, -112.43849607659791), (33.774453679632245, -112.43795511489687), (33.774453679632245, -112.43741415319583)], [(33.77400401944836, -112.439578), (33.77400401944836, -112.43903703829895), (33.77400401944836, -112.43849607659791), (33.77400401944836, -112.43795511489687), (33.77400401944836, -112.43741415319583)], [(33.77355435926448, -112.439578), (33.77355435926448, -112.43903703829895), (33.77355435926448, -112.43849607659791), (33.77355435926448, -112.43795511489687), (33.77355435926448, -112.43741415319583)]]
Full disclaimer: I have not checked whether the results are plausible, so there might still be some errors in the math.
For the math behind it and how the coordinate system works, consult:
https://en.wikipedia.org/wiki/Geographic_coordinate_system
https://en.wikipedia.org/wiki/Great-circle_distance
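Given the disclaimer above, one way to sanity-check the spacing this spherical approximation produces is to measure adjacent grid points with a geodesic, much as the pyproj-based answer earlier does. A minimal sketch, assuming pyproj is available and grid is the list built above (entries are (lat, lon) tuples):

import pyproj

g = pyproj.Geod(ellps='WGS84')
(lat00, lon00), (lat01, lon01) = grid[0][0], grid[0][1]
(lat10, lon10) = grid[1][0]
# Geod.inv expects lon/lat order and returns the distance in metres.
_, _, d_east = g.inv(lon00, lat00, lon01, lat01)   # spacing along a row
_, _, d_south = g.inv(lon00, lat00, lon10, lat10)  # spacing along a column
print("east-west spacing: {:.2f} m".format(d_east))
print("north-south spacing: {:.2f} m".format(d_south))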
I'm working on a project to detect objects in GeoTIFF files and return the coordinates of those objects; the output will be used by a drone to fly to those coordinates.
I use TensorFlow with YOLO v2 (an image detection framework) and OpenCV to detect the objects I need in the GeoTIFF:
import cv2
from darkflow.net.build import TFNet
import math
import gdal
# initial stage for YOLO v2
options = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolov2.weights',
    'threshold': 0.4,
}
tfnet = TFNet(options)
# OpenCV read Image
img = cv2.imread('final.tif', cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#Predict the image
result = tfnet.return_predict(img)
# Calculate the center and radius of each object
i = 0
while i < len(result):
    tl = (result[i]['topleft']['x'], result[i]['topleft']['y'])
    br = (result[i]['bottomright']['x'], result[i]['bottomright']['y'])
    point = (int((result[i]['topleft']['x'] + result[i]['bottomright']['x']) / 2),
             int((result[i]['topleft']['y'] + result[i]['bottomright']['y']) / 2))
    radius = int(math.hypot(result[i]['topleft']['x'] - point[0], result[i]['topleft']['y'] - point[1]))
    label = result[i]['label']
    result[i]['pointx'] = point[0]
    result[i]['pointy'] = point[1]
    result[i]['radius'] = radius
    i += 1
print(result)
The results come out as a set of JSON-like records:
[{'label': 'person', 'confidence': 0.6090355, 'topleft': {'x': 3711, 'y': 1310}, 'bottomright': {'x': 3981, 'y': 1719}, 'pointx': 3846, 'pointy': 1514, 'radius': 244}]
As you can see, the location of each object is returned in pixel (x, y) coordinates,
and I want to convert these x, y values to lat/lng coordinates.
So I tried to use GDAL (the library used to read the geo information contained inside the image).
Here is the geo information of the image, obtained by running gdalinfo in the terminal:
Driver: GTiff/GeoTIFF
Files: final.tif
Size is 8916, 6888
Coordinate System is:
PROJCS["WGS 84 / UTM zone 47N",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.0174532925199433,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]],
PROJECTION["Transverse_Mercator"],
PARAMETER["latitude_of_origin",0],
PARAMETER["central_meridian",99],
PARAMETER["scale_factor",0.9996],
PARAMETER["false_easting",500000],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]],
AXIS["Easting",EAST],
AXIS["Northing",NORTH],
AUTHORITY["EPSG","32647"]]
Origin = (667759.259870000067167,1546341.352920000208542)
Pixel Size = (0.032920000000000,-0.032920000000000)
Metadata:
AREA_OR_POINT=Area
TIFFTAG_SOFTWARE=pix4dmapper
Image Structure Metadata:
COMPRESSION=LZW
INTERLEAVE=PIXEL
Corner Coordinates:
Upper Left ( 667759.260, 1546341.353) (100d33'11.42"E, 13d58'57.03"N)
Lower Left ( 667759.260, 1546114.600) (100d33'11.37"E, 13d58'49.65"N)
Upper Right ( 668052.775, 1546341.353) (100d33'21.20"E, 13d58'56.97"N)
Lower Right ( 668052.775, 1546114.600) (100d33'21.15"E, 13d58'49.59"N)
Center ( 667906.017, 1546227.976) (100d33'16.29"E, 13d58'53.31"N)
Band 1 Block=8916x1 Type=Byte, ColorInterp=Red
NoData Value=-10000
Band 2 Block=8916x1 Type=Byte, ColorInterp=Green
NoData Value=-10000
Band 3 Block=8916x1 Type=Byte, ColorInterp=Blue
NoData Value=-10000
Band 4 Block=8916x1 Type=Byte, ColorInterp=Alpha
NoData Value=-10000
Can anyone help?
You need to transform the pixel coordinates to geographic space using the GeoTransform matrix that is associated with your raster file. Using GDAL, you could do something like the following:
from osgeo import gdal

# open the dataset and get the geo transform matrix
ds = gdal.Open('final.tif')
# note the order: GT(4) is the second rotation term, GT(5) the pixel height
xoffset, px_w, rot1, yoffset, rot2, px_h = ds.GetGeoTransform()

# supposing x and y are your pixel coordinates, this
# is how to get the coordinate in space.
posX = px_w * x + rot1 * y + xoffset
posY = rot2 * x + px_h * y + yoffset

# shift to the center of the pixel
posX += px_w / 2.0
posY += px_h / 2.0
Of course the position you get will be relative to the same coordinate reference system that is used for your raster dataset. So if you need to transform it to lat/long, you will have to apply a further coordinate transformation:
from osgeo import osr

# get CRS from dataset
crs = osr.SpatialReference()
crs.ImportFromWkt(ds.GetProjectionRef())
# create lat/long crs with WGS84 datum
crsGeo = osr.SpatialReference()
crsGeo.ImportFromEPSG(4326)  # 4326 is the EPSG id of lat/long crs
t = osr.CoordinateTransformation(crs, crsGeo)
(lat, long, z) = t.TransformPoint(posX, posY)
Sorry, I'm not really fluent in Python, so you will probably have to adapt this code. Check out the documentation of GeoTransform here for the C++ API to learn more about the matrix elements.
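For reference, the two steps above can be combined into one helper. This is only a sketch under the same assumptions as the question (the final.tif raster and GDAL's Python bindings); note that with GDAL 3+ the axis order of the EPSG:4326 result may be (lat, lon) or (lon, lat) depending on the axis-mapping strategy:

from osgeo import gdal, osr

def pixel_to_latlon(raster_path, x, y):
    # Pixel (column x, row y) -> projected coordinates at the pixel centre.
    ds = gdal.Open(raster_path)
    xoffset, px_w, rot1, yoffset, rot2, px_h = ds.GetGeoTransform()
    posX = px_w * x + rot1 * y + xoffset + px_w / 2.0
    posY = rot2 * x + px_h * y + yoffset + px_h / 2.0
    # Projected coordinates -> lat/long (WGS84).
    crs = osr.SpatialReference()
    crs.ImportFromWkt(ds.GetProjectionRef())
    crsGeo = osr.SpatialReference()
    crsGeo.ImportFromEPSG(4326)
    transform = osr.CoordinateTransformation(crs, crsGeo)
    lat, lon, _ = transform.TransformPoint(posX, posY)
    return lat, lon

# Hypothetical usage with the detection output from the question:
# lat, lon = pixel_to_latlon('final.tif', result[0]['pointx'], result[0]['pointy'])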
Without the excellent and clear Python code posted by Gabriella, I don't know if I would have ever figured out how to do this in C. The documentation and examples for gdal are amazingly sparse.
Here's a C version of Gabriella's code:
const char fn[] = "/path/to/geo/file.tif";
GDALDatasetH hDataset;
GDALAllRegister(); // Register all GDAL formats
hDataset = GDALOpen( fn, GA_ReadOnly ); // Open our geo file (GeoTIFF or other supported format)
if (hDataset == NULL)
{
    printf("Failed to open dataset\n");
    return;
}
// These are the input points to be transformed, in pixel coordinates of the source raster file
double x = 20;
double y = 20;
double adfGeoTransform[6];
GDALGetGeoTransform( hDataset, adfGeoTransform );
// Put the returned transform values into named vars for readability
double xoffset = adfGeoTransform[0];
double px_w = adfGeoTransform[1];
double rot1 = adfGeoTransform[2];
double yoffset = adfGeoTransform[3];
double rot2 = adfGeoTransform[4];
double px_h = adfGeoTransform[5];
// Apply transform to x,y. Put into posX,posY
double posX = px_w * x + rot1 * y + xoffset;
double posY = rot2 * x + px_h * y + yoffset;
// Transform to center of pixel
posX += px_w / 2.0;
posY += px_h / 2.0;
OGRErr err = 0;
// sr0 is the "from" spatial reference, pulled out of our file
OGRSpatialReferenceH sr0 = OSRNewSpatialReference(GDALGetProjectionRef(hDataset));
// sr1 is the "to" spatial reference, initialized as EPSG 4326 (lat/lon)
OGRSpatialReferenceH sr1 = OSRNewSpatialReference(NULL);
err = OSRImportFromEPSG(sr1, 4326);
double xtrans = posX;
double ytrans = posY;
double ztrans = 0;
int pabSuccess = 0;
// Make our transformation object
OGRCoordinateTransformationH trans = OCTNewCoordinateTransformation(sr0, sr1);
// Transform our point posX,posY, put it into xTrans,yTrans
OCTTransformEx(trans, 1, &xtrans, &ytrans, &ztrans, &pabSuccess);
GDALClose(hDataset);
printf("map coordinates (%f, %f)\n", xtrans, ytrans);
I downloaded a shapefile of Boston and want to plot it using the code below. However, it gives me the error ValueError: lat_0 must be between -90.000000 and 90.000000 degrees.
It turns out coords holds the values (33869.92130000144, 777617.2998000011, 330800.31099999696, 959741.1853). Why are they so large?
Boston shapefile is obtained here
Code
import fiona
import numpy as np
from mpl_toolkits.basemap import Basemap

# Import Boston shapefile
shapefilename = 'ZIPCODES_NT_POLY'
shp = fiona.open(shapefilename + '.shp')
coords = shp.bounds
shp.close()

w, h = coords[2] - coords[0], coords[3] - coords[1]
extra = 0.01

m = Basemap(
    projection='tmerc', ellps='WGS84',
    lon_0=np.mean([coords[0], coords[2]]),
    lat_0=np.mean([coords[1], coords[3]]),
    llcrnrlon=coords[0] - extra * w,
    llcrnrlat=coords[1] - extra * h,
    urcrnrlon=coords[2] + extra * w,
    urcrnrlat=coords[3] + extra * h,
    resolution='i', suppress_ticks=True)
Error
ValueError: lat_0 must be between -90.000000 and 90.000000 degrees
You need to reproject from the native projection of eastings and northings to another coordinate reference system, in this case degrees of latitude and longitude, which usually means WGS84 or EPSG:4326. Here's how to reproject the bounds:
import fiona
import pyproj
from functools import partial
from shapely.geometry import box
from shapely.ops import transform
shp = fiona.open('ZIPCODES_NT_POLY.shp', 'r')
p_in = pyproj.Proj(shp.crs)
bound_box = box(*shp.bounds)
shp.close()
p_out = pyproj.Proj({'init': 'EPSG:4326'}) # aka WGS84
project = partial(pyproj.transform, p_in, p_out)
bound_box_wgs84 = transform(project, bound_box)
print('native box: ' + str(bound_box))
print('WGS84 box: ' + str(bound_box_wgs84))
native box: POLYGON ((330800.310999997 777617.2998000011, 330800.310999997 959741.1853, 33869.92130000144 959741.1853, 33869.92130000144 777617.2998000011, 330800.310999997 777617.2998000011))
WGS84 box: POLYGON ((-69.93980848410942 41.23787282321487, -69.899038698261 42.87724537285449, -73.53324195423785 42.8704709990465, -73.48147096070339 41.2312695091688, -69.93980848410942 41.23787282321487))
Otherwise, most of the parameters that are required by Basemap are in shp.crs (take a look).
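If you then want to keep using Basemap, one option is to feed the reprojected WGS84 bounds back into the original call. A sketch, untested, continuing from bound_box_wgs84 above:

import numpy as np
from mpl_toolkits.basemap import Basemap

# Bounds of the reprojected box: (min lon, min lat, max lon, max lat).
lon_min, lat_min, lon_max, lat_max = bound_box_wgs84.bounds
w, h = lon_max - lon_min, lat_max - lat_min
extra = 0.01

m = Basemap(
    projection='tmerc', ellps='WGS84',
    lon_0=np.mean([lon_min, lon_max]),
    lat_0=np.mean([lat_min, lat_max]),
    llcrnrlon=lon_min - extra * w,
    llcrnrlat=lat_min - extra * h,
    urcrnrlon=lon_max + extra * w,
    urcrnrlat=lat_max + extra * h,
    resolution='i', suppress_ticks=True)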
You must change the coordinate reference system. I recommend geopandas:
import geopandas as gpd
import matplotlib.pyplot as plt
# load zips with the source projection
shapefilename = 'ZIPCODES_NT_POLY'
zips = gpd.read_file(shapefilename + '.shp')
# convert projection to familiar lat/lon
zips = zips.to_crs('epsg:4326')
zips.plot()
plt.show()
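If you also need the bounding box in degrees (the coords values from the question), geopandas exposes it directly once the CRS has been converted; a small sketch:

# total_bounds is (min lon, min lat, max lon, max lat) after to_crs('epsg:4326')
lon_min, lat_min, lon_max, lat_max = zips.total_bounds
print(lon_min, lat_min, lon_max, lat_max)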
I have a raster image (in TIFF format) and a polygon area in shapefile format converted into an array. I wish to find an elegant way to create an array where all elements inside the polygon border have the value 1 and all elements outside the polygon have the value 0. My final goal is to mask the array derived from the image with the array derived from the shapefile.
I have the following question, and thanks for any help:
After creating an empty array using np.zeros((ds.RasterYSize, ds.RasterXSize)) and computing the pixel locations of the geospatial coordinates on the border of my polygon, what is the best way to fill the inside of the polygon with the value 1 in that array?
from osgeo import gdal, gdalnumeric, ogr, osr
import osgeo.gdal
import math
import numpy
import numpy as np

def world2Pixel(geoMatrix, x, y):
    """
    Uses a gdal geomatrix (gdal.GetGeoTransform()) to calculate
    the pixel location of a geospatial coordinate
    (source http://www2.geog.ucl.ac.uk/~plewis/geogg122/vectorMask.html)
    geoMatrix
    [0] = top left x (x Origin)
    [1] = w-e pixel resolution (pixel Width)
    [2] = rotation, 0 if image is "north up"
    [3] = top left y (y Origin)
    [4] = rotation, 0 if image is "north up"
    [5] = n-s pixel resolution (pixel Height)
    """
    ulX = geoMatrix[0]
    ulY = geoMatrix[3]
    xDist = geoMatrix[1]
    yDist = geoMatrix[5]
    rtnX = geoMatrix[2]
    rtnY = geoMatrix[4]
    pixel = np.round((x - ulX) / xDist).astype(int)
    line = np.round((ulY - y) / xDist).astype(int)
    return (pixel, line)
# Open the image as a read only image
ds = osgeo.gdal.Open(inFile, gdal.GA_ReadOnly)

# Get image georeferencing information.
geoMatrix = ds.GetGeoTransform()
ulX = geoMatrix[0]    # top left x (x Origin)
ulY = geoMatrix[3]    # top left y (y Origin)
xDist = geoMatrix[1]  # w-e pixel resolution (pixel Width)
yDist = geoMatrix[5]  # n-s pixel resolution (pixel Height)
rtnX = geoMatrix[2]   # rotation, 0 if image is "north up"
rtnY = geoMatrix[4]   # rotation, 0 if image is "north up"

# open shapefile (= border of area of interest)
shp = osgeo.ogr.Open(poly)
source_shp = ogr.GetDriverByName("Memory").CopyDataSource(shp, "")

# get the coordinates of the points from the boundary of the shapefile
source_layer = source_shp.GetLayer(0)
feature = source_layer.GetNextFeature()
geometry = feature.GetGeometryRef()
pts = geometry.GetGeometryRef(0)
points = []
for p in range(pts.GetPointCount()):
    points.append((pts.GetX(p), pts.GetY(p)))
pnts = np.array(points).transpose()
print(pnts)
pnts
array([[ 558470.28969598, 559495.31976318, 559548.50931402,
559362.85560495, 559493.99688721, 558958.22572622,
558529.58862305, 558575.0174293 , 558470.28969598],
[ 6362598.63707171, 6362629.15167236, 6362295.16466266,
6362022.63453845, 6361763.96246338, 6361635.8559779 ,
6361707.07684326, 6362279.69352024, 6362598.63707171]])
# calculate the pixel location of a geospatial coordinate (= define the border of my polygon)
pixels, line = world2Pixel(geoMatrix,pnts[0],pnts[1])
pixels
array([17963, 20013, 20119, 19748, 20010, 18939, 18081, 18172, 17963])
line
array([35796, 35734, 36402, 36948, 37465, 37721, 37579, 36433, 35796])
# create an empty array filled with zeros
data = np.zeros((ds.RasterYSize, ds.RasterXSize))
This is essentially a point-in-polygon problem.
Here's a little library to solve this problem. It's from this page with some modifications to make it more readable.
pip.py
# From http://www.ariel.com.au/a/python-point-int-poly.html
# Modified by Nick ODell
from collections import namedtuple

def point_in_polygon(target, poly):
    """x,y is the point to test. poly is a list of tuples comprising the polygon."""
    point = namedtuple("Point", ("x", "y"))
    line = namedtuple("Line", ("p1", "p2"))
    target = point(*target)

    inside = False
    # Build list of coordinate pairs
    # First, turn it into named tuples
    poly = list(map(lambda p: point(*p), poly))

    # Make two lists, with list2 shifted forward by one and wrapped around
    list1 = poly
    list2 = poly[1:] + [poly[0]]
    poly = list(map(line, list1, list2))

    for l in poly:
        p1 = l.p1
        p2 = l.p2
        if p1.y == p2.y:
            # This line is horizontal and thus not relevant.
            continue
        if not (min(p1.y, p2.y) < target.y <= max(p1.y, p2.y)):
            # This line is entirely above or below the target point.
            continue
        if target.x < min(p1.x, p2.x):
            # Both endpoints are to the right of our point, so a
            # leftward ray from the target cannot cross this line.
            continue
        # The line crosses the horizontal through the target point.
        rise = p1.y - p2.y
        run = p1.x - p2.x
        try:
            slope = rise / float(run)
        except ZeroDivisionError:
            slope = float('inf')
        # Find the x-intercept, that is, the place where the line we are
        # testing reaches the y value of our target point.
        rise_to_intercept = target.y - p1.y
        x_intercept = p1.x + rise_to_intercept / slope
        if target.x < x_intercept:
            # The crossing is to the right of the target point, so the
            # leftward ray does not reach it.
            continue
        inside = not inside
    return inside

if __name__ == "__main__":
    poly = [(2, 2), (1, -1), (-1, -1), (-1, 1)]
    print(point_in_polygon((1.5, 0), poly))
The accepted answer doesn't work for me.
I ended up using the shapely library.
sudo pip install shapely
Code:
import shapely.geometry
poly = shapely.geometry.Polygon([(2,2), (1,-1), (-1,-1), (-1, 1)])
point = shapely.geometry.Point(1.5, 0)
point.intersects(poly)
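Applied back to the masking problem from the question, the same shapely approach can fill the 0/1 array by testing each pixel inside the polygon's bounding box. This is only a sketch that reuses pixels, line and ds from the question, and it uses a prepared geometry for faster repeated tests; note that contains excludes points exactly on the border, whereas intersects includes them:

import numpy as np
import shapely.geometry
from shapely.prepared import prep

# Polygon in pixel coordinates, from the question's world2Pixel output.
pixel_poly = shapely.geometry.Polygon(list(zip(pixels, line)))
prepared_poly = prep(pixel_poly)

mask = np.zeros((ds.RasterYSize, ds.RasterXSize), dtype=np.uint8)
xmin, ymin, xmax, ymax = (int(v) for v in pixel_poly.bounds)
for py in range(ymin, ymax + 1):
    for px in range(xmin, xmax + 1):
        if prepared_poly.contains(shapely.geometry.Point(px, py)):
            mask[py, px] = 1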