find circles with Hough transform - python

the code receives an img with a circle in greyscale and needs to output the following:
Returns:
img_c: RGB image array in float format (range: 0..1) which contains
the original image and the marked circles which were found.
circles: a list - each element represent a circle and contains a list of
3 values: origin_x, origin_y, radius
and I get some error and I don't know why. I already implemented the edge_detect function using a Canny edge detector, and now I need to implement this. I can't use cv2 — just numpy and `from scipy.signal import convolve`.
thanks!!
def find_circles(img: np.array, t_low: np.float32, t_high: np.float32) -> (np.array, list):
    """
    Detect circles in a greyscale image with a gradient-guided Hough transform.

    Args:
        img: greyscale image array (float, range 0..1).
        t_low: low hysteresis threshold forwarded to edge_detect().
        t_high: high hysteresis threshold forwarded to edge_detect().

    Returns:
        img_c: array the shape of the edge map with the detected circle
            outlines drawn as 1s.
        circles: list of [origin_x, origin_y, radius] per detected circle.
    """
    edges, tg_theta = edge_detect(img, t_low, t_high)
    r_min = 14
    ncols, nrows = edges.shape
    r_max = round(min(ncols, nrows) / 2)
    # Hough accumulator: one vote cell per candidate (centre_x, centre_y, radius).
    M = np.zeros((ncols, nrows, r_max))
    for x in range(ncols):
        for y in range(nrows):
            if edges[x, y] == 0:
                continue
            # The centre lies along the edge gradient; parameterise the
            # gradient line by whichever axis is better conditioned.
            if abs(tg_theta[x, y]) <= 1:
                for cx in range(max(x - r_max, 1), min(x + r_max, ncols)):
                    cy = round(tg_theta[x, y] * (cx - x) + y)
                    # Bug fix: cast to float before np.sqrt. Plain Python ints
                    # make np.sqrt raise "loop of ufunc does not support
                    # argument 0 of type int which has no callable sqrt method".
                    r = round(np.sqrt(float((cx - x) ** 2 + (cy - y) ** 2)))
                    if 0 < cy < nrows and 0 < r < r_max:
                        M[cx, cy, r] += 1
            else:
                for cy in range(max(y - r_max, 1), min(y + r_max, nrows)):
                    cx = round((cy - y) / tg_theta[x, y] + x)
                    r = round(np.sqrt(float((cx - x) ** 2 + (cy - y) ** 2)))
                    if 0 < cx < ncols and 0 < r < r_max:
                        M[cx, cy, r] += 1
    # Box-smooth the accumulator so near-miss votes reinforce each other.
    kernel = np.ones((5, 5, 5)) / 125.0
    M_smooth = convolve(M, kernel, mode='same')
    # NOTE(review): zeros_like(edges) inherits the integer edge dtype while the
    # contract promises a float image — confirm downstream usage.
    img_c = np.zeros_like(edges)
    circles = []
    for cx in range(ncols):
        for cy in range(nrows):
            for r in range(r_min, r_max):
                # Vote threshold grows with radius (longer perimeter casts more votes).
                if M_smooth[cx, cy, r] <= r / 25:
                    continue
                # Keep only local maxima within a 15-cell neighbourhood.
                D = 15
                neighbours = M_smooth[max(cx - D, 1):min(cx + D, ncols),
                                      max(cy - D, 1):min(cy + D, nrows),
                                      max(r - D, r_min):min(r + D, r_max)]
                if M_smooth[cx, cy, r] < np.max(neighbours):
                    continue
                circles.append([cx, cy, r])
                # Rasterise the circle outline, stepping ~1 pixel along the arc.
                theta = 0
                while theta <= 2 * np.pi:
                    pt = np.round(np.array([cx, cy]) + np.array([r * np.cos(theta), r * np.sin(theta)]))
                    irow = int(pt[1])
                    icol = int(pt[0])
                    if 0 < irow < nrows and 0 < icol < ncols:
                        img_c[irow, icol] = 1
                    theta = theta + 1 / r
    return img_c, circles
edges is array in int format (values: 0, 1) - binary image with edges pixels set to 1.
I get this error
TypeError: loop of ufunc does not support argument 0 of type int which has no callable sqrt method
in line
r = round(np.sqrt((cx - x)**2 +(cy - y)**2))
EDIT:
now the code runs; I changed it to float like you said
def find_circles(img: np.array, t_low: np.float32, t_high: np.float32) -> (np.array, list):
    """Gradient-guided Hough circle detector (second revision: int->float sqrt fix).

    Returns img_c (mask with detected circle outlines set to 1) and
    circles (list of [origin_x, origin_y, radius] triples).
    """
    edges, tg_theta = edge_detect(img, t_low, t_high)
    r_min = 14
    # NOTE(review): axis 0 is named ncols and axis 1 nrows, and the mask is
    # later written as img_c[icol, irow] — verify this row/column convention
    # matches img; it is a likely reason the overlay appears unchanged.
    ncols, nrows = edges.shape
    r_max = round(min(ncols, nrows) / 2)
    # Accumulator of votes per candidate (centre_x, centre_y, radius).
    M = np.zeros((ncols, nrows, r_max))
    for x in range(ncols):
        for y in range(nrows):
            if edges[x,y] == 0:
                continue
            # Vote along the gradient line, parameterised by the
            # better-conditioned axis.
            if abs(tg_theta[x,y]) <= 1:
                for cx in range(max(x - r_max, 1), min(x + r_max, ncols)):
                    cy = round(tg_theta[x, y] * (cx - x) + y)
                    # float() works around np.sqrt failing on Python ints.
                    r = round(np.sqrt(float((cx - x)**2) +float((cy - y)**2)))
                    if 0 < cy < nrows and 0 < r < r_max:
                        M[cx, cy, r] +=1
            else:
                for cy in range(max(y - r_max, 1), min(y + r_max, nrows)):
                    cx = round((cy-y)/tg_theta[x, y] + x)
                    r = round(np.sqrt(float((cx - x)**2) +float((cy - y)**2)))
                    if 0 < cx < ncols and 0 < r < r_max:
                        M[cx, cy, r] +=1
    # Box-smooth the accumulator so neighbouring votes reinforce.
    kernel = np.ones((5, 5, 5)) / 125.0
    M_smooth = convolve(M, kernel, mode='same')
    img_c = np.zeros_like(img)
    circles=[]
    for cx in range(ncols):
        for cy in range(nrows):
            for r in range(r_min,r_max):
                # Radius-scaled vote threshold.
                if M_smooth[cx, cy, r] <= r/25:
                    continue
                # Keep only local maxima in a 15-cell neighbourhood.
                D=15
                neigbours = M_smooth[ max(cx-D, 1):min(cx+D, ncols), max(cy-D, 1):min(cy+D, nrows), max(r-D, r_min):min(r+D, r_max)]
                if M_smooth[cx, cy, r] < np.max(neigbours):
                    continue
                circles.append([cx, cy, r])
                # Draw the outline, stepping ~1 pixel along the arc.
                theta = 0
                while theta <= 2*np.pi:
                    pt = np.round(np.array([cx,cy]) + np.array([r*np.cos(theta),r*np.sin(theta)]))
                    irow = int(pt[1])
                    icol = int(pt[0])
                    if 0 < irow < nrows and 0 < icol < ncols:
                        img_c[icol, irow] = 1
                    theta = theta + 1/r
    return img_c, circles
and my main code is:
# Load the test image and normalise intensities to 0..1 floats.
img = read_file(file_name='balls5.tiff') / 255
# Run the circle detector with low hysteresis thresholds (9/255, 10/255).
img_c, circles = find_circles(img=img, t_low=9/255, t_high=10/255)
# Grey -> RGB by stacking the single channel three times.
img_rgb = np.dstack((img, img, img))
# Overlay the detected-circle mask on the blue channel.
img_rgb[:,:,2] += img_c
img_rgb = img_rgb.clip(0,1)
plt.imshow(img_rgb);
print(circles)
but the problem is I get the output as the same image as the input
I get:
the correct answer is:
EDIT again:
now I change to this:
def find_circles(img: np.array, t_low: np.float32, t_high: np.float32) -> (np.array, list):
    """Gradient-guided Hough circle detector (third revision).

    Accumulates centre votes along each edge pixel's gradient line, smooths
    the accumulator, keeps radius-thresholded local maxima, and rasterises
    the surviving circles into img_c.
    """
    edges, tg_theta = edge_detect(img, t_low, t_high)
    r_min = 14
    # Accumulator is sized from the full image; the edge map may be smaller.
    ncols, nrows = img.shape
    r_max = round(min(ncols, nrows) / 2)
    M = np.zeros((ncols, nrows, r_max))
    e_col, e_row = edges.shape
    for x in range(e_col):
        for y in range(e_row):
            if edges[x, y] == 0:
                continue
            # Vote along the gradient, parameterised by the better axis.
            if abs(tg_theta[x, y]) <= 1:
                for cx in range(max(x - r_max, 0), min(x + r_max, ncols)):
                    cy = round(tg_theta[x, y] * (cx - x) + y)
                    # float() avoids np.sqrt failing on Python ints.
                    r = round(np.sqrt(float((cx - x) ** 2) + float((cy - y) ** 2)))
                    if 0 < cy < nrows and 0 < r < r_max:
                        M[cx, cy, r] += 1
            else:
                for cy in range(max(y - r_max, 0), min(y + r_max, nrows)):
                    cx = round((cy - y) / tg_theta[x, y] + x)
                    r = round(np.sqrt(float((cx - x) ** 2) + float((cy - y) ** 2)))
                    if 0 < cx < ncols and 0 < r < r_max:
                        M[cx, cy, r] += 1
    # Box-smooth the accumulator so neighbouring votes reinforce.
    kernel = np.ones((5, 5, 5)) / 125.0
    M_smooth = convolve(M, kernel, mode='same')
    img_c = np.zeros_like(img)
    circles = []
    for cx in range(ncols):
        for cy in range(nrows):
            for r in range(r_min, r_max):
                # Radius-scaled vote threshold.
                if M_smooth[cx, cy, r] <= r / 25:
                    continue
                # Keep only local maxima in a 15-cell neighbourhood.
                D = 15
                neighbours = M_smooth[max(cx - D, 1):min(cx + D, ncols), max(cy - D, 1):min(cy + D, nrows),
                             max(r - D, r_min):min(r + D, r_max)]
                if M_smooth[cx, cy, r] < np.max(neighbours):
                    continue
                circles.append([cx, cy, r])
                # Rasterise the circle outline, stepping ~1 pixel along the arc.
                theta = 0
                while theta <= 2 * np.pi:
                    pt = np.round(np.array([cx, cy]) + np.array([r * np.cos(theta), r * np.sin(theta)]))
                    irow = int(pt[1])
                    icol = int(pt[0])
                    # NOTE(review): the mask is written as [icol, irow]; verify
                    # this matches img's (row, col) layout before overlaying.
                    if 0 < irow < nrows and 0 < icol < ncols:
                        img_c[icol, irow] = 1
                    theta = theta + 1 / r
    return img_c, circles
and I get this output:

Related

Fitting arcs on a contour using OpenCV?

I am working on a project in OpenCV where I have a contour and I have to fit some arcs with defined arc length, radius, chord length, and angle. With fit, I mean I have to choose the best arc, that would cover a piece of the contour.
What I have tried till now is in all the contours, I have found corners which are acting as my segments, then using some linear algebra I am calculating the radius and the center of the circle that would best fit that segment and then comparing this with all the other arcs, radius and central angle and fitting the with the least difference in values. This approach is not helping me much as the arcs that I am getting don't lie on the contour. Code is attached
# For each contour, split it at its corners and fit the best-matching coping
# arc (by radius and central angle) to every segment.
for contours in final_contours:
    iter = 0  # NOTE(review): unused, and shadows the builtin `iter`
    # Order the detected corners along the contour.
    dict_sorted, sorted_corners = sortCorners(contours, corners)
    keys_dict_sorted = sorted(dict_sorted.keys())
    # Mark every corner on the debug image.
    for key in keys_dict_sorted:
        [x, y] = dict_sorted[key]
        bl_img = cv2.circle(bl_img, (x, y), 3, (255, 0, 0), -1)
    # cv2_imshow(bl_img)
    for i in range(len(keys_dict_sorted) ):
        # Segment endpoints (idx1, idx3) and their midpoint (idx2); the last
        # segment wraps around to the first corner.
        if (i != len(keys_dict_sorted)-1):
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[i+1]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[i+1]) / 2)
        else:
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[0]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[0]) / 2)
        # Circle through the three sample points; radius == -1 flags collinear points.
        radius, [c_x, c_y] = betterCircleFinder(contours[idx1][0], contours[idx2][0], contours[idx3][0])
        if radius != -1:
            radius = round(truncate(radius, -1))
            c_x = round(truncate(c_x, -1))
            c_y = round(truncate(c_y, -1))
            # Angles of the two segment endpoints as seen from the fitted centre (degrees).
            pt1_angle = 180*(np.arctan2(np.array([contours[idx1][0][1]- c_y], dtype=float), np.array([contours[idx1][0][0] - c_x], dtype=float))/np.pi)
            pt2_angle = 180*(np.arctan2(np.array([contours[idx3][0][1] - c_y], dtype=float),np.array([contours[idx3][0][0] - c_x], dtype=float))/np.pi)
            # print(pt1_angle, pt2_angle)
            angle = abs(pt1_angle - pt2_angle)
            # print("Angle : ", angle)
            # Convert fitted radius from pixels to real-world units.
            actualRadius = radius * inchPerPixel
            # print("Actual Radius : ", actualRadius)
            # Random colour per segment for the debug overlay.
            b = random.randint(0,255)
            g = random.randint(0, 255)
            r = random.randint(0,255)
            # Relative error against every candidate coping, weighting radius
            # and central-angle mismatch equally.
            error = [0 for i in range(len(copings))]
            idx = 0
            for coping in copings:
                err = abs(actualRadius - (coping.radius))
                err1 = abs(angle - coping.centralAngle)
                error[idx] = (0.5 * err/actualRadius) + (0.5 * err1[0]/coping.centralAngle)
                idx+=1
                # bl_img = draw_coris(bl_img, int(coping.radius * pixelPerInch), 0, pt1_angle, pt1_angle + coping.centralAngle , (c_x, c_y), (b, g,r), 5)
                # cv2_imshow(bl_img)
            # Draw the coping with the smallest combined error.
            index_min = min(range(len(error)), key=error.__getitem__)
            # pprint.pprint(error)
            # print(index_min, error[index_min])
            # print(copings[index_min].radius * pixelPerInch)
            bl_img = draw_coris(bl_img, int(copings[index_min].radius * pixelPerInch), 0, pt1_angle, pt1_angle + copings[index_min].centralAngle , (int(c_x), int(c_y)), (b, g,r), 5)
            # Guard against centres that do not fit in 32-bit ints (degenerate fits).
            if (abs(c_x) > (2 ** 31 -1) or abs(c_y) > (2**31 -1)):
                print("OVERFLOW VALUES")
                # cv2_imshow(bl_img)
                continue
def betterCircleFinder(pt1, pt2, pt3):
    """
    Circle through three 2-D points.

    Solves x^2 + y^2 + 2gx + 2fy + c = 0 for (g, f, c); the centre is
    (-g, -f) and the radius is sqrt(g^2 + f^2 - c).

    Args:
        pt1, pt2, pt3: (x, y) point coordinates.

    Returns:
        (radius, [centre_x, centre_y]), or (-1, [-1, -1]) when the points
        are collinear and no circle exists.
    """
    A = np.array([[2 * pt1[0], 2 * pt1[1], 1],
                  [2 * pt2[0], 2 * pt2[1], 1],
                  [2 * pt3[0], 2 * pt3[1], 1]])
    B = np.array([[-1 * (pt1[0] ** 2 + pt1[1] ** 2)],
                  [-1 * (pt2[0] ** 2 + pt2[1] ** 2)],
                  [-1 * (pt3[0] ** 2 + pt3[1] ** 2)]])
    det = np.linalg.det(A)
    if det == 0:
        return -1, [-1, -1]
    # Bug fix: the original line `C = np.linalg.inv(A)#B` lost its `@`, so the
    # multiplication by B was commented out and C was just the inverse matrix.
    # solve() computes inv(A) @ B directly and more stably.
    C = np.squeeze(np.linalg.solve(A, B))
    c_x = -1 * C[0]
    c_y = -1 * C[1]
    c = C[2]
    radius = math.sqrt(c_x ** 2 + c_y ** 2 - c)
    return radius, [c_x, c_y]
This is the code that find the details of the circle that best fits the segment.

How to accelerate this function using Numba?

I was trying to optimize this function using Numba, but I am unable to do it. I think there is no part of the code which can be accelerated. If anyone can help me with an optimized version of this, my program would become blazing fast. Please tell me if any dataset or other info is needed. When I apply @jit directly to this, it does not work.
def c_a(x, y, z, counter, p_l):
    """Sum complex atom scores in a radius-5 spherical voxel neighbourhood of (x, y, z).

    counter == 1 reads the receptor ("c_r.pdb" scored by "Ei.txt" via global
    r_a_t); any other value reads the ligand (path p_l scored by "E.txt" via
    global l_a_t).  Relies on globals X, Y, Z for the grid bounds.
    """
    # start = time.time()
    if counter == 1:
        # Preserve the query point; x/y/z are reused as grid indices below.
        l = x
        m = y
        n = z
        path = "c_r.pdb"
        global r_a_t
        p = Bio.PDB.PDBParser()
        structure = p.get_structure('mSN1', path)
        c_r = [a.get_coord() for a in structure.get_atoms()]
        lengthnew = len(c_r)
        # Grid origin at (-45, -45, -45), 1.2-unit voxels.
        m_d = np.array([-45, -45, -45])
        # NOTE(review): np.complex is removed in modern NumPy — use complex.
        a_s_r = np.zeros((128, 128, 128), np.complex)
        for i in range(0, lengthnew):
            x = int(math.floor((c_r[i][0] - m_d[0]) / 1.2))
            y = int(math.floor((c_r[i][1] - m_d[1]) / 1.2))
            z = int(math.floor((c_r[i][2] - m_d[2]) / 1.2))
            # NOTE(review): the score file is re-read once per atom — hoisting
            # the file reads out of the function reportedly gave a ~70x speedup.
            with open("Ei.txt", 'r') as ei_values:
                for row in ei_values:
                    s_v = row.split()
                    if s_v[0] == r_a_t[i] :
                        a_s_r[x, y, z] = np.complex(s_v[1])
        # All in-bounds voxels within Euclidean distance 5 of the query voxel.
        n_n = lambda x, y, z : [(x2, y2, z2) for x2 in range(x - 5, x + 6)
                                for y2 in range(y - 5, y + 6)
                                for z2 in range(z - 5, z + 6)
                                if (-1 < x < X and
                                    -1 < y < Y and
                                    -1 < z < Z and
                                    (x != x2 or y != y2 or z != z2) and
                                    (0 <= x2 < X) and
                                    (0 <= y2 < Y) and
                                    (0 <= z2 < Z) and
                                    ((( abs(x - x2)) ** 2 + (abs(y - y2)) ** 2 + (abs(z - z2)) ** 2 ) <= 25))]
        m = n_n(l, m, n)
        # Accumulate the scores of every neighbour voxel.
        result = 0
        for i in range(0, len(m)):
            a = m[i][0]
            b = m[i][1]
            c = m[i][2]
            result = result + a_s_r[a][b][c]
        return result
    else:
        # Ligand branch: identical flow with the ligand path and score table.
        l = x
        m = y
        n = z
        path = p_l
        global l_a_t
        p = Bio.PDB.PDBParser()
        structure = p.get_structure('mSN1', path)
        c_l = [a.get_coord() for a in structure.get_atoms()]
        lengthnew = len(c_l)
        m_d = np.array([-45, -45, -45])
        a_s_l = np.zeros((128, 128, 128), np.complex)
        for i in range(0, lengthnew):
            x = int(math.floor((c_l[i][0] - m_d[0]) / 1.2))
            y = int(math.floor((c_l[i][1] - m_d[1]) / 1.2))
            z = int(math.floor((c_l[i][2] - m_d[2]) / 1.2))
            with open("E.txt", 'r') as e_v:
                for row in e_v:
                    s_v = row.split()
                    if s_v[0] == l_a_t[i] :
                        a_s_l[x, y, z] = np.complex(s_v[1])
        n_n = lambda x, y, z : [(x2, y2, z2) for x2 in range(x - 5, x + 6)
                                for y2 in range(y - 5, y + 6)
                                for z2 in range(z - 5, z + 6)
                                if (-1 < x < X and
                                    -1 < y < Y and
                                    -1 < z < Z and
                                    (x != x2 or y != y2 or z != z2) and
                                    (0 <= x2 < X) and
                                    (0 <= y2 < Y) and
                                    (0 <= z2 < Z) and
                                    (((abs(x - x2)) ** 2 + (abs(y - y2)) ** 2 + (abs(z - z2)) ** 2 ) <= 25))]
        m = n_n(l, m, n)
        result = 0
        for i in range(0, len(m)):
            a = m[i][0]
            b = m[i][1]
            c = m[i][2]
            result = result + a_s_l[a][b][c]
        # print "c_a : ", time.time() - start
        return result
Solved.
Brought out all the file reading steps outside the function, as they were being executed many times. It gave a 70x boost.
Just left the lambda functions in the function as they are dependent on x, y & z.

Perlin noise looks streaky and not coherent

Now that my perlin generator is 'working' I created noise, to find that it is nothing like what I see on the internets...
My noise:
Notice the streaks:
What I am aiming to get (obviously with corresponding colour):
1:
Why does mine look so noisy and nasty?
Code (sorry for no stub, the Perlin noise makes up most of the program so it's important to include the full program):
from PIL import Image
from tkinter import filedialog
from random import randint, random
#Initialise width / height
width = 625
height = 625
#Import gradient picture - 200*1 image used to texture perlin noise
#R,G,B,Alpha
gradient = Image.open("image.png")
gradlist = list(gradient.getdata())
#Create new image
img = Image.new('RGBA', (width, height), color=(255, 255, 255, 255))
#Perlin noise modules --------------------------------------------------------------------------------------------------------
#Modules
from random import sample
from math import floor
# Permutation table, doubled so p[A + 1]-style lookups never wrap.
# NOTE(review): the reference implementation uses 256 values in 0..255; this
# table holds width*height distinct values, so the `& 255` masking in perlin()
# no longer matches the table contents — confirm against Perlin's reference.
p = sample([x for x in range(0, (width * height))], (width * height)) * 2
#Antialiasing (quintic fade) curve
def fade(t):
    """Perlin's quintic ease 6t^5 - 15t^4 + 10t^3; smooths interpolation weights."""
    return 6 * (t ** 5) - 15 * (t ** 4) + 10 * (t ** 3)
#Linear interpolation
def lerp(t, a, b):
    """Interpolate from a to b by fraction t (t=0 gives a, t=1 gives b)."""
    return a + (t * (b - a))
#Clever bitwise hash stuff - picks a unit vector from 12 possible - (1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0),(1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1),(0,1,1),(0,-1,1),(0,1,-1),(0,-1,-1)
def grad(hash, x, y, z):
    """Dot product of (x, y, z) with the gradient vector selected by the hash."""
    # Bug fix: Perlin's reference implementation masks with `& 15`; the
    # original `% 15` mapped hash 15 onto 0, skewing the gradient distribution.
    h = hash & 15
    u = x if h < 8 else y
    if h < 4:
        v = y
    elif h in (12, 14):
        v = x
    else:
        v = z
    # Low two bits of h choose the signs of the two components.
    return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v)
#Perlin function
def perlin(x,y,z):
    """3-D Perlin noise at (x, y, z); returns the (negated) trilinear blend of corner gradients."""
    # Integer lattice cell, masked to 8 bits as in the reference implementation.
    ix = int(floor(x)) & 255
    iy = int(floor(y)) & 255
    iz = int(floor(z)) & 255
    # Fractional position inside the cell.
    x -= int(floor(x))
    y -= int(floor(y))
    z -= int(floor(z))
    # Quintic fade weights for each axis.
    u = fade(x)
    v = fade(y)
    w = fade(z)
    #Complicated hash stuff
    # Hash the 8 cube corners through the permutation table.
    # NOTE(review): p holds values up to width*height-1 rather than 0..255, so
    # these nested lookups do not behave like Perlin's reference table — a
    # likely cause of the streaky output; confirm.
    A = p[ix] + iy
    AA = p[A] + iz
    AB = p[A + 1] + iz
    B = p[ix + 1] + iy
    BA = p[B] + iz
    BB = p[B + 1] + iz
    # Trilinear blend of the eight corner gradients (note the leading negation).
    return -lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z),grad(p[BA], x - 1, y, z)),lerp(u, grad(p[AB], x, y - 1, z),grad(p[BB], x - 1, y - 1, z))),lerp(v, lerp(u, grad(p[AA + 1], x, y, z - 1),grad(p[BA + 1], x - 1, y, z - 1)), lerp(u, grad(p[AB + 1], x, y - 1, z - 1),grad(p[BB + 1], x - 1, y - 1, z - 1))))
def octavePerlin(x, y, z, octaves, persistence):
    """Fractal (fBm) noise: sum `octaves` perlin() samples with doubling
    frequency and geometrically decaying amplitude, normalised by the total
    weight so the result stays in the base noise range."""
    total = 0
    frequency = 1
    amplitude = 1
    maxValue = 0
    # Bug fix: the loop previously iterated `for x in range(octaves)`,
    # clobbering the x coordinate parameter from the first octave onward.
    for _ in range(octaves):
        total += perlin(x * frequency, y * frequency, z * frequency) * amplitude
        maxValue += amplitude
        amplitude *= persistence
        frequency *= 2
    return total / maxValue
# Fixed z-slice so the 2-D image samples a single plane of the 3-D noise.
z = random()
# Map noise (≈ -1..1) to an index into the 200-texel gradient strip and build
# the image. NOTE(review): 100 + 100*noise can reach index 200, one past the
# end of a 200-texel strip, and the x-outer/y-inner order transposes the
# image relative to putdata's row-major fill — confirm intent.
img.putdata([gradlist[int(octavePerlin((x + random() - 0.5) / 1000, (y + random() - 0.5) / 1000, z, 4, 2) * 100 + 100)] for x in range(width) for y in range(height)])
img.save(filedialog.asksaveasfilename() + ".png", "PNG")

Perlin noise artifacts

I've taken the Wikipedia Perlin Noise Algorithm and implemented it in Python, here is the code:
import random
import math
from PIL import Image
from decimal import Decimal
IMAGE_SIZE = 200
PERLIN_RESOLUTION = 10
GRADIENT = []
# Random unit gradient vector (as a Decimal pair) at every lattice vertex.
for x in range(PERLIN_RESOLUTION + 1):
    GRADIENT.append([])
    for y in range(PERLIN_RESOLUTION + 1):
        angle = random.random() * 2 * math.pi
        vector = (
            Decimal(math.cos(angle)),
            Decimal(math.sin(angle))
        )
        GRADIENT[x].append(vector)
def lerp(a0, a1, w):
    """Linearly blend a0 toward a1 by weight w: (1 - w) * a0 + w * a1."""
    return (1 - w) * a0 + w * a1
def dotGridGradient(ix, iy, x, y):
    """Dot product of the gradient at lattice vertex (ix, iy) with the offset from that vertex to (x, y)."""
    dx = x - Decimal(ix)
    dy = y - Decimal(iy)
    # NOTE(review): GRADIENT is indexed [iy][ix] here but filled as
    # GRADIENT[x][y] above — only interchangeable because the table is
    # square; confirm the intended orientation.
    return (dx*GRADIENT[iy][ix][0] + dy*GRADIENT[iy][ix][1])
def perlin(x, y):
    """2-D Perlin noise at (x, y), where x and y are Decimals.

    NOTE(review): the blend weights sx/sy are used raw in lerp(), i.e. pure
    linear interpolation — this is what makes the lattice vertices visible;
    the accepted fix is to interpolate with the quintic smoothstep instead.
    """
    # Lattice cell containing the point; int() truncates toward zero, hence
    # the -1 adjustment for non-positive coordinates.
    if x > 0.0:
        x0 = int(x)
    else:
        x0 = int(x) - 1
    x1 = x0 + 1
    if y > 0.0:
        y0 = int(y)
    else:
        y0 = int(y) - 1
    y1 = y0 + 1
    # Fractional position inside the cell (interpolation weights).
    sx = x - Decimal(x0)
    sy = y - Decimal(y0)
    # Blend the four corner gradient contributions: two x-lerps, one y-lerp.
    n0 = dotGridGradient(x0, y0, x, y)
    n1 = dotGridGradient(x1, y0, x, y)
    ix0 = lerp(n0, n1, sx)
    n0 = dotGridGradient(x0, y1, x, y)
    n1 = dotGridGradient(x1, y1, x, y)
    ix1 = lerp(n0, n1, sx)
    value = lerp(ix0, ix1, sy)
    return value
image = Image.new('RGB', (IMAGE_SIZE, IMAGE_SIZE))
pixels = image.load()
# Sample the noise over a PERLIN_RESOLUTION x PERLIN_RESOLUTION lattice
# stretched across the image.
for i in range(IMAGE_SIZE):
    x = Decimal(i) / IMAGE_SIZE
    for j in range(IMAGE_SIZE):
        y = Decimal(j) / IMAGE_SIZE
        value = perlin(x * 10, y * 10)
        # Map noise (≈ -1..1) to 0..255 greyscale.
        # NOTE(review): greyscale is a Decimal here, while PIL pixel tuples
        # expect ints — confirm this assignment runs without conversion.
        greyscale = (value + 1) * 255 / 2
        pixels[i, j] = (greyscale, greyscale, greyscale)
image.save('artifacts.png', 'PNG')
Here is the resulting image that is created by the script:
I must be missing something here, you can very clearly see the vertices. Can anyone let me know what is going wrong?
You need to use smoothstep instead of linear interpolation.
def smoothstep(a0, a1, w):
    """Interpolate a0 -> a1 using Perlin's quintic fade of w instead of a straight lerp."""
    t = w * w * w * (w * (w * 6 - 15) + 10)
    return a0 + t * (a1 - a0)

Fast Voxel Traversal 2D

I'm trying traverse all the cells that a line goes through. I've found the Fast Voxel Traversal Algorithm that seems to fit my needs, but I'm currently finding to be inaccurate. Below is a graph with a red line and points as voxel coordinates that the algorithm gives. As you can see it is almost correct except for the (4, 7) point, as it should be (5,6). I'm not sure if i'm implementing the algorithm correctly either so I've included it in Python. So i guess my question is my implementation correct or is there a better algo to this?
Thanks
def getVoxelTraversalPts(strPt, endPt, geom):
    """
    Amanatides-Woo style voxel walk from strPt to endPt.

    Args:
        strPt, endPt: (x, y) world coordinates of the segment ends.
        geom: GDAL-style geotransform [x0, dx, _, y0, _, dy] mapping world
            coordinates to fractional cell indices.

    Returns:
        List of [col, row] cell indices visited by the segment.
    """
    Max_Delta = 1000000.0
    #origin
    x0 = geom[0]
    y0 = geom[3]
    (sX, sY) = (strPt[0], strPt[1])
    (eX, eY) = (endPt[0], endPt[1])
    dx = geom[1]
    dy = geom[5]
    # Convert both endpoints from world coordinates to fractional cell indices.
    sXIndex = ((sX - x0) / dx)
    sYIndex = ((sY - y0) / dy)
    # Bug fix: the end point must be transformed exactly like the start point.
    # The original `((eX - sXIndex) / dx) + sXIndex` subtracted an *index*
    # from a *coordinate*, which is only correct for the unit geotransform.
    eXIndex = ((eX - x0) / dx)
    eYIndex = ((eY - y0) / dy)
    deltaX = float(eXIndex - sXIndex)
    deltaXSign = 1 if deltaX > 0 else -1 if deltaX < 0 else 0
    # Parametric distance between successive x cell boundaries; Max_Delta
    # stands in for "never crosses" when the segment is axis-aligned.
    tDeltaX = min((deltaXSign / deltaX), Max_Delta) if deltaXSign != 0 else Max_Delta
    maxX = tDeltaX * (1 - sXIndex + int(sXIndex)) if deltaXSign > 0 else tDeltaX * (sXIndex - int(sXIndex))
    deltaY = float(eYIndex - sYIndex)
    deltaYSign = 1 if deltaY > 0 else -1 if deltaY < 0 else 0
    tDeltaY = min(deltaYSign / deltaY, Max_Delta) if deltaYSign != 0 else Max_Delta
    maxY = tDeltaY * (1 - sYIndex + int(sYIndex)) if deltaYSign > 0 else tDeltaY * (sYIndex - int(sYIndex))
    x = sXIndex
    y = sYIndex
    ptsIndexes = []
    pt = [round(x), round(y)]
    ptsIndexes.append(pt)
    prevPt = pt
    while True:
        # Step along whichever axis crosses its next cell boundary first.
        if maxX < maxY:
            maxX += tDeltaX
            x += deltaXSign
        else:
            maxY += tDeltaY
            y += deltaYSign
        pt = [round(x), round(y)]
        if pt != prevPt:
            #print pt
            ptsIndexes.append(pt)
            prevPt = pt
        # Both parametric positions past the segment end -> traversal done.
        if maxX > 1 and maxY > 1:
            break
    return (ptsIndexes)
The voxels that you are walking start at 0.0, i.e. the first voxel spans the space from 0.0 to 1.0, and not from -0.5 to 0.5 as you seem to be assuming. In other words, they are the ones marked with the dashed line, and not the solid one.
If you want voxels to be your way, you will have to fix initial maxX and maxY calculations.
Ain't nobody got time to read the paper you posted and figure out if you've implemented it correctly.
Here's a question, though. Is the algorithm you've used (a) actually meant to determine all the cells that a line passes through or (b) form a decent voxel approximation of a straight line between two points?
I'm more familiar with Bresenham's line algorithm which performs (b). Here's a picture of it in action:
Note that the choice of cells is "aesthetic", but omits certain cells the line passes through. Including these would make the line "uglier".
I suspect a similar thing is going on with your voxel line algorithm. However, looking at your data and the Bresenham image suggests a simple solution. Walk along the line of discovered cells, but, whenever you have to make a diagonal step, consider the two intermediate cells. You can then use a line-rectangle intersection algorithm (see here) to determine which of the candidate cells should have, but wasn't, included.
I guess just to be complete, I decided to use a different algorithm — the one referenced in dtb's answer on another question.
here's the implementation
def getIntersectPts(strPt, endPt, geom=[0,1,0,0,0,1]):
    '''
    Find intersection pts for every half cell size and return the cell
    coordinates that the line passes through.
    ** cell size has only been tested with 1
    NOTE(review): mutable default argument `geom` — harmless here since it is
    never mutated, but worth replacing with None plus a fallback.
    '''
    # Grid origin and cell spacing (GDAL-style geotransform).
    x0 = geom[0]
    y0 = geom[3]
    (sX, sY) = (strPt[0], strPt[1])
    (eX, eY) = (endPt[0], endPt[1])
    xSpace = geom[1]
    ySpace = geom[5]
    # Fractional cell indices of both endpoints.
    sXIndex = ((sX - x0) / xSpace)
    sYIndex = ((sY - y0) / ySpace)
    # NOTE(review): subtracts an index from a coordinate; only correct for
    # unit cell size with zero origin — confirm for other geoms.
    eXIndex = ((eX - sXIndex) / xSpace) + sXIndex
    eYIndex = ((eY - sYIndex) / ySpace) + sYIndex
    dx = (eXIndex - sXIndex)
    dy = (eYIndex - sYIndex)
    xHeading = 1.0 if dx > 0 else -1.0 if dx < 0 else 0.0
    yHeading = 1.0 if dy > 0 else -1.0 if dy < 0 else 0.0
    xOffset = (1 - (math.modf(sXIndex)[0]))
    yOffset = (1 - (math.modf(sYIndex)[0]))
    ptsIndexes = []
    x = sXIndex
    y = sYIndex
    pt = (x, y) #1st pt
    # Slope/intercept of the line in index space (undefined for vertical lines).
    if dx != 0:
        m = (float(dy) / float(dx))
        b = float(sY - sX * m )
    dx = abs(int(dx))
    dy = abs(int(dy))
    # Vertical line: step cell by cell along y and return early.
    if dx == 0:
        for h in range(0, dy + 1):
            pt = (x, y + (yHeading *h))
            ptsIndexes.append(pt)
        return ptsIndexes
    #print("m {}, dx {}, dy {}, b {}, xdir {}, ydir {}".format(m, dx, dy, b, xHeading, yHeading))
    #print("x {}, y {}, {} {}".format(sXIndex, sYIndex, eXIndex, eYIndex))
    #snap to half a cell size so we can find intersections on cell boundaries
    sXIdxSp = round(2.0 * sXIndex) / 2.0
    sYIdxSp = round(2.0 * sYIndex) / 2.0
    eXIdxSp = round(2.0 * eXIndex) / 2.0
    eYIdxSp = round(2.0 * eYIndex) / 2.0
    # ptsIndexes.append(pt)
    prevPt = False
    #advance half grid size along x, solving the line for y at each step
    for w in range(0, dx * 4):
        x = xHeading * (w / 2.0) + sXIdxSp
        y = (x * m + b)
        # Stop once we step past the end of the segment.
        if xHeading < 0:
            if x < eXIdxSp:
                break
        else:
            if x > eXIdxSp:
                break
        pt = (round(x), round(y)) #snapToGrid
        # print(w, x, y)
        if prevPt != pt:
            ptsIndexes.append(pt)
            prevPt = pt
    #advance half grid size along y, solving the line for x at each step
    for h in range(0, dy * 4):
        y = yHeading * (h / 2.0) + sYIdxSp
        x = ((y - b) / m)
        if yHeading < 0:
            if y < eYIdxSp:
                break
        else:
            if y > eYIdxSp:
                break
        pt = (round(x), round(y)) # snapToGrid
        # print(h, x, y)
        if prevPt != pt:
            ptsIndexes.append(pt)
            prevPt = pt
    return set(ptsIndexes) #eliminate duplicates

Categories