How can I order a list of orthogonal polygon points?
For example, I have a list of orthogonal polygon points:
data = [(2, 0), (5, 0), (5, 7), (4, 7), (4, 5), (3, 5),(3, 3), (2, 3), (2, 2), (3, 2), (3, 7), (2, 7)]
The points are not in order.
I want to order it in a counter-clockwise way like this:
out = [(2,0),(5,0),(5,7),(4,7),(4,5),(3,5),(3,7),(2,7),(2,3),(3,3),(3,2),(2,2)]
I have already tried deflate_hull, but it did not give the correct result: the outline I get does not match the expected one.
Is there an algorithm to solve this problem?
You can use the following recursive function:
def sort_ortho_poly(points, current=None, start=None, go_x=True):
    # initialize the starting point at the bottom left, which should have the least sum of x and y
    if not current:
        start = current = min(points, key=sum)
    # if we're going x-wards, v would be the y index (1), h would be the x index (0), and vice versa
    v, h = go_x, not go_x
    # remove the current point from the list of points so the next recursion would be processing the remaining points
    remaining = points[:]
    remaining.remove(current)
    # if there is no more remaining point
    if not remaining:
        # we've found a path if we are able to connect back to the starting point, or else we don't
        return [current] if start[v] == current[v] else []
    # try each point in the remaining points that goes in the right direction from the current point
    for next in [p for p in remaining if p[v] == current[v]]:
        # recursively find a valid path from the remaining points after flipping the direction
        path = sort_ortho_poly(remaining, next, start, not go_x)
        # if we get a path that does go back to the starting point, we have to make sure the path is valid
        if path:
            # the current edge (e1, e2)
            e1, e2 = current, next
            # make sure e1 is lower than or left of e2
            if e1[h] > e2[h]:
                e1, e2 = e2, e1
            # for each edge (p1, p2) in the path, including the final edge connecting to the starting point
            for p1, p2 in zip(path, path[1:] + [start]):
                # make sure p1 is lower than or left of p2
                if p1[0] == p2[0] and p1[1] > p2[1] or p1[1] == p2[1] and p1[0] > p2[0]:
                    p1, p2 = p2, p1
                # if the edge is in the same line as the current edge
                if p1[v] == p2[v] == e1[v]:
                    # make sure the two edges don't overlap
                    if e1[h] < p1[h] < e2[h] or e1[h] < p2[h] < e2[h] or p1[h] < e1[h] < p2[h] or p1[h] < e2[h] < p2[h]:
                        break
                # if the edge is perpendicular to the current edge, make sure they don't cross over
                elif p1[h] == p2[h] and e1[h] < p1[h] < e2[h] and p1[v] < e1[v] < p2[v]:
                    break
            else:
                # the path is valid! we append the path to the current point and return
                return [current, *path]
    # return empty if it's a dead end
    return []
so that:
data = [(2, 0), (5, 0), (5, 7), (4, 7), (4, 5), (3, 5),(3, 3), (2, 3), (2, 2), (3, 2), (3, 7), (2, 7)]
print(sort_ortho_poly(data))
would output:
[(2, 0), (5, 0), (5, 7), (4, 7), (4, 5), (3, 5), (3, 7), (2, 7), (2, 3), (3, 3), (3, 2), (2, 2)]
I am filtering a subset of edges so I can iterate through them. In this case, I am excluding the "end edges", which are the final edges along a chain:
import networkx as nx
graph = nx.Graph()
graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4)])
end_nodes = [n for n in graph.nodes if nx.degree(graph, n) == 1]
end_edges = graph.edges(end_nodes)
print(f"end edges: {end_edges}")
for edge in graph.edges:
    if edge not in end_edges:
        print(f"edge {edge} is not an end edge.")
    else:
        print(f"edge {edge} is an end edge.")
However, when you run this code, you get the following output:
end edges: [(0, 1), (4, 3)]
edge (0, 1) is an end edge.
edge (1, 2) is an end edge.
edge (2, 3) is an end edge.
edge (3, 4) is an end edge.
Edges (1, 2) and (2, 3) are not in end_edges, yet the condition edge not in end_edges evaluates to False for them, which seems to imply that they are included when they are not.
What is going on, and how can I filter this properly?
Python version is 3.7, NetworkX is 2.4.
You can convert end_edges to a set of frozensets, so each edge is unordered and compares equal regardless of node order.
>>> graph = nx.Graph()
>>> graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> end_nodes = [n for n in graph.nodes if nx.degree(graph, n) == 1]
>>> end_edges = set(map(frozenset, graph.edges(end_nodes)))
>>> end_edges
{frozenset({3, 4}), frozenset({0, 1})}
>>> for edge in graph.edges:
...     print(edge, frozenset(edge) in end_edges)
...
(0, 1) True
(1, 2) False
(2, 3) False
(3, 4) True
import networkx as nx
graph = nx.Graph()
graph.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 4)])
end_nodes = [n for n in graph.nodes if nx.degree(graph, n) == 1]
end_edges = graph.edges(end_nodes)
print(f"end edges: {end_edges}")
for edge in graph.edges:
    if edge not in list(end_edges):
        print(f"edge {edge} is not an end edge.")
    else:
        print(f"edge {edge} is an end edge.")
This should return what you ask for.
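One caveat, based on the end-edge output shown in the question ([(0, 1), (4, 3)]): membership in a plain list of tuples is order-sensitive, so the edge (3, 4) would not match (4, 3). The frozenset approach above avoids that, for example:
>>> (3, 4) in [(0, 1), (4, 3)]
False
>>> frozenset((3, 4)) in {frozenset(e) for e in [(0, 1), (4, 3)]}
True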
I'm trying to compute the Manhattan distance for an 8-puzzle with A*. I realize that I'm doing something wrong with how I compare the current positions with the goal, but I can't figure out exactly what (I'm pretty new to Python). Please help.
def manhattan_distance(self, goal):
    dist = 0
    x = 0
    y = 0
    goal = [(0, 0), (1, 0), (2, 0),
            (0, 1), (1, 1), (2, 1),
            (0, 2), (1, 2), (2, 2)]
    for row in range(len(self.board)):
        for col in range(len(self.board)):
            val = self.board[row][col]
            if val != 0:
                for x, y in goal:
                    dist += abs(row - x) + abs(col - y)
    return dist
I'm not getting any error message, it just keeps searching through nodes.
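The inner for x, y in goal loop adds the distance to every goal cell for each tile, instead of just the distance to that tile's own goal cell, so the heuristic is far too large. A minimal sketch of the usual fix, assuming self.board[row][col] holds the tile number and goal[val] is that tile's target as (column, row), which is how the goal list in the question reads (that convention is an assumption):
def manhattan_distance(self):
    # goal[val] is the (x, y) target of tile `val`, with x = column and y = row (assumed convention)
    goal = [(0, 0), (1, 0), (2, 0),
            (0, 1), (1, 1), (2, 1),
            (0, 2), (1, 2), (2, 2)]
    dist = 0
    for row in range(len(self.board)):
        for col in range(len(self.board[row])):
            val = self.board[row][col]
            if val != 0:
                x, y = goal[val]  # look up only this tile's goal cell
                dist += abs(col - x) + abs(row - y)
    return dist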
I need to create an 8x8 grid and distribute 10 coins in random positions on the grid. The problem I am facing is that the randint function will sometimes generate the same random coordinates, so only 9 or 8 coins end up on the grid. How can I make sure this doesn't happen? Cheers :) This is my code so far:
from random import randint

grid = []
#Create a 8x8 grid
for row in range(8):
    grid.append([])
    for col in range(8):
        grid[row].append("0")

#create 10 random treasure chests
#problem is that it might generate the same co-ordinates and therefore not enough coins
for coins in range(10):
    c_x = randint(0, len(grid)-1)
    c_y = randint(0, len(grid[0])-1)
    while c_x == 7 and c_y == 0:
        c_x = randint(0, len(grid)-1)
        c_y = randint(0, len(grid[0])-1)
    else:
        grid[c_x][c_y] = "C"

for row in grid:
    print(" ".join(row))
I have included a while/else - as there must not be a coin in the bottom left corner of the grid
You only have 64 cells, so you can generate all coordinates as (x, y) tuples and then use random.sample to get 10 unique elements directly, with no need to check or redraw.
import random
from itertools import product

g = [['0' for _ in range(8)] for _ in range(8)]

coord = list(product(range(8), range(8)))

for coins in random.sample(coord, 10):
    g[coins[0]][coins[1]] = 'C'

for row in g:
    print(' '.join(row))
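If you also need to keep the bottom-left corner empty, as the question requires, a small tweak (a sketch, using the (7, 0) coordinate from the question's own check) is to drop that cell from the candidates before sampling:
coord = [c for c in product(range(8), range(8)) if c != (7, 0)]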
So you wish to generate 10 random unique coordinates?
You can use a set to guarantee uniqueness:
cords_set = set()
while len(cords_set) < 10:
    x, y = 7, 0
    while (x, y) == (7, 0):
        x, y = randint(0, len(grid) - 1), randint(0, len(grid[0]) - 1)
    # that will make sure we don't add (7, 0) to cords_set
    cords_set.add((x, y))
This will generate a set of tuples that represent (x, y) coordinates.
A few examples of the output of print(cords_set):
{(5, 6), (7, 6), (4, 4), (6, 3), (7, 4), (6, 2), (3, 6), (0, 4), (1, 7), (5, 2)}
{(7, 3), (1, 3), (2, 6), (5, 5), (4, 6), (3, 0), (0, 7), (2, 0), (4, 1), (6, 5)}
{(1, 2), (1, 3), (6, 7), (3, 3), (4, 5), (4, 4), (6, 0), (1, 0), (2, 5), (2, 4)}
You could add another check in your while loop to make sure there is not already a coin at the currently chosen coordinate.
BTW, you could also avoid the checks you already have by changing the range of your randint directly to match your needs.
Or you could generate all valid coordinates (all 8*8 = 64 cells minus the excluded corner, i.e. 63) and then pick 10 distinct ones at random, for example with the np.random.choice function.
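A minimal sketch of that idea; since np.random.choice only draws from 1-D data, it samples indices into the coordinate list (grid is assumed to be the 8x8 list of lists built in the question):
import numpy as np

# all 8x8 cells except the bottom-left corner (7, 0), which must stay empty
coords = [(x, y) for x in range(8) for y in range(8) if (x, y) != (7, 0)]

# draw 10 distinct indices without replacement, then place the coins
for i in np.random.choice(len(coords), size=10, replace=False):
    x, y = coords[i]
    grid[x][y] = "C"  # grid is the 8x8 list of lists from the question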
Look at the code below:
from random import randint

grid = []
#Create a 8x8 grid
for row in range(8):
    grid.append([])
    for col in range(8):
        grid[row].append("0")

for coins in range(10):
    c_x = randint(0, len(grid)-1)
    c_y = randint(0, len(grid[0])-1)
    while grid[c_x][c_y] == "C":
        c_x = randint(0, len(grid) - 1)
        c_y = randint(0, len(grid[0]) - 1)
    grid[c_x][c_y] = "C"
Then, after generating the coordinates, you check that there is no 'C' in place before assigning one. If there is, you draw again and re-check; if there is not, you assign it and move on to the next coin.
Let me know if this helps ☺
teren = [
'########',
'#s.....#',
'###..#.#',
'#...##.#',
'#.#....#',
'#.####.#',
'#......#',
'###e####'
]
def bfs(teren, start, end):
    queue = []
    visited = []
    queue.append([start])
    while queue:
        path = queue.pop()
        node = path[-1]
        x = node[0]
        y = node[1]
        if node == end:
            return path
        if node in visited or teren[x][y] == "#":
            continue
        visited.append(node)
        for adjacent in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
            new_path = list(path)
            new_path.append(adjacent)
            queue.append(new_path)

print(bfs(teren, (1,1), (7, 3)))
This is the code I used to try to navigate this maze-like grid. This is the output I get: [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (2, 6), (3, 6), (4, 6), (4, 5), (4, 4), (4, 3), (3, 3), (3, 2), (3, 1), (4, 1), (5, 1), (6, 1), (6, 2), (6, 3), (7, 3)], while this is the output I need: [(1, 1), (1, 2), (1, 3), (2, 3), (3, 3), (3, 2), (3, 1), (4, 1), (5, 1), (6, 1), (6, 2), (6, 3), (7, 3)].
It seems to be printing out all the walkable coordinates, but I have no idea how to fix that. All the examples online that use grids focus too much on drawing the grid, which clutters the actual BFS.
You will get the output you are looking for if you treat your queue as a queue. That means you don't pop the last element off; you shift out the first:
replace:
path = queue.pop()
with:
path, queue = queue[0], queue[1:]
or:
path = queue.pop(0)
However, deque objects are better suited for such operations:
from collections import deque

def bfs(teren, start, end):
    queue = deque([])
    visited = []
    queue.append([start])
    while queue:
        path = queue.popleft()
        # ...etc.
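For completeness, a runnable sketch with only that one change applied; everything else is the code from the question:
from collections import deque

def bfs(teren, start, end):
    queue = deque()
    visited = []
    queue.append([start])
    while queue:
        path = queue.popleft()  # the only change: take from the front instead of the back
        node = path[-1]
        x = node[0]
        y = node[1]
        if node == end:
            return path
        if node in visited or teren[x][y] == "#":
            continue
        visited.append(node)
        for adjacent in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
            new_path = list(path)
            new_path.append(adjacent)
            queue.append(new_path)

print(bfs(teren, (1, 1), (7, 3)))
# prints the shorter 13-point path listed in the question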
I have an array consisting of 0s and 1s. The 1s form contiguous clusters, as shown in the image.
The number of clusters is not known beforehand.
Is there some way to create a list with the positions of all the clusters, or a list for each cluster containing the positions of all its members? For example:
cluster_list = continuous_cluster_finder(data_array)
cluster_list[0] = [(pixel1_x, pixel1_y), (pixel2_x, pixel2_y),...]
It is not clear from the description what the exact constraints of the problem are.
Assuming a cluster can be distinguished by zeros to its left, right, above, and below, the following solves the problem:
#!/usr/bin/env python
data = [ # top-left
    [0,0,1,1,0,0],
    [0,0,1,1,0,0],
    [1,1,0,0,1,1],
    [1,1,0,0,1,1],
    [0,0,1,1,0,0],
    [0,0,1,1,0,0],
    [1,1,0,0,1,1],
    [1,1,0,0,1,1],
] # bottom-right

d = {}   # point --> clid
dcl = {} # clid --> [point1, point2, ...]

def process_point(t):
    global clid # cluster id
    val = data[t[0]][t[1]]
    above = (t[0]-1, t[1])
    abovevalid = 0 <= above[0] < maxX and 0 <= above[1] < maxY
    #below = (t[0]+1, t[1]) # We do not need that because we scan from top-left to bottom-right
    left = (t[0], t[1]-1)
    leftvalid = 0 <= left[0] < maxX and 0 <= left[1] < maxY
    #right = (t[0], t[1]+1) # We do not need that because we scan from top-left to bottom-right
    if not val: # for zero return
        return
    if left in d and above in d and d[above] != d[left]:
        # left and above on different clusters, merge them
        prevclid = d[left]
        dcl[d[above]].extend(dcl[prevclid]) # update dcl
        for l in dcl[d[left]]:
            d[l] = d[above] # update d
        del dcl[prevclid]
        dcl[d[above]].append(t)
        d[t] = d[above]
    elif above in d and abovevalid:
        dcl[d[above]].append(t)
        d[t] = d[above]
    elif left in d and leftvalid:
        dcl[d[left]].append(t)
        d[t] = d[left]
    else: # First saw this one
        dcl[clid] = [t]
        d[t] = clid
        clid += 1

def print_output():
    for k in dcl: # Print output
        print(k, dcl[k])

def main():
    global clid
    global maxX
    global maxY
    maxX = len(data)
    maxY = len(data[0])
    clid = 0
    for i in range(maxX):
        for j in range(maxY):
            process_point((i,j))
    print_output()

if __name__ == "__main__":
    main()
It prints ...
0 [(0, 2), (0, 3), (1, 2), (1, 3)]
1 [(2, 0), (2, 1), (3, 0), (3, 1)]
2 [(2, 4), (2, 5), (3, 4), (3, 5)]
3 [(4, 2), (4, 3), (5, 2), (5, 3)]
4 [(6, 0), (6, 1), (7, 0), (7, 1)]
5 [(6, 4), (6, 5), (7, 4), (7, 5)]
You can look at the well-known 'blob finding' algorithms used in image processing to isolate regions of the same color. You can also brew your own flavor: find the islands and mark them visited (all of them start unvisited); all connected and visited pixels form one region (in a 3x3 grid the center pixel has 8-connectedness), and you need to find all such regions in the map.
Blob finding is what you need to look for.
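A minimal flood-fill sketch along those lines (4-connectivity is assumed, and the function name continuous_cluster_finder is taken from the question; for NumPy arrays, scipy.ndimage.label does essentially the same job):
from collections import deque

def continuous_cluster_finder(data_array):
    # data_array: list of rows of 0/1 values; returns one list of (x, y) positions per cluster
    rows, cols = len(data_array), len(data_array[0])
    visited = set()
    clusters = []
    for i in range(rows):
        for j in range(cols):
            if data_array[i][j] == 1 and (i, j) not in visited:
                # breadth-first flood fill from an unvisited 1-cell
                cluster = []
                queue = deque([(i, j)])
                visited.add((i, j))
                while queue:
                    x, y = queue.popleft()
                    cluster.append((x, y))
                    for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                        if (0 <= nx < rows and 0 <= ny < cols
                                and data_array[nx][ny] == 1 and (nx, ny) not in visited):
                            visited.add((nx, ny))
                            queue.append((nx, ny))
                clusters.append(cluster)
    return clusters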