I've been trying to implement a transition from one amount of spacing to another, similar to acceleration and deceleration, but I failed, and the only thing I got from this was an infinite stack of a mess. Here is a screenshot showing this in action:
You can see a very dark circle here, which is in reality something like 100 or 200 circles stacked on top of each other.
I reached this result using this piece of code:
def Place_circles(curve, circle_space, cs, draw=True, screen=None):
    curve_acceleration = []
    if type(curve) == tuple:
        curve_acceleration = curve[1][0]
        curve_intensity = curve[1][1]
        curve = curve[0]
    #print(curve_intensity)
    #print(curve_acceleration)
    Circle_list = []
    idx = [0,0]
    for c in reversed(range(0,len(curve))):
        for p in reversed(range(0,len(curve[c]))):
            user_dist = circle_space[curve_intensity[c]] + curve_acceleration[c] * p
            dist = math.sqrt(math.pow(curve[c][p][0] - curve[idx[0]][idx[1]][0],2)+math.pow(curve[c][p][1] - curve[idx[0]][idx[1]][1],2))
            if dist > user_dist:
                idx = [c,p]
                Circle_list.append(circles.circles(round(curve[c][p][0]), round(curve[c][p][1]), cs, draw, screen))
This places circles depending on the intensity of the current curve (a random number between 0 and 2), which corresponds to an amount of spacing (let's say between 20 and 30 here: 20 being index 0, 30 being index 2, and a number between the two being index 1).
This creates the stack you see above, which isn't what I want. I also came to the conclusion that I cannot use acceleration, since the time to move between two points depends on the number of circles I need to click: there are multiple circles between each pair of points, and not being able to determine how many means I can't apply the classic acceleration formula.
So I'm running out of options and ideas on how to transition from one amount of spacing to another.
Any ideas?
PS: I scrapped the idea above and switched back to my master branch, but the code for this is still available in the branch I created here: https://github.com/Mrcubix/Osu-StreamGenerator/tree/acceleration .
So for now I'm back with my normal code, which doesn't have acceleration or deceleration.
TL;DR: I can't use acceleration since I don't know how many circles are going to be placed between the two points, which makes the travel time vary (for example, I need to click circles at 180 BPM, i.e. one circle every 0.333 s), so I'm looking for another way to generate gradually changing spacing.
First, I took my function that generates an intensity in [0; 2] for each curve.
Then I scrapped the acceleration formula, as it's unusable here.
Now I'm using a basic algorithm to determine the maximum number of circles I can place on a curve.
The way my script works is the following:
I first generate a stream (multiple circles that need to be clicked at high BPM);
this way I obtain the length of each curve (or segment) of the polyline.
I then generate an intensity for each curve using the following function:
def generate_intensity(Circle_list: list = None, circle_space: int = None, Args: list = None):
    curve_intensity = []
    if not Args or Args[0] == "NewProfile":
        prompt = True
        while prompt:
            max_duration_intensity = input("Choose the maximum amount of curve the change in intensity will occur for: ")
            if max_duration_intensity.isdigit():
                max_duration_intensity = int(max_duration_intensity)
                prompt = False
        prompt = True
        while prompt:
            intensity_change_odds = input("Choose the odds of occurrence for changes in intensity (1-100): ")
            if intensity_change_odds.isdigit():
                intensity_change_odds = int(intensity_change_odds)
                if 0 < intensity_change_odds <= 100:
                    prompt = False
        prompt = True
        while prompt:
            min_intensity = input("Choose the lowest amount of spacing a circle will have: ")
            if min_intensity.isdigit():
                min_intensity = float(min_intensity)
                if min_intensity < circle_space:
                    prompt = False
        prompt = True
        while prompt:
            max_intensity = input("Choose the highest amount of spacing a circle will have: ")
            if max_intensity.isdigit():
                max_intensity = float(max_intensity)
                if max_intensity > circle_space:
                    prompt = False
        prompt = True
    if Args:
        if Args[0] == "NewProfile":
            return [max_duration_intensity, intensity_change_odds, min_intensity, max_intensity]
        elif Args[0] == "GenMap":
            max_duration_intensity = Args[1]
            intensity_change_odds = Args[2]
            min_intensity = Args[3]
            max_intensity = Args[4]
    circle_space = ([min_intensity, circle_space, max_intensity] if not Args else [Args[0][3],circle_space,Args[0][4]])
    count = 0
    for idx, i in enumerate(Circle_list):
        if idx == len(Circle_list) - 1:
            if random.randint(0,100) < intensity_change_odds:
                if random.randint(0,100) > 50:
                    curve_intensity.append(2)
                else:
                    curve_intensity.append(0)
            else:
                curve_intensity.append(1)
        if random.randint(0,100) < intensity_change_odds:
            if random.randint(0,100) > 50:
                curve_intensity.append(2)
                count += 1
            else:
                curve_intensity.append(0)
                count += 1
        else:
            if curve_intensity:
                if curve_intensity[-1] == 2 and not count+1 > max_duration_intensity:
                    curve_intensity.append(2)
                    count += 1
                    continue
                elif curve_intensity[-1] == 0 and not count+1 > max_duration_intensity:
                    curve_intensity.append(0)
                    count += 1
                    continue
                elif count+1 > 2:
                    curve_intensity.append(1)
                    count = 0
                    continue
                else:
                    curve_intensity.append(1)
            else:
                curve_intensity.append(1)
    curve_intensity.reverse()
    if curve_intensity.count(curve_intensity[0]) == len(curve_intensity):
        print("Intensity didn't change")
        return circle_space[1]
    print("\n")
    return [circle_space, curve_intensity]
With this, I obtain two lists: one with the spacings I specified, and one with the randomly generated intensities.
From there I call another function, which takes as arguments the polyline, the previously specified spacings, and the generated intensities:
def acceleration_algorithm(polyline, circle_space, curve_intensity):
    new_circle_spacing = []
    for idx in range(len(polyline)): #repeat 4 times
        spacing = []
        Length = 0
        best_spacing = 0
        for p_idx in range(len(polyline[idx])-1): #repeat 1000 times / p_idx in [0 ; 1000]
            # Create multiple lists containing spacing going from circle_space[curve_intensity[idx-1]] to circle_space[curve_intensity[idx]]
            spacing.append(np.linspace(circle_space[curve_intensity[idx]],circle_space[curve_intensity[idx+1]], p_idx).tolist())
            # Sum distance to find length of curve
            Length += abs(math.sqrt((polyline[idx][p_idx+1][0] - polyline[idx][p_idx][0]) ** 2 + (polyline[idx][p_idx+1][1] - polyline[idx][p_idx][1]) ** 2))
        for s in range(len(spacing)): # probably has 1000 lists in 1 list
            length_left = Length # Make sure to reset length for each iteration
            for dist in spacing[s]: # subtract the specified int in spacing[s]
                length_left -= dist
                if length_left > 0:
                    best_spacing = s
                else: # Since length < 0, use previous working index (best_spacing), could also just do `s-1`
                    if spacing[best_spacing] == []:
                        new_circle_spacing.append([circle_space[1]])
                        continue
                    new_circle_spacing.append(spacing[best_spacing])
                    break
    return new_circle_spacing
With this, I obtain a list of the spacing between each pair of circles that are going to be placed.
From there, I can call Place_circles() again and obtain the new stream:
def Place_circles(polyline, circle_space, cs, DoDrawCircle=True, surface=None):
    Circle_list = []
    curve = []
    next_circle_space = None
    dist = 0
    for c in reversed(range(0, len(polyline))):
        curve = []
        if type(circle_space) == list:
            iter_circle_space = iter(circle_space[c])
            next_circle_space = next(iter_circle_space, circle_space[c][-1])
        for p in reversed(range(len(polyline[c])-1)):
            dist += math.sqrt((polyline[c][p+1][0] - polyline[c][p][0]) ** 2 + (polyline[c][p+1][1] - polyline[c][p][1]) ** 2)
            if dist > (circle_space if type(circle_space) == int else next_circle_space):
                dist = 0
                curve.append(circles.circles(round(polyline[c][p][0]), round(polyline[c][p][1]), cs, DoDrawCircle, surface))
                if type(circle_space) == list:
                    next_circle_space = next(iter_circle_space, circle_space[c][-1])
        Circle_list.append(curve)
    return Circle_list
The result is a stream with varying spacing between circles (so accelerating or decelerating). The only issue left to fix is pygame not updating the screen with the new set of circles after I call Place_circles(), but that's an issue I'm either going to try to fix myself or ask about in another post.
The final code for this feature can be found in my repo: https://github.com/Mrcubix/Osu-StreamGenerator/tree/Acceleration_v02
I'm currently doing a project, and in the code I have, I'm trying to get trees .*. and mountains .^. to spawn in groups around the first tree or mountain, which is spawned randomly. However, I can't figure out how to get the trees and mountains to spawn in groups around a single randomly generated point. Any help?
grid = []

def draw_board():
    row = 0
    for i in range(0,625):
        if grid[i] == 1:
            print("..."),
        elif grid[i] == 2:
            print("..."),
        elif grid[i] == 3:
            print(".*."),
        elif grid[i] == 4:
            print(".^."),
        elif grid[i] == 5:
            print("[T]"),
        else:
            print("ERR"),
        row = row + 1
        if row == 25:
            print ("\n")
            row = 0
    return
There's a number of ways you can do it.
Firstly, you can just simulate the groups directly, i.e. pick a range on the grid and fill it with a specific figure.
def generate_grid(size):
    grid = [0] * size
    right = 0
    while right < size:
        left = right
        repeat = min(random.randint(1, 5), size - right) # *
        right = left + repeat
        grid[left:right] = [random.choice(figures)] * repeat
    return grid
Note that the group size need not be uniformly distributed; you can use any convenient distribution, e.g. Poisson.
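For instance, here is a minimal sketch of the same loop with Poisson-distributed group sizes; numpy and the figures list are assumptions on my part, since the snippet above doesn't show how figures is defined:

import random
import numpy as np

figures = [3, 4]  # assumed encoding, e.g. 3 = tree, 4 = mountain

def generate_grid_poisson(size, mean_group_size=3):
    """Run-based filling as above, but group lengths drawn from a Poisson distribution."""
    grid = [0] * size
    right = 0
    while right < size:
        left = right
        # Poisson can return 0, so clamp to at least 1 and to the remaining space.
        repeat = min(max(1, np.random.poisson(mean_group_size)), size - right)
        right = left + repeat
        grid[left:right] = [random.choice(figures)] * repeat
    return grid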
Secondly, you can use a Markov Chain. In this case group lengths will implicitly follow a Geometric distribution. Here's the code:
def transition_matrix(A):
    """Ensures that each row of transition matrix sums to 1."""
    copy = []
    for i, row in enumerate(A):
        total = sum(row)
        copy.append([item / total for item in row])
    return copy

def generate_grid(size):
    # Transition matrix ``A`` defines the probability of
    # changing from figure i to figure j for each pair
    # of figures i and j. The grouping effect can be
    # obtained by setting diagonal entries A[i][i] to
    # larger values.
    #
    # You need to specify this manually.
    A = transition_matrix([[5, 1],
                           [1, 5]])  # Assuming 2 figures.
    grid = [random.choice(figures)]
    for i in range(1, size):
        current = grid[-1]
        next = choice(figures, A[current])
        grid.append(next)
    return grid
Where the choice function is explained in this StackOverflow answer.
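In case that link is unavailable, here is a minimal sketch of a choice(options, probs) helper with the signature used above (the name and signature are my assumption, not the code from the linked answer):

import random

def choice(options, probs):
    """Pick one element of options, where probs[i] is the (normalized)
    probability of picking options[i]."""
    r = random.random()
    cumulative = 0.0
    for option, p in zip(options, probs):
        cumulative += p
        if r < cumulative:
            return option
    return options[-1]  # guard against floating-point round-off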
I have 1,000 objects, each object has 4 attribute lists: a list of words, images, audio files and video files.
I want to compare each object against:
a single object, Ox, from the 1,000.
every other object.
A comparison will be something like:
sum(words in common + images in common + ...).
I want an algorithm that will help me find the closest 5, say, objects to Ox, and (a different?) algorithm to find the closest 5 pairs of objects.
I've looked into cluster analysis and maximal matching and they don't seem to exactly fit this scenario. I don't want to use these methods if something more apt exists, so does this look like a particular type of algorithm to anyone, or can anyone point me in the right direction for applying the algorithms I mentioned to this?
I made an example program showing how to solve your first question, but you have to implement how you want to compare images, audio and videos, and I assume every object has the same length for all lists. Answering your second question would be something similar, but with a double loop.
import numpy as np
from random import randint

class Thing:
    def __init__(self, words, images, audios, videos):
        self.words = words
        self.images = images
        self.audios = audios
        self.videos = videos

    def compare(self, other):
        score = 0
        # Assuming the attribute lists have the same length for both objects
        # and that they are sorted in the same manner:
        for i in range(len(self.words)):
            if self.words[i] == other.words[i]:
                score += 1
        for i in range(len(self.images)):
            if self.images[i] == other.images[i]:
                score += 1
        # And so on for audio and video. You have to make sure you know
        # what method to use for determining when an image/audio/video are
        # equal.
        return score

N = 1000
things = []
words = np.random.randint(5, size=(N,5))
images = np.random.randint(5, size=(N,5))
audios = np.random.randint(5, size=(N,5))
videos = np.random.randint(5, size=(N,5))
# For testing purposes I assign each attribute to a list (array) containing
# five random integers. I don't know how you actually intend to do it.
for i in xrange(N):
    things.append(Thing(words[i], images[i], audios[i], videos[i]))

# I will assume that object number 999 (i=999) is the Ox:
ox = 999
scores = np.zeros(N - 1)
for i in xrange(N - 1):
    scores[i] = (things[ox].compare(things[i]))

best = np.argmax(scores)

print "The most similar thing is thing number %d." % best
print
print "Ox attributes:"
print things[ox].words
print things[ox].images
print things[ox].audios
print things[ox].videos
print
print "Best match attributes:"
print things[best].words
print things[best].images
print things[best].audios
print things[best].videos
EDIT:
Now here is the same program, modified slightly to answer your second question. It turned out to be very simple; I basically just needed to add four lines:
1. Changing scores into an (N, N) array instead of just (N).
2. Adding for j in xrange(N): and thus creating a double loop.
3. if i == j:
4. break
where 3. and 4. are just there to make sure that I only compare each pair of things once, not twice, and don't compare any thing with itself.
Then there are a few more lines of code needed to extract the indices of the 5 largest values in scores. I also reformatted the printing so it is easy to confirm by eye that the printed pairs are actually very similar.
Here comes the new code:
import numpy as np

class Thing:
    def __init__(self, words, images, audios, videos):
        self.words = words
        self.images = images
        self.audios = audios
        self.videos = videos

    def compare(self, other):
        score = 0
        # Assuming the attribute lists have the same length for both objects
        # and that they are sorted in the same manner:
        for i in range(len(self.words)):
            if self.words[i] == other.words[i]:
                score += 1
        for i in range(len(self.images)):
            if self.images[i] == other.images[i]:
                score += 1
        for i in range(len(self.audios)):
            if self.audios[i] == other.audios[i]:
                score += 1
        for i in range(len(self.videos)):
            if self.videos[i] == other.videos[i]:
                score += 1
        # You have to make sure you know what method to use for determining
        # when an image/audio/video are equal.
        return score

N = 1000
things = []
words = np.random.randint(5, size=(N,5))
images = np.random.randint(5, size=(N,5))
audios = np.random.randint(5, size=(N,5))
videos = np.random.randint(5, size=(N,5))
# For testing purposes I assign each attribute to a list (array) containing
# five random integers. I don't know how you actually intend to do it.
for i in xrange(N):
    things.append(Thing(words[i], images[i], audios[i], videos[i]))

################################################################################
############################# This is the new part: ############################
################################################################################

scores = np.zeros((N, N))
# Scores will become a triangular matrix where scores[i, j]=value means that
# value is the number of attributes thing[i] and thing[j] have in common.
for i in xrange(N):
    for j in xrange(N):
        if i == j:
            break
            # Break the loop here because:
            # * When i==j we would compare thing[i] with itself, and we don't
            #   want that.
            # * For every combination where j>i we would repeat all the
            #   comparisons for j<i and create duplicates. We don't want that.
        scores[i, j] = (things[i].compare(things[j]))

# I want the 5 most similar pairs:
n = 5
# This list will contain a tuple for each of the n most similar pairs:
best_list = []
for k in xrange(n):
    ij = np.argmax(scores) # Returns a single integer: ij = i*N + j
    i = ij / N
    j = ij % N
    best_list.append((i, j))
    # Erase this score so that on the next iteration the second largest score
    # is found:
    scores[i, j] = 0

for k, (i, j) in enumerate(best_list):
    # The number 1 most similar pair is the BEST match of all.
    # The number n most similar pair is the WORST match of all.
    print "The number %d most similar pair is thing number %d and %d." \
        % (k+1, i, j)
    print "Thing%4d:" % i, \
        things[i].words, things[i].images, things[i].audios, things[i].videos
    print "Thing%4d:" % j, \
        things[j].words, things[j].images, things[j].audios, things[j].videos
    print
If your comparison works by "creating a sum of all features and finding those with the closest sum", there is a simple trick to get close objects:
Put all objects into an array
Calculate all the sums
Sort the array by sum.
If you take any index, the objects close to it will now have a close index as well. So to find the 5 closest objects, you just need to look from index-5 to index+5 in the sorted array.
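A minimal sketch of that idea; total_score is a hypothetical stand-in for however you sum an object's features, and bisect is used to locate Ox in the sorted array:

import bisect

def closest_by_sum(objects, ox, k=5, total_score=lambda o: o.score):
    """Return the k objects whose feature sums are closest to that of ox."""
    ranked = sorted(objects, key=total_score)
    sums = [total_score(o) for o in ranked]
    i = bisect.bisect_left(sums, total_score(ox))
    # Only objects near index i can have a close sum, so examine a small window.
    window = [o for o in ranked[max(0, i - k):i + k + 1] if o is not ox]
    return sorted(window, key=lambda o: abs(total_score(o) - total_score(ox)))[:k]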
Actually, an image is discretized into 3 bins (0, 1, 2), so any color that falls into a particular bin is replaced with the bin number. The discretized image can therefore be viewed as this matrix:
a=[[2,1,2,2,1,1],
[2,2,1,2,1,1],
[2,1,3,2,1,1],
[2,2,2,1,1,2],
[2,2,1,1,2,2],
[2,2,1,1,2,2]]
The next step is to compute the connected components. Individual components will be labeled with letters (A, B, C, D, E, F, etc.) and we will need to keep a table which maintains the discretized color associated with each label, along with the number of pixels with that label. Of course, the same discretized color can be associated with different labels if multiple contiguous regions of the same color exist. The image may then become
b=[[B,C,B,B,A,A],
[B,B,C,B,A,A],
[B,C,D,B,A,A],
[B,B,B,A,A,E],
[B,B,A,A,E,E],
[B,B,A,A,E,E]]
and the connected components table will be:
Label A B C D E
Color 1 2 1 3 1
Size 12 15 3 1 5
Let q = 4. The components A, B, and E have more than q pixels, and the components C and D have fewer than q pixels. Therefore the pixels in A, B, and E are classified as coherent, while the pixels in C and D are classified as incoherent. The CCV for this image will be
Color : 1 2 3
coherent: 17 15 0
incoherent: 3 0 1
A given color bucket may thus contain only coherent pixels (as does 2), only incoherent pixels
(as does 3), or a mixture of coherent and incoherent pixels (as does 1). If we assume there are only 3 possible discretized colors, the CCV can also be written as
<(17; 3) ; (15; 0) ; (0; 1)>
for three colors
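To make the classification step concrete, here is a minimal sketch that turns a per-component (color, size) table like the one above into a CCV; the helper and its inputs are just an illustration, not code from my project:

def build_ccv(component_colors, component_sizes, colors, q=4):
    """component_colors[i] and component_sizes[i] describe connected component i.
    Returns {color: (coherent_pixels, incoherent_pixels)}."""
    ccv = {color: (0, 0) for color in colors}
    for color, size in zip(component_colors, component_sizes):
        coherent, incoherent = ccv[color]
        if size > q:  # "more than q pixels" counts as coherent, as in the example
            coherent += size
        else:
            incoherent += size
        ccv[color] = (coherent, incoherent)
    return ccv

# Using the table above: components A..E with colors 1,2,1,3,1 and sizes 12,15,3,1,5
print(build_ccv([1, 2, 1, 3, 1], [12, 15, 3, 1, 5], colors=[1, 2, 3]))
# -> {1: (17, 3), 2: (15, 0), 3: (0, 1)}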
Could anybody please help me with an algorithm for finding connected components?
I have implemented both iterative DFS and recursive DFS, but both seem to be inefficient; they take nearly 30 minutes to compute the connected components of an image. Can anybody help me figure out how to do this? I'm running out of time, as I have to submit my project. I'm pasting both of my codes:
Image size: 384*256
Code using recursive DFS:
import cv2
import sys
from PIL import Image
import ImageFilter
import numpy
import PIL.Image
from numpy import array

stack=[]
z=0
sys.setrecursionlimit(9000000)

def main():
    imageFile='C:\Users\Abhi\Desktop\cbir-p\New folder\gray_image.jpg'
    size = Image.open(imageFile).size
    print size
    im=Image.open(imageFile)
    inimgli=[]
    for x in range(size[0]):
        inimgli.append([])
        for y in range(size[1]):
            inten=im.getpixel((x,y))
            inimgli[x].append(inten)
    for item in inimgli:
        item.insert(0,0)
        item.append(0)
    inimgli.insert(0,[0]*len(inimgli[0]))
    inimgli.append([0]*len(inimgli[0]))
    blurimg=[]
    for i in range(1,len(inimgli)-1):
        blurimg.append([])
        for j in range(1,len(inimgli[0])-1):
            blurimg[i-1].append((inimgli[i-1][j-1]+inimgli[i-1][j]+inimgli[i-1][j+1]+inimgli[i][j-1]+inimgli[i][j]+inimgli[i][j+1]+inimgli[i+1][j-1]+inimgli[i+1][j]+inimgli[i+1][j+1])/9)
    #print blurimg
    displi=numpy.array(blurimg).T
    im1 = Image.fromarray(displi)
    im1.show()
    #i1.save('gray.png')
    descretize(blurimg)

def descretize(rblurimg):
    count=-1
    desc={}
    for i in range(64):
        descli=[]
        for t in range(4):
            count=count+1
            descli.append(count)
        desc[i]=descli
        del descli
    #print len(rblurimg),len(rblurimg[0])
    #print desc
    drblur=[]
    for x in range(len(rblurimg)):
        drblur.append([])
        for y in range(len(rblurimg[0])):
            for item in desc:
                if rblurimg[x][y] in desc[item]:
                    drblur[x].append(item)
    #displi1=numpy.array(drblur).T
    #im1 = Image.fromarray(displi1)
    #im1.show()
    #im1.save('xyz.tif')
    #print drblur
    connected(drblur)

def connected(rdrblur):
    table={}
    #print len(rdrblur),len(rdrblur[0])
    for item in rdrblur:
        item.insert(0,0)
        item.append(0)
    #print len(rdrblur),len(rdrblur[0])
    rdrblur.insert(0,[0]*len(rdrblur[0]))
    rdrblur.append([0]*len(rdrblur[0]))
    copy=[]
    for item in rdrblur:
        copy.append(item[:])
    global z
    count=0
    for i in range(1,len(rdrblur)-1):
        for j in range(1,len(rdrblur[0])-1):
            if (i,j) not in stack:
                if rdrblur[i][j]==copy[i][j]:
                    z=0
                    times=dfs(i,j,str(count),rdrblur,copy)
                    table[count]=(rdrblur[i][j],times+1)
                    count=count+1
    #z=0
    #times=dfs(1,255,str(count),rdrblur,copy)
    #print times
    #print stack
    stack1=[]
    #copy.pop()
    #copy.pop(0)
    #print c
    #print table
    for item in table.values():
        stack1.append(item)
    #print stack1
    table2={}
    for v in range(64):
        table2[v]={'coherent':0,'incoherent':0}
    #for item in stack1:
    #    if item[0] not in table2.keys():
    #        table2[item[0]]={'coherent':0,'incoherent':0}
    for item in stack1:
        if item[1]>300:
            table2[item[0]]['coherent']=table2[item[0]]['coherent']+item[1]
        else:
            table2[item[0]]['incoherent']=table2[item[0]]['incoherent']+item[1]
    print table2
    #return table2

def dfs(x,y,co,b,c):
    dx = [-1,-1,-1,0,0,1,1,1]
    dy = [-1,0,1,-1,1,-1,0,1]
    global z
    #print x,y,co
    c[x][y]=co
    stack.append((x,y))
    #print dx ,dy
    for i in range(8):
        nx = x+(dx[i])
        ny = y+(dy[i])
        #print nx,ny
        if b[x][y] == c[nx][ny]:
            dfs(nx,ny,co,b,c)
    z=z+1
    return z

if __name__ == '__main__':
    main()
Iterative DFS:
# The imports are the same as in the recursive version above; the module-level
# state used below (visited and the dx/dy neighbour offsets) was not shown in
# the original paste, so it is added here:
from PIL import Image

visited=[]
dx = [-1,-1,-1,0,0,1,1,1]
dy = [-1,0,1,-1,1,-1,0,1]

def main():
    imageFile='C:\Users\Abhi\Desktop\cbir-p\New folder\gray_image.jpg'
    size = Image.open(imageFile).size
    print size
    im=Image.open(imageFile)
    inimgli=[]
    for x in range(size[0]):
        inimgli.append([])
        for y in range(size[1]):
            inten=im.getpixel((x,y))
            inimgli[x].append(inten)
    for item in inimgli:
        item.insert(0,0)
        item.append(0)
    inimgli.insert(0,[0]*len(inimgli[0]))
    inimgli.append([0]*len(inimgli[0]))
    blurimg=[]
    for i in range(1,len(inimgli)-1):
        blurimg.append([])
        for j in range(1,len(inimgli[0])-1):
            blurimg[i-1].append((inimgli[i-1][j-1]+inimgli[i-1][j]+inimgli[i-1][j+1]+inimgli[i][j-1]+inimgli[i][j]+inimgli[i][j+1]+inimgli[i+1][j-1]+inimgli[i+1][j]+inimgli[i+1][j+1])/9)
    #print blurimg
    #displi=numpy.array(blurimg).T
    #im1 = Image.fromarray(displi)
    #im1.show()
    #i1.save('gray.png')
    descretize(blurimg)

def descretize(rblurimg):
    count=-1
    desc={}
    for i in range(64):
        descli=[]
        for t in range(4):
            count=count+1
            descli.append(count)
        desc[i]=descli
        del descli
    #print len(rblurimg),len(rblurimg[0])
    #print desc
    drblur=[]
    for x in range(len(rblurimg)):
        drblur.append([])
        for y in range(len(rblurimg[0])):
            for item in desc:
                if rblurimg[x][y] in desc[item]:
                    drblur[x].append(item)
    #displi1=numpy.array(drblur).T
    #im1 = Image.fromarray(displi1)
    #im1.show()
    #im1.save('xyz.tif')
    #print drblur
    connected(drblur)

def connected(rdrblur):
    for item in rdrblur:
        item.insert(0,0)
        item.append(0)
    #print len(rdrblur),len(rdrblur[0])
    rdrblur.insert(0,[0]*len(rdrblur[0]))
    rdrblur.append([0]*len(rdrblur[0]))
    #print len(rdrblur),len(rdrblur[0])
    copy=[]
    for item in rdrblur:
        copy.append(item[:])
    count=0
    #temp=0
    #print len(alpha)
    for i in range(1,len(rdrblur)-1):
        for j in range(1,len(rdrblur[0])-1):
            if (i,j) not in visited:
                dfs(i,j,count,rdrblur,copy)
                count=count+1
    print "success"

def dfs(x,y,co,b,c):
    global z
    #print x,y,co
    stack=[]
    c[x][y]=str(co)
    visited.append((x,y))
    stack.append((x,y))
    while len(stack) != 0:
        exstack=find_neighbors(stack.pop(),co,b,c)
        stack.extend(exstack)
    #print visited
    #print stack
    #print len(visited)
    #print c
    '''while (len(stack)!=0):
        (x1,y1)=stack.pop()
        exstack=find_neighbors(x1,y1)
        stack.extend(exstack)'''

def find_neighbors((x2,y2),cin,b,c):
    #print x2,y2
    neighborli=[]
    for i in range(8):
        x=x2+(dx[i])
        y=y2+(dy[i])
        if (x,y) not in visited:
            if b[x2][y2]==b[x][y]:
                visited.append((x,y))
                c[x][y]=str(cin)
                neighborli.append((x,y))
    return neighborli

if __name__ == '__main__':
    main()
Here's another post I have answered which does exactly the same thing, and which includes sample code using simple DFS:
How do I find the connected components in a binary image?
Modify the DFS function: add one parameter current_color = {0,1,2}, so that you can decide whether you can go from this node to another node or not. (If the neighbouring node has the same color as current_color and has not yet been visited, recursively visit that node.)
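A minimal sketch of that modification on a 2D grid of bin labels, written iteratively to avoid recursion limits (the function and variable names are mine, not from the linked answer):

def label_components(bins):
    """bins: 2D list of discretized colors. Returns (labels, sizes, colors), where
    labels[x][y] is a component id and sizes/colors describe each component."""
    h, w = len(bins), len(bins[0])
    labels = [[-1] * w for _ in range(h)]
    sizes, colors = [], []
    neighbours = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
    comp = 0
    for sx in range(h):
        for sy in range(w):
            if labels[sx][sy] != -1:
                continue
            current_color = bins[sx][sy]  # the extra parameter suggested above
            stack = [(sx, sy)]
            labels[sx][sy] = comp
            count = 0
            while stack:
                x, y = stack.pop()
                count += 1
                for dx, dy in neighbours:
                    nx, ny = x + dx, y + dy
                    # Only move to unvisited pixels of the same color.
                    if 0 <= nx < h and 0 <= ny < w and labels[nx][ny] == -1 \
                            and bins[nx][ny] == current_color:
                        labels[nx][ny] = comp
                        stack.append((nx, ny))
            sizes.append(count)
            colors.append(current_color)
            comp += 1
    return labels, sizes, colors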
DFS is a good algorithm, but the recursive version is space-inefficient and the non-recursive one is complex, so I would advise the connected-component labelling algorithm, which uses a disjoint-set data structure in two passes to get the solution non-recursively in linear time.
Note: use image-processing libraries for this, as they have fast parallel implementations.
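For reference, here is a minimal sketch of the two-pass idea with a small union-find; it is my own simplified version (4-connectivity, left and top neighbours only on the first pass), not a library implementation:

def find(parent, i):
    # Follow parent pointers to the root, compressing the path as we go.
    while parent[i] != i:
        parent[i] = parent[parent[i]]
        i = parent[i]
    return i

def two_pass_label(bins):
    """bins: 2D list of discretized colors. Returns a 2D list of component labels."""
    h, w = len(bins), len(bins[0])
    labels = [[0] * w for _ in range(h)]
    parent = [0]  # parent[label]; label 0 means "no label yet"
    next_label = 1
    # First pass: assign provisional labels and record equivalences.
    for x in range(h):
        for y in range(w):
            left = labels[x][y - 1] if y > 0 and bins[x][y - 1] == bins[x][y] else 0
            top = labels[x - 1][y] if x > 0 and bins[x - 1][y] == bins[x][y] else 0
            if left == 0 and top == 0:
                parent.append(next_label)
                labels[x][y] = next_label
                next_label += 1
            else:
                candidates = [l for l in (left, top) if l != 0]
                labels[x][y] = min(candidates)
                if len(candidates) == 2:
                    ra, rb = find(parent, left), find(parent, top)
                    if ra != rb:
                        parent[max(ra, rb)] = min(ra, rb)  # union the two sets
    # Second pass: replace every provisional label by its set representative.
    for x in range(h):
        for y in range(w):
            labels[x][y] = find(parent, labels[x][y])
    return labels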
I had a similar issue, but in 3D, and asked a question about that here:
Increasing efficiency of union-find
I found the union-find algorithm to be much faster than anything else for my case (which makes sense given the complexity).
I have a list of objects (Chromosome) which have an attribute fitness (chromosome.fitness is between 0 and 1).
Given a list of such objects, how can I implement a function which returns a single chromosome whose chance of being selected is proportional to its fitness? That is, a chromosome with fitness 0.8 is twice as likely to be selected as one with fitness 0.4.
I've found a few Python and pseudocode implementations, but they are too complex for this requirement: the function needs only a list of chromosomes. Chromosomes store their own fitness as an internal variable.
The implementation I already wrote was before I decided to allow chromosomes to store their own fitness, so was a lot more complicated and involved zipping lists and things.
----------------------------EDIT----------------------------
Thanks Lattyware. The following function seems to work.
def selectOne(self, population):
    max = sum([c.fitness for c in population])
    pick = random.uniform(0, max)
    current = 0
    for chromosome in population:
        current += chromosome.fitness
        if current > pick:
            return chromosome
Use numpy.random.choice.
import numpy.random as npr
def selectOne(self, population):
    max = sum([c.fitness for c in population])
    selection_probs = [c.fitness/max for c in population]
    return population[npr.choice(len(population), p=selection_probs)]
There is a very simple way to select a weighted random choice from a dictionary:
def weighted_random_choice(choices):
    max = sum(choices.values())
    pick = random.uniform(0, max)
    current = 0
    for key, value in choices.items():
        current += value
        if current > pick:
            return key
If you don't have a dictionary at hand, you could modify this to suit your class (as you haven't given more details of it), or generate a dictionary:
choices = {chromosome: chromosome.fitness for chromosome in chromosomes}
Presuming that fitness is an attribute.
Here is an example of the function modified to take an iterable of chromosomes, again, making the same presumption.
def weighted_random_choice(chromosomes):
    max = sum(chromosome.fitness for chromosome in chromosomes)
    pick = random.uniform(0, max)
    current = 0
    for chromosome in chromosomes:
        current += chromosome.fitness
        if current > pick:
            return chromosome
I'd prefer fewer lines:
import itertools
import random

def choose(population):
    bounds = list(itertools.accumulate(chromosome.fitness for chromosome in population))
    pick = random.random() * bounds[-1]
    return next(chromosome for chromosome, bound in zip(population, bounds) if pick < bound)
import numpy as np

def Indvs_wieght(Indvs): # to compute the probability of selecting each individual by its fitness
    s = 1
    s = sum(i.fitness for i in Indvs)
    wieghts = list()
    for i in range(len(Indvs)):
        wieghts.append(Indvs[i].fitness/s)
    return wieghts

def select_parents(indvs, indvs_wieghts, number_of_parents=40): # Roulette Wheel Selection method; number of selected parents
    return np.random.choice(indvs, size=number_of_parents, p=indvs_wieghts)
from __future__ import division
import numpy as np
import random, pdb
import operator

def roulette_selection(weights):
    '''performs weighted selection or roulette wheel selection on a list
    and returns the index selected from the list'''
    # sort the weights in ascending order
    sorted_indexed_weights = sorted(enumerate(weights), key=operator.itemgetter(1))
    indices, sorted_weights = zip(*sorted_indexed_weights)
    # calculate the cumulative probability
    tot_sum = sum(sorted_weights)
    prob = [x/tot_sum for x in sorted_weights]
    cum_prob = np.cumsum(prob)
    # select a random number in the range [0, 1]
    random_num = random.random()
    for index_value, cum_prob_value in zip(indices, cum_prob):
        if random_num < cum_prob_value:
            return index_value

if __name__ == "__main__":
    weights = [1,2,6,4,3,7,20]
    print (roulette_selection(weights))
    weights = [1,2,2,2,2,2,2]
    print (roulette_selection(weights))
import random
def weighted_choice(items):
    total_weight = sum(item.weight for item in items)
    weight_to_target = random.uniform(0, total_weight)
    for item in items:
        weight_to_target -= item.weight
        if weight_to_target <= 0:
            return item