Python: how to run an XML file via cmd (xml.etree.ElementTree)

I made a simple ray tracer program in Python that takes an XML file as input and creates a PNG image file.
In the Python file, I used xml.etree.ElementTree to implement a parser that reads the XML file.
I'm trying to run the program on cmd like:
c:\WINDOWS\system32>python3 rayTracer.py scenes/one-sphere.xml
However, it doesn't create an image.
Did I use the wrong command in cmd?
(The one-sphere.xml file is in the scenes folder.)
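One thing worth checking (a guess based on the prompt shown above, not something stated in the post): the command is run from c:\WINDOWS\system32, so the relative input path scenes/one-sphere.xml, and the output path sys.argv[1] + '.png' used by the save call at the end of main(), are both resolved against system32 rather than the project folder. A sketch of running it from the project directory instead (the project path is hypothetical):
c:\WINDOWS\system32>cd C:\path\to\rayTracerProject
C:\path\to\rayTracerProject>python3 rayTracer.py scenes/one-sphere.xml
With that working directory, the image would be written to scenes/one-sphere.xml.png inside the project folder.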
This is the main function:
def main():
    # parser
    tree = ET.parse(sys.argv[1])
    root = tree.getroot()

    viewPoint = np.array([0, 0, 0]).astype(np.float)
    viewDir = np.array([0, 0, -1]).astype(np.float)
    viewUp = np.array([0, 1, 0]).astype(np.float)
    viewProjNormal = -1 * viewDir
    viewWidth = 1.0
    viewHeight = 1.0
    projDistance = 1.0
    intensity = np.array([1, 1, 1]).astype(np.float)  # how bright the light is
    imgSize = np.array(root.findtext('image').split()).astype(np.int)

    list = []
    light = []

    for c in root.findall('camera'):
        viewPoint = np.array(c.findtext('viewPoint').split()).astype(np.float)
        viewDir = np.array(c.findtext('viewDir').split()).astype(np.float)
        if (c.findtext('projNormal')):
            viewProjNormal = np.array(c.findtext('projNormal').split()).astype(np.float)
        viewUp = np.array(c.findtext('viewUp').split()).astype(np.float)
        if (c.findtext('projDistance')):
            projDistance = np.array(c.findtext('projDistance').split()).astype(np.float)
        viewWidth = np.array(c.findtext('viewWidth').split()).astype(np.float)
        viewHeight = np.array(c.findtext('viewHeight').split()).astype(np.float)
    view = View(viewPoint, viewDir, viewUp, viewProjNormal, viewWidth, viewHeight, projDistance, intensity)

    for c in root.findall('surface'):
        type_c = c.get('type')
        if type_c == 'Sphere':
            center_c = np.array(c.findtext('center').split()).astype(np.float)
            radius_c = np.array(c.findtext('radius')).astype(np.float)
            ref = ''
            for child in c:
                if child.tag == 'shader':
                    ref = child.get('ref')
            for d in root.findall('shader'):
                if d.get('name') == ref:
                    diffuse_d = np.array(d.findtext('diffuseColor').split()).astype(np.float)
                    type_d = d.get('type')

    for c in root.findall('light'):
        position_c = np.array(c.findtext('position').split()).astype(np.float)
        intensity_c = np.array(c.findtext('intensity').split()).astype(np.float)
        light.append(Light(position_c, intensity_c))

    # Create an empty image
    channels = 3
    img = np.zeros((imgSize[1], imgSize[0], channels), dtype=np.uint8)
    img[:, :] = 0

    pixel_x = view.viewWidth / imgSize[0]
    pixel_y = view.viewHeight / imgSize[1]

    w = view.viewDir
    u = np.cross(w, view.viewUp)
    v = np.cross(w, u)
    w_unit = w / np.sqrt(np.sum(w * w))
    u_unit = u / np.sqrt(np.sum(u * u))
    v_unit = v / np.sqrt(np.sum(v * v))

    start = w_unit * view.projDistance - u_unit * pixel_x * ((imgSize[0]/2) + 1/2) - v_unit * pixel_y * ((imgSize[1]/2) + 1/2)

    for x in np.arange(imgSize[0]):
        for y in np.arange(imgSize[1]):
            ray = start + u_unit * x * pixel_x + pixel_y * y * v_unit
            tmp = raytrace(list, ray, view.viewPoint)
            img[y][x] = shade(tmp[0], ray, view, list, tmp[1], light)

    rawimg = Image.fromarray(img, 'RGB')
    rawimg.save(sys.argv[1] + '.png')

if __name__ == "__main__":
    main()
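A side note, not mentioned in the post: on NumPy 1.24 and later the aliases np.float and np.int used above no longer exist, so those casts would raise an AttributeError before any image is written. If that applies to your installation, the built-in types behave the same way; a minimal illustration:
import numpy as np

# np.float / np.int were removed in NumPy 1.24; the built-in float / int work identically here.
viewPoint = np.array([0, 0, 0]).astype(float)
imgSize = np.array("640 480".split()).astype(int)
print(viewPoint, imgSize)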

Related

Python function calling with variable vs raw numbers

I am trying to implement a PSO algorithm from Wikipedia: https://en.wikipedia.org/wiki/Particle_swarm_optimization.
My problem is that when I call the cost function with a variable (Gbest), and then manually call the cost function with the same Gbest data, I get a different output (cost), as in the screenshot below:
(screenshot: differing cost outputs)
I am new to Python, so thank you for any suggestions.
Here is the complete code:
import matplotlib.pyplot as plt
import numpy as np
from control.matlab import *

A = np.array([[0,0,1],[0,1,0],[1,2,-2]])
B = np.array([[0],[1],[0]])
C = np.array([[0, 1, 0]])
D = np.zeros([C.shape[0], B.shape[1]])

sys = ss(A, B, C, D)
sys_tf = tf(sys)
s = tf('s')

def cost(kp, ki):
    global sys_tf, G, y, t, r
    G = kp + ki/s
    C = feedback(sys_tf*G, 1)
    y, t = step(C, linspace(0, 100))
    r = np.ones(len(t))
    return np.sum(y-r)**2

part = 100
ite = 10000
dim = 2
w = 0.001
wdamp = 0.99
phip = 0.9
phig = 0.1
blo, bup = -10, 10

x = np.zeros([dim, part])
v = np.zeros([dim, part])
pbest = np.zeros([dim, part])
gbest = np.array([1000000, 1000000])

for i in range(part):
    for k in range(dim):
        x[k][i] = pbest[k][i] = np.random.uniform(blo, bup)
        v[k][i] = np.random.uniform(-np.abs(bup - blo), np.abs(bup - blo))
    if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
        gbest = np.array([pbest[0][i], pbest[1][i]])

for it in range(ite):
    for i in range(part):
        for k in range(dim):
            rp = np.random.uniform(0, 1)
            rg = np.random.uniform(0, 1)
            v[k,:] = w*v[k,:] + phip*rp*(pbest[k,:] - x[k,:]) + phig*rg*(gbest[k] - x[k,:])
            x[k,:] = x[k,:] + v[k,:]
        w = w*wdamp
        if cost(x[0][i], x[1][i]) < cost(pbest[0][i], pbest[1][i]):
            pbest[:,i] = x[:,i]
            if cost(pbest[0][i], pbest[1][i]) < cost(gbest[0], gbest[1]):
                gbest = np.array([pbest[0][i], pbest[1][i]])

plt.plot(t, y, 'ro')
plt.plot(t, r, 'x')
plt.pause(0.005)
plt.title(gbest)
print([gbest, cost(gbest[0], gbest[1])])
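A hedged debugging sketch, not from the original post, that may help isolate the mismatch: confirm that cost() is deterministic by calling it twice with exactly the same arguments, and compare the full-precision gbest values against the rounded values shown by print(), since typing the printed numbers back in by hand uses less precision. This assumes the cost() and gbest defined above.
c1 = cost(gbest[0], gbest[1])
c2 = cost(gbest[0], gbest[1])
print(c1 == c2)                        # expected True for a deterministic cost function
print(repr(gbest[0]), repr(gbest[1]))  # full precision; the printed/rounded values differ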

Extracting Minutiae Terminations and Bifurcations values from fingerprint-feature-extractor in Python 3

I recently tried the new fingerprint feature extractor library by Utkarsh-Deshmukh (https://github.com/Utkarsh-Deshmukh/Fingerprint-Feature-Extraction) and it works like a charm. The problem is that I need to extract the termination and bifurcation values from the library. I used this code, which is included in the GitHub link, to get the bifurcation and termination features:
import cv2
import fingerprint_feature_extractor

img = cv2.imread('image_path', 0)
FeaturesTerminations, FeaturesBifurcations = fingerprint_feature_extractor.extract_minutiae_features(img, showResult=True, spuriousMinutiaeThresh=10)
I tried using print() to see what's inside FeaturesBifurcations, and I can't understand what the output means.
(screenshot: print() output of FeaturesBifurcations)
What I need are values that look like this, where the first and second columns indicate the xy coordinates, the third column indicates the orientation, and the fourth column indicates the type:
(screenshot: minutiae bifurcations, marked with 1 in the last column, and terminations, marked with 0 in the last column, extracted from a fingerprint)
I tried reading the classes in the library, and I figure I could get those values (the feature locations, orientations, and type are stated explicitly in the library), but I do not know how to extract them. This is what is inside the fingerprint-feature-extractor library:
import cv2
import numpy as np
import skimage.morphology
from skimage.morphology import convex_hull_image, erosion
from skimage.morphology import square
import math

class MinutiaeFeature(object):
    def __init__(self, locX, locY, Orientation, Type):
        self.locX = locX;
        self.locY = locY;
        self.Orientation = Orientation;
        self.Type = Type;

class FingerprintFeatureExtractor(object):
    def __init__(self):
        self._mask = []
        self._skel = []
        self.minutiaeTerm = []
        self.minutiaeBif = []

    def __skeletonize(self, img):
        img = np.uint8(img > 128)
        self._skel = skimage.morphology.skeletonize(img)
        self._skel = np.uint8(self._skel) * 255
        self._mask = img * 255

    def __computeAngle(self, block, minutiaeType):
        angle = []
        (blkRows, blkCols) = np.shape(block);
        CenterX, CenterY = (blkRows - 1) / 2, (blkCols - 1) / 2
        if (minutiaeType.lower() == 'termination'):
            sumVal = 0;
            for i in range(blkRows):
                for j in range(blkCols):
                    if ((i == 0 or i == blkRows - 1 or j == 0 or j == blkCols - 1) and block[i][j] != 0):
                        angle.append(-math.degrees(math.atan2(i - CenterY, j - CenterX)))
                        sumVal += 1
                        if (sumVal > 1):
                            angle.append(float('nan'))
            return (angle)
        elif (minutiaeType.lower() == 'bifurcation'):
            (blkRows, blkCols) = np.shape(block);
            CenterX, CenterY = (blkRows - 1) / 2, (blkCols - 1) / 2
            angle = []
            sumVal = 0;
            for i in range(blkRows):
                for j in range(blkCols):
                    if ((i == 0 or i == blkRows - 1 or j == 0 or j == blkCols - 1) and block[i][j] != 0):
                        angle.append(-math.degrees(math.atan2(i - CenterY, j - CenterX)))
                        sumVal += 1
            if (sumVal != 3):
                angle.append(float('nan'))
            return (angle)

    def __getTerminationBifurcation(self):
        self._skel = self._skel == 255;
        (rows, cols) = self._skel.shape;
        self.minutiaeTerm = np.zeros(self._skel.shape);
        self.minutiaeBif = np.zeros(self._skel.shape);
        for i in range(1, rows - 1):
            for j in range(1, cols - 1):
                if (self._skel[i][j] == 1):
                    block = self._skel[i - 1:i + 2, j - 1:j + 2];
                    block_val = np.sum(block);
                    if (block_val == 2):
                        self.minutiaeTerm[i, j] = 1;
                    elif (block_val == 4):
                        self.minutiaeBif[i, j] = 1;
        self._mask = convex_hull_image(self._mask > 0)
        self._mask = erosion(self._mask, square(5))  # Structuring element for mask erosion = square(5)
        self.minutiaeTerm = np.uint8(self._mask) * self.minutiaeTerm

    def __removeSpuriousMinutiae(self, minutiaeList, img, thresh):
        img = img * 0;
        SpuriousMin = [];
        numPoints = len(minutiaeList);
        D = np.zeros((numPoints, numPoints))
        for i in range(1, numPoints):
            for j in range(0, i):
                (X1, Y1) = minutiaeList[i]['centroid']
                (X2, Y2) = minutiaeList[j]['centroid']
                dist = np.sqrt((X2-X1)**2 + (Y2-Y1)**2);
                D[i][j] = dist
                if(dist < thresh):
                    SpuriousMin.append(i)
                    SpuriousMin.append(j)
        SpuriousMin = np.unique(SpuriousMin)
        for i in range(0, numPoints):
            if(not i in SpuriousMin):
                (X, Y) = np.int16(minutiaeList[i]['centroid']);
                img[X, Y] = 1;
        img = np.uint8(img);
        return(img)

    def __cleanMinutiae(self, img):
        self.minutiaeTerm = skimage.measure.label(self.minutiaeTerm, connectivity=2);
        RP = skimage.measure.regionprops(self.minutiaeTerm)
        self.minutiaeTerm = self.__removeSpuriousMinutiae(RP, np.uint8(img), 10);

    def __performFeatureExtraction(self):
        FeaturesTerm = []
        self.minutiaeTerm = skimage.measure.label(self.minutiaeTerm, connectivity=2);
        RP = skimage.measure.regionprops(np.uint8(self.minutiaeTerm))
        WindowSize = 2  # --> For Termination, the block size can be 3x3 or 5x5. Hence the window selected is 1 or 2
        FeaturesTerm = []
        for num, i in enumerate(RP):
            print(num)
            (row, col) = np.int16(np.round(i['Centroid']))
            block = self._skel[row - WindowSize:row + WindowSize + 1, col - WindowSize:col + WindowSize + 1]
            angle = self.__computeAngle(block, 'Termination')
            if(len(angle) == 1):
                FeaturesTerm.append(MinutiaeFeature(row, col, angle, 'Termination'))

        FeaturesBif = []
        self.minutiaeBif = skimage.measure.label(self.minutiaeBif, connectivity=2);
        RP = skimage.measure.regionprops(np.uint8(self.minutiaeBif))
        WindowSize = 1  # --> For Bifurcation, the block size must be 3x3. Hence the window selected is 1
        for i in RP:
            (row, col) = np.int16(np.round(i['Centroid']))
            block = self._skel[row - WindowSize:row + WindowSize + 1, col - WindowSize:col + WindowSize + 1]
            angle = self.__computeAngle(block, 'Bifurcation')
            if(len(angle) == 3):
                FeaturesBif.append(MinutiaeFeature(row, col, angle, 'Bifurcation'))
        return (FeaturesTerm, FeaturesBif)

    def extractMinutiaeFeatures(self, img):
        self.__skeletonize(img)
        self.__getTerminationBifurcation()
        self.__cleanMinutiae(img)
        FeaturesTerm, FeaturesBif = self.__performFeatureExtraction()
        return(FeaturesTerm, FeaturesBif)

    def showResults(self):
        BifLabel = skimage.measure.label(self.minutiaeBif, connectivity=2);
        TermLabel = skimage.measure.label(self.minutiaeTerm, connectivity=2);
        minutiaeBif = TermLabel * 0;
        minutiaeTerm = BifLabel * 0;
        (rows, cols) = self._skel.shape
        DispImg = np.zeros((rows, cols, 3), np.uint8)
        DispImg[:, :, 0] = 255*self._skel;
        DispImg[:, :, 1] = 255*self._skel;
        DispImg[:, :, 2] = 255*self._skel;
        RP = skimage.measure.regionprops(BifLabel)
        for idx, i in enumerate(RP):
            (row, col) = np.int16(np.round(i['Centroid']))
            minutiaeBif[row, col] = 1;
            (rr, cc) = skimage.draw.circle_perimeter(row, col, 3);
            skimage.draw.set_color(DispImg, (rr, cc), (255, 0, 0));
        RP = skimage.measure.regionprops(TermLabel)
        for idx, i in enumerate(RP):
            (row, col) = np.int16(np.round(i['Centroid']))
            minutiaeTerm[row, col] = 1;
            (rr, cc) = skimage.draw.circle_perimeter(row, col, 3);
            skimage.draw.set_color(DispImg, (rr, cc), (0, 0, 255));
        cv2.imshow('a', DispImg);
        cv2.waitKey(0)

def extract_minutiae_features(img, showResult=False):
    feature_extractor = FingerprintFeatureExtractor()
    FeaturesTerm, FeaturesBif = feature_extractor.extractMinutiaeFeatures(img)
    if(showResult):
        feature_extractor.showResults()
    return(FeaturesTerm, FeaturesBif)
Is there a way to extract the location, orientation, and type values (as floats and/or integers) from this library? Or is there a way, or another library, to plot those lists in Cartesian or polar coordinates?
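Based on the MinutiaeFeature class quoted above (locX, locY, Orientation, Type), one possible sketch is to walk both returned lists and build the four-column layout described in the question (x, y, orientation, type flag). This is only an illustration of reading those attributes, not an official API of the library:
import numpy as np

# Assumes FeaturesTerminations / FeaturesBifurcations are the lists of MinutiaeFeature
# objects returned by extract_minutiae_features(), as in the snippet above.
rows = []
for m in FeaturesTerminations:
    # Terminations carry a single angle, so take the first (and only) entry.
    rows.append([m.locX, m.locY, m.Orientation[0], 0])   # 0 = termination
for m in FeaturesBifurcations:
    # Bifurcations carry three angles; keep the whole list (or pick one, as needed).
    rows.append([m.locX, m.locY, m.Orientation, 1])      # 1 = bifurcation

minutiae = np.array(rows, dtype=object)  # object dtype because bifurcation angles are lists
print(minutiae)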

Why do I get ModuleNotFoundError when I try to run the import statement?

I'm trying to import these in Jupyter; however, I get an error when I run this code: ModuleNotFoundError: No module named 'anna_phog'. I have another Python file named 'anna_phog.py'. How do I fix this?
Below is 'anna_phog_demo.py', where I get the error:
from anna_phog import anna_phog
import imageio
import matplotlib.pyplot as plt
image_path = "image_0058.jpg"
S = 8
angle = 360
Level = 3
roi = [1,225,1,300]
save=True
Image = imageio.imread(image_path)
p = anna_phog(Image, bin, angle, Level, roi)
print("P: \n{}".format(p))
print(len(p), type(p))
And below is the 'anna_phog.py' code
import numpy as np
import imageio
import cv2
import matplotlib.pyplot as plt

def anna_phog(Img, bin, angle, L, roi):
    if Img.shape[2] == 3:
        G = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY)
    else:
        G = Img
    if np.sum(G) > 100:
        # apply automatic Canny edge detection using the computed median
        sigma = 0.33
        v = np.median(G)
        lower = int(max(0, (1.0 - sigma) * v))
        upper = int(min(255, (1.0 + sigma) * v))
        E = cv2.Canny(G, lower, upper)  # high and low threshold
        GradientX, GradientY = np.gradient(G)
        GradientYY = np.gradient(GradientY, axis=1)
        Gr = np.sqrt(np.square(GradientX) + np.square(GradientY))
        index = GradientX == 0
        GradientX[index] = 1e-5  # maybe another value
        YX = GradientY*GradientX
        if angle == 180: A = ((np.arctan(YX) + (np.pi/2)) * 180) / np.pi
        if angle == 360: A = ((np.arctan2(GradientY, GradientX) + np.pi) * 180) / np.pi
        bh, bv = anna_BinMatrix(A, E, Gr, angle, bin)
    else:
        bh = np.zeros(Img.shape)
        bv = np.zeros(Img.shape)
    bh_roi = bh[roi[0]:roi[1], roi[2]:roi[3]]
    bv_roi = bv[roi[0]:roi[1], roi[2]:roi[3]]
    p = anna_PhogDescriptor(bh_roi, bv_roi, L, bin)
    return p

def anna_BinMatrix(A, E, G, angle, bin):
    n, contorns = cv2.connectedComponents(E, connectivity=8)
    X = E.shape[1]
    Y = E.shape[0]
    bm = np.zeros(shape=(Y, X))
    bv = np.zeros(shape=(Y, X))
    nAngle = angle/bin
    for i in range(n):
        posY, posX = np.where(contorns == i)
        for j in range(posY.shape[0]):
            pos_x = posX[j]
            pos_y = posY[j]
            b = np.ceil(A[pos_y, pos_x]/nAngle)
            if b == 0: bin = 1
            if G[pos_y, pos_x] > 0:
                bm[pos_y, pos_x] = b
                bv[pos_y, pos_x] = G[pos_y, pos_x]
    return (bm, bv)

def anna_PhogDescriptor(bh, bv, L, bin):
    p = np.array([])
    # level 0
    for b in range(bin):
        ind = bh == b
        p = np.append(p, np.sum(bv[ind]))
    # higher levels
    for l in range(1, L+1):
        x = int(np.trunc(bh.shape[1]/(2**l)))
        y = int(np.trunc(bh.shape[0]/(2**l)))
        for xx in range(0, bh.shape[1]-x+1, x):
            for yy in range(0, bh.shape[0]-y+1, y):
                print(l)
                bh_cella = bh[yy:yy+y, xx:xx+x]
                bv_cella = bv[yy:yy+y, xx:xx+x]
                for b in range(bin):
                    ind = bh_cella == b
                    p = np.append(p, np.sum(bv_cella[ind], axis=0))
    if np.sum(p) != 0:
        p = p/np.sum(p)
    return p
Below is a screenshot of the folder where I put these files.
(screenshot: folder path)
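Not part of the original post, but one common cause, offered here only as a guess: Jupyter's working directory may not be the folder that contains anna_phog.py, so the local import fails. A minimal sketch of checking and fixing that from inside the notebook (the folder path is hypothetical):
import os, sys

print(os.getcwd())                       # where the notebook is actually running
sys.path.append(r"C:\path\to\folder")    # hypothetical: the folder that contains anna_phog.py

from anna_phog import anna_phog          # retry the import after the path is added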

Why does a translation matrix not affect the rendering of a 3D model in Python?

I am very new to coding and decided to start with a 3D engine in Python. I followed OLC's tutorial and adapted it for Python. However, my translation matrix doesn't seem to affect how the model is rendered. Any advice at all, from code improvements to telling me how bad I am, would be much appreciated.
I have tried redoing the vector multiplication in all sorts of ways, along with all kinds of reformatting and retrying, but I am not even sure what I am looking for in this case. Any help would be much appreciated.
As I am new, I apologize for any poor etiquette I may have shown on this site.
import math, numpy as np, pygame, sys, os, random, string
from pygame.locals import *

pygame.init()
width, height = 1600, 900; cx, cy = width//2, height//2
screen = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
font = pygame.font.SysFont("Arial", 18)
pygame.display.set_caption('3D Graphics')

def loadObj(filename):
    tris = []
    verts = []
    try:
        fp = open(filename, "r")
    except:
        print("File: " + filename + " not found")
        sys.exit(1)
    for line in fp:
        if line.startswith('#'): continue
        values = line.split()
        if not values: continue
        if values[0] == 'v':
            #v = vec3(float(values[1]), float(values[2]), float(values[3]))
            #verts.append(vec3(int(values[1]), int(values[2]), int(values[3])))
            verts.append(vec3(float(values[1]), float(values[2]), float(values[3]), 0))
            #verts.append(v)
        elif values[0] == 'f':
            p = []
            for v in values[1:]:
                w = v.split("/")
                p.append(int(w[0]))
            #print(p)
            #print(verts[-185])
            triTemp = triangle(verts[p[0] - 1], verts[p[1] - 1], verts[p[2] - 1])
            tris.append(triTemp)
    fp.close()
    return tris

class vec3():  # Possibly Obsolete
    __slots__ = ['x', 'y', 'z', 'w']
    def __init__(self, x, y, z, w):
        self.x = x
        self.y = y
        self.z = z
        self.w = w

class triangle():
    __slots__ = ['vec1', 'vec2', 'vec3']
    def __init__(self, vec1, vec2, vec3):
        self.vec1 = vec1
        self.vec2 = vec2
        self.vec3 = vec3

def matrixMakeTranslation(x, y, z):
    matrix = np.zeros((4,4))
    matrix[0,0] = 1.0
    matrix[1,1] = 1.0
    matrix[2,2] = 1.0
    matrix[3,3] = 1.0
    matrix[3,0] = x
    matrix[3,1] = y
    matrix[3,2] = z
    return matrix

def Matrix_MakeRotationX(fAngleRad):
    matrix = np.zeros((4,4))
    matrix[0,0] = 1.0
    matrix[1,1] = (math.cos(fAngleRad * 0.5))
    matrix[1,2] = (math.sin(fAngleRad * 0.5))
    matrix[2,1] = (-math.sin(fAngleRad * 0.5))
    matrix[2,2] = (math.cos(fAngleRad * 0.5))
    matrix[3,3] = 1.0
    return matrix

def Matrix_MakeRotationZ(fAngleRad):
    matrix = np.zeros((4,4))
    matrix[0,0] = (math.cos(fAngleRad))
    matrix[0,1] = (math.sin(fAngleRad))
    matrix[1,0] = (-math.sin(fAngleRad))
    matrix[1,1] = (math.cos(fAngleRad))
    matrix[2,2] = 1.0
    matrix[3,3] = 1.0
    return matrix

fNear = float(0.1)  # Create the Projection Matrix
fFar = float(1000.0)
fFov = float(90.0)
fAspectRatio = float(height/width)
fFovRad = 1/math.tan(fFov * 0.5 / 180 * math.pi)
projectionMatrix = np.zeros((4,4))
projectionMatrix[0,0] = fAspectRatio * fFovRad
projectionMatrix[1,1] = fFovRad
projectionMatrix[2,2] = fFar / (fFar - fNear)
projectionMatrix[3,2] = float((-fFar * fNear) / (fFar - fNear))
projectionMatrix[2,3] = 1.0
projectionMatrix[3,3] = 0.0

meshname = "teapot.obj"  # Load the mesh
tris = loadObj(meshname)

vCamera = np.array([0,0,0,0])
fAngleRad = 0
colour = (255,255,255)
colour2 = (0,0,0)

triProjected = triangle(np.array([0,0,0,0]), np.array([0,0,0,0]), np.array([0,0,0,0]))  # These are used later
triTranslalted = triangle(np.array([0,0,0,0]), np.array([0,0,0,0]), np.array([0,0,0,0]))
triTransformed = triangle(np.array([0,0,0,0]), np.array([0,0,0,0]), np.array([0,0,0,0]))

while True:  # Begin Loop
    for event in pygame.event.get():  # Quit
        if event.type == pygame.QUIT: pygame.quit(); sys.exit()

    dt = clock.tick()/1000
    pygame.display.set_caption('3D Graphics - FPS: %.2f' % int(dt))
    print("fps:", clock.get_fps())  # Framerate and caption
    pygame.display.update()
    screen.fill((0,0,0))

    fAngleRad += 0.1
    matRotZ = Matrix_MakeRotationZ(fAngleRad * 0.5)  # Set up matrices
    matRotX = Matrix_MakeRotationX(fAngleRad)
    matTrans = matrixMakeTranslation(0.0, 0.0, 50.0)
    matWorld = np.identity(4)
    matWorld = matRotZ @ matRotX
    matWorld = matWorld @ matTrans  # Seems to be broken. idk why.

    for i in tris:  # For triangle in all triangles
        reDo1 = np.array([i.vec1.x, i.vec1.y, i.vec1.z, i.vec1.w])
        reDo2 = np.array([i.vec2.x, i.vec2.y, i.vec2.z, i.vec2.w])
        reDo3 = np.array([i.vec3.x, i.vec3.y, i.vec3.z, i.vec3.w])
        triTransformed.vec1 = np.matmul(matWorld, reDo1)
        triTransformed.vec2 = np.matmul(matWorld, reDo2)
        triTransformed.vec3 = np.matmul(matWorld, reDo3)
        triProjected.vec1 = np.matmul(projectionMatrix, triTransformed.vec1)
        triProjected.vec2 = np.matmul(projectionMatrix, triTransformed.vec2)
        triProjected.vec3 = np.matmul(projectionMatrix, triTransformed.vec3)

        # Scale Into View
        triProjected.vec1[0] += 1.0
        triProjected.vec1[1] += 1.0
        triProjected.vec2[0] += 1.0
        triProjected.vec2[1] += 1.0
        triProjected.vec3[0] += 1.0
        triProjected.vec3[1] += 1.0
        triProjected.vec1[0] *= 0.5 * width
        triProjected.vec1[1] *= 0.5 * height
        triProjected.vec2[0] *= 0.5 * width
        triProjected.vec2[1] *= 0.5 * height
        triProjected.vec3[0] *= 0.5 * width
        triProjected.vec3[1] *= 0.5 * height

        pygame.draw.polygon(screen, colour, [(triProjected.vec1[0], triProjected.vec1[1]), (triProjected.vec2[0], triProjected.vec2[1]), (triProjected.vec3[0], triProjected.vec3[1])])
You are using homogeneous coordinates to represent the vertices of your model, so the W component must be 1.
When you load your model you are setting W to 0:
verts.append(vec3(float(values[1]), float(values[2]), float(values[3]), 0 ))
By setting W=0 you are creating a homogeneous vector (a.k.a. a "point at infinity" or "ideal point"); by setting W=1 you are creating a homogeneous point.
Points can be translated, but vectors cannot.
https://en.m.wikipedia.org/wiki/Homogeneous_coordinates
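A minimal sketch of the fix described above, applied to the loadObj line quoted earlier: store each vertex as a point with W = 1 so the translation row of the 4x4 matrix actually contributes.
# In loadObj(), when parsing a 'v' line, store the vertex as a point (W = 1)
# rather than a direction vector (W = 0), so matTrans can move it.
verts.append(vec3(float(values[1]), float(values[2]), float(values[3]), 1))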

Optimizing a complex algorithm

I know this is not an ideal place for questions of this scope, but I'm not sure where else to ask this or how to break it down. I've been working on a function for the past couple of weeks; it runs, but for it to be feasible for my purposes I need to speed it up 200-300x.
I have an image array where all pixels of similar color have been averaged and set to that average value. I also have a 2D array of the same height and width, which labels each unique, non-contiguous feature of the image.
Using these, I need to assess the size of each feature and its level of contrast with each of its neighbors. These values are used in an equation, and if the output of that equation is below a certain threshold, that feature is merged with its most similar neighbor.
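For reference, a small sketch of the merge criterion, reconstructed from the constants in the code below rather than stated explicitly in the post: MAXIMUMCONTRAST = 443.405 is roughly sqrt(3 * 255**2), the largest possible Euclidean distance between two 8-bit RGB colors, and a region is merged when its closest neighbor's color distance falls below a size-dependent threshold.
import numpy as np

def merge_threshold(size, max_size, ratio=1.0):
    # Largest possible Euclidean distance between two 8-bit RGB colors: sqrt(3 * 255**2).
    MAXIMUMCONTRAST = 443.405
    # Larger regions get a smaller (stricter) threshold, so they merge less readily.
    return (ratio - size / max_size) * MAXIMUMCONTRAST

print(merge_threshold(size=100, max_size=10000))   # ~438.97 for a tiny region
print(merge_threshold(size=9000, max_size=10000))  # ~44.34 for a dominant region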
I've uploaded the image and the feature label array (written with numpy.savetxt()) to OneDrive and attached the links below.
code:
def textureRemover(pix, labeledPix, ratio = 1.0):
    numElements = numpy.amax(labeledPix)
    maxSize = numpy.count_nonzero(labeledPix)
    MAXIMUMCONTRAST = 443.405

    for regionID in range(numElements):
        start = time.clock()
        regionID += 1
        if regionID not in labeledPix:
            continue
        #print(regionID)
        #print((regionID / numElements) * 100, '%')
        neighborIDs = getNeighbors(labeledPix, regionID)
        if 0 in neighborIDs:
            neighborIDs.remove(0) #remove white value

        regionMask = labeledPix == regionID
        region = pix[regionMask]
        size = numpy.count_nonzero(regionMask)
        contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
        regionMean = region.mean(axis = 0)

        if len(neighborIDs) > 200:
            contrast = numpy.zeros(labeledPix.shape)
            contrast[labeledPix!=0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix!=0])**2, axis = -1))
            significantMask = (contrast < contrastMin)
            significantContrasts = list(numpy.unique(contrast[significantMask]))
            significantNeighbors = {}
            for significantContrast in significantContrasts:
                minContrast = min(significantContrasts)
                if labeledPix[contrast == minContrast][0] in neighborIDs:
                    significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
                else:
                    significantContrasts.pop(significantContrasts.index(minContrast))
        else:
            significantNeighbors = {}
            for neighborID in neighborIDs:
                neighborMask = labeledPix == neighborID
                neighbor = pix[neighborMask]
                neighborMean = neighbor.mean(axis = 0)
                contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
                if contrast < contrastMin:
                    significantNeighbors[contrast] = neighborID

        if significantNeighbors:
            contrasts = significantNeighbors.keys()
            minContrast = min(contrasts)
            minNeighbor = significantNeighbors[minContrast]
            neighborMask = labeledPix == minNeighbor
            neighborSize = numpy.count_nonzero(neighborMask)

            if neighborSize <= size:
                labeledPix[neighborMask] = regionID
                pix[neighborMask] = regionMean
            else:
                labeledPix[regionMask] = minNeighbor
                pix[regionMask] = pix[neighborMask].mean(axis = 0)

        print(time.clock() - start)

    return pix
(OneDrive links: pix, labeledPix)
I know I'm asking for a lot of help, but I've been stuck on this for a few weeks and am unsure what else I can do. Any help will be greatly appreciated!
Here is an optimized version of most of your logic (I underestimated the amount of work that would be...). I skipped the >200 branch and am using fake data because I couldn't access your links. When I switch off your >200 branch, your code and mine appear to give the same result, and mine is quite a bit faster on the fake example.
Sample output:
original
26.056154000000003
optimized
0.763613000000003
equal
True
Code:
import numpy as np
from numpy.lib.stride_tricks import as_strided

def mockdata(m, n, k):
    colors = np.random.random((m, n, 3))
    i, j = np.ogrid[:m, :n]
    labels = np.round(k*k * (np.sin(0.05 * i) + np.sin(0.05 * j)**2)).astype(int) % k
    return colors, labels

DIAG_NEIGHBORS = True
MAXIMUMCONTRAST = 443.405

def textureRemover2(pix, labeledPix, ratio=1.0):
    start = time.clock()
    pix, labeledPix = pix.copy(), labeledPix.copy()
    pixf, labeledPixf = pix.reshape(-1, 3), labeledPix.ravel()
    m, n = labeledPix.shape
    s, t = labeledPix.strides
    # find all sizes in O(n)
    sizes = np.bincount(labeledPixf)
    n_ids = len(sizes)
    # make index for quick access to labeled areas
    lblidx = np.split(np.argsort(labeledPixf), np.cumsum(sizes[:-1]))
    lblidx[0] = None
    # find all mean colors in O(n)
    regionMeans = np.transpose([np.bincount(labeledPix.ravel(), px)
                                / np.maximum(sizes, 1)
                                for px in pix.reshape(-1, 3).T])
    # find all neighbors in O(n)
    horz = set(frozenset(p) for bl in as_strided(labeledPix, (m,n-1,2), (s,t,t))
               for p in bl)
    vert = set(frozenset(p) for bl in as_strided(labeledPix, (m-1,n,2), (s,t,s))
               for p in bl)
    nb = horz|vert
    if DIAG_NEIGHBORS:
        dwnrgt = set(frozenset(p) for bl in as_strided(
            labeledPix, (m-1,n-1,2), (s,t,s+t)) for p in bl)
        dwnlft = set(frozenset(p) for bl in as_strided(
            labeledPix[::-1], (m-1,n-1,2), (-s,t,t-s)) for p in bl)
        nb = nb|dwnrgt|dwnlft
    nb = {p for p in nb if len(p) == 2 and not 0 in p}
    nb_dict = {}
    for a, b in nb:
        nb_dict.setdefault(a, set()).add(b)
        nb_dict.setdefault(b, set()).add(a)
    maxSize = labeledPix.size - sizes[0]
    for id_ in range(1, n_ids):
        nbs = list(nb_dict.get(id_, set()))
        if not nbs:
            continue
        d = regionMeans[id_] - regionMeans[nbs]
        d = np.einsum('ij,ij->i', d, d)
        mnd = np.argmin(d)
        if d[mnd] < ((ratio - sizes[id_]/maxSize) * MAXIMUMCONTRAST)**2:
            mn = nbs[mnd]
            lrg, sml = (id_, mn) if sizes[id_] >= sizes[mn] else (mn, id_)
            sizes[lrg], sizes[sml] = sizes[lrg] + sizes[sml], 0
            for nb in nb_dict[sml]:
                nb_dict[nb].remove(sml)
                nb_dict[nb].add(lrg)
            nb_dict[lrg].update(nb_dict[sml])
            nb_dict[lrg].remove(lrg)
            nb_dict[sml] = set()
            pixf[lblidx[sml]] = regionMeans[lrg]
            labeledPixf[lblidx[sml]] = lrg
            lblidx[lrg], lblidx[sml] = np.r_[lblidx[lrg], lblidx[sml]], None
    print(time.clock() - start)
    return pix

from scipy.ndimage.morphology import binary_dilation
import time

STRUCTEL = np.ones((3,3), int) if DIAG_NEIGHBORS else np.array([[0,1,0],[1,1,1],[0,1,0]], int)

def getNeighbors(labeledPix, regionID):
    nb = set(labeledPix[binary_dilation(labeledPix == regionID, structure=STRUCTEL)])
    nb.remove(regionID)
    return sorted(nb)

numpy = np

def textureRemover(pix, labeledPix, ratio = 1.0):
    pix, labeledPix = pix.copy(), labeledPix.copy()
    numElements = numpy.amax(labeledPix)
    maxSize = numpy.count_nonzero(labeledPix)
    MAXIMUMCONTRAST = 443.405
    start = time.clock()
    for regionID in range(numElements):
        regionID += 1
        if regionID not in labeledPix:
            continue
        #print(regionID)
        #print((regionID / numElements) * 100, '%')
        neighborIDs = getNeighbors(labeledPix, regionID)
        if 0 in neighborIDs:
            neighborIDs.remove(0) #remove white value
        regionMask = labeledPix == regionID
        region = pix[regionMask]
        size = numpy.count_nonzero(regionMask)
        contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
        regionMean = region.mean(axis = 0)
        if len(neighborIDs) > 20000:
            contrast = numpy.zeros(labeledPix.shape)
            contrast[labeledPix!=0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix!=0])**2, axis = -1))
            significantMask = (contrast < contrastMin)
            significantContrasts = list(numpy.unique(contrast[significantMask]))
            significantNeighbors = {}
            for significantContrast in significantContrasts:
                minContrast = min(significantContrasts)
                if labeledPix[contrast == minContrast][0] in neighborIDs:
                    significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
                else:
                    significantContrasts.pop(significantContrasts.index(minContrast))
        else:
            significantNeighbors = {}
            for neighborID in neighborIDs:
                neighborMask = labeledPix == neighborID
                neighbor = pix[neighborMask]
                neighborMean = neighbor.mean(axis = 0)
                contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
                if contrast < contrastMin:
                    significantNeighbors[contrast] = neighborID
        if significantNeighbors:
            contrasts = significantNeighbors.keys()
            minContrast = min(contrasts)
            minNeighbor = significantNeighbors[minContrast]
            neighborMask = labeledPix == minNeighbor
            neighborSize = numpy.count_nonzero(neighborMask)
            if neighborSize <= size:
                labeledPix[neighborMask] = regionID
                pix[neighborMask] = regionMean
            else:
                labeledPix[regionMask] = minNeighbor
                pix[regionMask] = pix[neighborMask].mean(axis = 0)
    print(time.clock() - start)
    return pix

data = mockdata(200, 200, 1000)
print('original')
res0 = textureRemover(*data)
print('optimized')
res2 = textureRemover2(*data)
print('equal')
print(np.allclose(res0, res2))
