I am working on a project in OpenCV where I have a contour and I have to fit arcs of defined arc length, radius, chord length, and angle to it. By "fit" I mean I have to choose the best arc from a given set - the one that covers a piece of the contour most closely.
What I have tried so far: in each contour I find corners, which split the contour into segments. Using some linear algebra I calculate the radius and the center of the circle that best fits each segment, then compare that radius and central angle against all the candidate arcs and fit the one with the least difference in values. This approach is not helping me much, as the arcs I get don't lie on the contour. The code is attached.
for contours in final_contours:
    iter = 0
    dict_sorted, sorted_corners = sortCorners(contours, corners)
    keys_dict_sorted = sorted(dict_sorted.keys())
    for key in keys_dict_sorted:
        [x, y] = dict_sorted[key]
        bl_img = cv2.circle(bl_img, (x, y), 3, (255, 0, 0), -1)
    # cv2_imshow(bl_img)
    for i in range(len(keys_dict_sorted)):
        if i != len(keys_dict_sorted) - 1:
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[i + 1]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[i + 1]) / 2)
        else:
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[0]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[0]) / 2)
        radius, [c_x, c_y] = betterCircleFinder(contours[idx1][0], contours[idx2][0], contours[idx3][0])
        if radius != -1:
            radius = round(truncate(radius, -1))
            c_x = round(truncate(c_x, -1))
            c_y = round(truncate(c_y, -1))
            pt1_angle = 180 * (np.arctan2(np.array([contours[idx1][0][1] - c_y], dtype=float), np.array([contours[idx1][0][0] - c_x], dtype=float)) / np.pi)
            pt2_angle = 180 * (np.arctan2(np.array([contours[idx3][0][1] - c_y], dtype=float), np.array([contours[idx3][0][0] - c_x], dtype=float)) / np.pi)
            # print(pt1_angle, pt2_angle)
            angle = abs(pt1_angle - pt2_angle)
            # print("Angle : ", angle)
            actualRadius = radius * inchPerPixel
            # print("Actual Radius : ", actualRadius)
            b = random.randint(0, 255)
            g = random.randint(0, 255)
            r = random.randint(0, 255)
            error = [0 for i in range(len(copings))]
            idx = 0
            for coping in copings:
                err = abs(actualRadius - coping.radius)
                err1 = abs(angle - coping.centralAngle)
                error[idx] = (0.5 * err / actualRadius) + (0.5 * err1[0] / coping.centralAngle)
                idx += 1
                # bl_img = draw_coris(bl_img, int(coping.radius * pixelPerInch), 0, pt1_angle, pt1_angle + coping.centralAngle, (c_x, c_y), (b, g, r), 5)
                # cv2_imshow(bl_img)
            index_min = min(range(len(error)), key=error.__getitem__)
            # pprint.pprint(error)
            # print(index_min, error[index_min])
            # print(copings[index_min].radius * pixelPerInch)
            bl_img = draw_coris(bl_img, int(copings[index_min].radius * pixelPerInch), 0, pt1_angle, pt1_angle + copings[index_min].centralAngle, (int(c_x), int(c_y)), (b, g, r), 5)
            if abs(c_x) > (2 ** 31 - 1) or abs(c_y) > (2 ** 31 - 1):
                print("OVERFLOW VALUES")
                # cv2_imshow(bl_img)
                continue
def betterCircleFinder(pt1, pt2, pt3):
    # Solve for the circle x^2 + y^2 + 2Dx + 2Ey + F = 0 through three points.
    A = np.array([[2 * pt1[0], 2 * pt1[1], 1],
                  [2 * pt2[0], 2 * pt2[1], 1],
                  [2 * pt3[0], 2 * pt3[1], 1]])
    B = np.array([[-1 * (pt1[0] ** 2 + pt1[1] ** 2)],
                  [-1 * (pt2[0] ** 2 + pt2[1] ** 2)],
                  [-1 * (pt3[0] ** 2 + pt3[1] ** 2)]])
    det = np.linalg.det(A)
    if det == 0:
        # The points are collinear - no circle passes through them.
        return -1, [-1, -1]
    C = np.linalg.inv(A) @ B
    C = np.squeeze(C)
    c_x = -1 * C[0]
    c_y = -1 * C[1]
    c = C[2]
    radius = math.sqrt(c_x ** 2 + c_y ** 2 - c)
    return radius, [c_x, c_y]
This is the code that finds the parameters of the circle that best fits a segment.
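As a quick sanity check of betterCircleFinder (my own addition, not part of the project code, assuming numpy and math are imported as above), three points on a known circle should recover its radius and center:

# Three points on the circle of radius 5 centered at (2, 3)
pt1 = [7, 3]   # (2 + 5, 3)
pt2 = [2, 8]   # (2, 3 + 5)
pt3 = [-3, 3]  # (2 - 5, 3)
radius, center = betterCircleFinder(pt1, pt2, pt3)
print(radius, center)  # expected: 5.0 [2.0, 3.0]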
Recently, I tried to implement Phong shading with only NumPy and PIL in Python, but there is some black-and-white noise in the rendered image. Can you point out what I should change in my code to fix the issue?
The resulting image is as follows:
The mesh model can be downloaded from https://github.com/google/nerfactor/blob/main/third_party/xiuminglib/data/models/teapot.obj.
You can try the code below yourself.
import random
import numpy as np
import trimesh
from PIL import Image

def phong_shading(light_direction, view_direction, normal, material):
    # Calculate the ambient color
    ambient_color = material.ambient_color
    # Calculate the diffuse color
    diffuse_coefficient = max(np.dot(normal, light_direction), 0)
    diffuse_color = diffuse_coefficient * material.diffuse_color
    # Calculate the specular color
    halfway_direction = normalize(light_direction + view_direction)
    specular_coefficient = max(np.dot(normal, halfway_direction), 0)
    specular_coefficient = specular_coefficient ** material.shininess
    specular_color = specular_coefficient * material.specular_color
    # Combine the ambient, diffuse and specular colors
    final_color = specular_color + diffuse_color + ambient_color
    return final_color

def normalize(v, axis=-1, epsilon=1e-12):
    square_sum = np.sum(np.square(v), axis, keepdims=True)
    v_inv_norm = 1. / np.sqrt(np.maximum(square_sum, epsilon))
    return v * v_inv_norm
def rasterize_triangle(vertices):
    # Calculate the bounding box of the triangle
    min_x = int(min(vertices[:, 0]))
    max_x = int(max(vertices[:, 0])) + 1
    min_y = int(min(vertices[:, 1]))
    max_y = int(max(vertices[:, 1])) + 1
    for x in range(min_x, max_x):
        for y in range(min_y, max_y):
            if point_in_triangle(vertices, x, y):
                yield (x, y)

def is_point_in_triangle(vertices, x, y):
    v0, v1, v2 = vertices
    A = 1/2 * (-v1[1]*v2[0] + v0[1]*(-v1[0] + v2[0]) +
               v0[0]*(v1[1] - v2[1]) + v1[0]*v2[1])
    s = v0[1]*v2[0] - v0[0]*v2[1] + (v2[1] - v0[1])*x + (v0[0] - v2[0])*y
    t = v0[0]*v1[1] - v0[1]*v1[0] + (v0[1] - v1[1])*x + (v1[0] - v0[0])*y
    return 0 <= s and s <= A and 0 <= t and t <= A and (s + t) <= A

def point_in_triangle(vertices, x, y):
    v0, v1, v2 = vertices
    x1, y1, x2, y2, x3, y3 = v0[0], v0[1], v1[0], v1[1], v2[0], v2[1]
    # Compute barycentric coordinates
    denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
    l1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom
    l2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom
    l3 = 1 - l1 - l2
    # Check if the point is inside the triangle
    return 0 <= l1 <= 1 and 0 <= l2 <= 1 and 0 <= l3 <= 1
def world_to_camera_coordinates(vertices, camera_position):
    ''' Convert from world coordinates to camera coordinates.
    This function assumes that the camera is looking at the origin
    and that the y axis of the camera is pointing down to the ground.
    Args:
        vertices (np.array): the vertices of the mesh in world coordinates.
    Returns:
        the vertices in camera coordinates.
    '''
    camera_z_axis = -normalize(camera_position)  # (3,)
    world_z_axis = np.array([0, 0, 1])
    project_y_on_z = -(-world_z_axis @ camera_z_axis.T) * camera_z_axis
    camera_y_axis = project_y_on_z - world_z_axis  # (3,)
    camera_x_axis = np.cross(camera_y_axis, camera_z_axis)  # (3,)
    camera_matrix = np.stack([camera_x_axis, camera_y_axis, camera_z_axis])
    return (camera_matrix @ (vertices - camera_position).T).T
def camera_to_screen_coordinates(vertices, width, height, fov, near_clip, far_clip):
    aspect_ratio = width / height
    # Create the perspective projection matrix
    projection_matrix = perspective(fov, aspect_ratio, near_clip, far_clip)
    # Create a matrix to store the transformed vertices
    transformed_vertices = np.ones((len(vertices), 4))
    transformed_vertices[:, :3] = vertices
    # Multiply each vertex by the projection matrix
    transformed_vertices = np.matmul(transformed_vertices, projection_matrix.T)
    # Convert from homogeneous coordinates to screen coordinates
    transformed_vertices[:, 0] = (
        transformed_vertices[:, 0] / transformed_vertices[:, 3]) * (width / 2) + (width / 2)
    transformed_vertices[:, 1] = (
        transformed_vertices[:, 1] / transformed_vertices[:, 3]) * (height / 2) + (height / 2)
    return transformed_vertices[:, :2]

def perspective(fov, aspect_ratio, near_clip, far_clip):
    fov = np.radians(fov)
    t = np.tan(fov / 2) * near_clip
    b = -t
    r = t * aspect_ratio
    l = -r
    projection_matrix = np.array(
        [
            [(2 * near_clip) / (r - l), 0, (r + l) / (r - l), 0],
            [0, (2 * near_clip) / (t - b), (t + b) / (t - b), 0],
            [0, 0, -(far_clip + near_clip) / (far_clip - near_clip),
             -(2 * far_clip * near_clip) / (far_clip - near_clip)],
            [0, 0, -1, 0]
        ]
    )
    return projection_matrix
def transform_to_screen_space(vertices, camera_position, img_width, img_height):
    assert img_width == img_height, 'The image must be square'
    # Transform the vertices to camera space
    camera_vertices = world_to_camera_coordinates(vertices, camera_position)
    # Transform the vertices to perspective space
    fov = 45
    focal = img_width / (2 * np.tan(np.radians(fov / 2)))
    screen_vertices = camera_vertices / camera_vertices[:, 2].reshape(-1, 1)
    screen_vertices[:, :2] = screen_vertices[:, :2] * focal + img_height / 2
    return screen_vertices, camera_vertices

def area_triangle(v1, v2, v3):
    ''' Compute the area of a triangle. '''
    return 0.5 * np.linalg.norm(np.cross(v2 - v1, v3 - v1))

def compute_vertices_normals(vertices, faces):
    ''' Compute the normal vector for each vertex.
    Args:
        vertices (np.array): the vertices of the mesh in world coordinates.
        faces (np.array): the vertex indices of each face.
    '''
    # Method with trimesh
    mesh = trimesh.Trimesh(vertices=vertices, faces=faces, processed=False)
    vertices_normals = normalize(mesh.vertex_normals, epsilon=1e-160)
    # Equivalent method with plain numpy:
    '''
    vertices_normals = np.zeros_like(vertices).astype(np.float128)
    v1 = vertices[faces][:, 0]
    v2 = vertices[faces][:, 1]
    v3 = vertices[faces][:, 2]
    normal_before_normalization = np.cross(v2 - v1, v3 - v1)
    per_face_area = 0.5 * np.linalg.norm(
        normal_before_normalization, axis=-1, keepdims=True
    )
    per_face_area_enlarged = per_face_area * \
        per_face_area.shape[0] / per_face_area.sum()
    per_face_normal = normalize(normal_before_normalization, epsilon=1e-160)
    weighted_normal = per_face_normal * per_face_area_enlarged
    weighted_normal_broadcast = np.reshape(
        np.repeat(np.expand_dims(weighted_normal, 1), 3, axis=1), (-1, 3)
    )
    np.add.at(vertices_normals, faces.ravel(), weighted_normal_broadcast)
    vertices_normals = normalize(vertices_normals, epsilon=1e-160)
    '''
    return vertices_normals

def barycentric_coords(triangle_vertices, x, y):
    x1, y1, z1 = triangle_vertices[0]
    x2, y2, z2 = triangle_vertices[1]
    x3, y3, z3 = triangle_vertices[2]
    # Calculate barycentric coordinates
    lambda1 = ((y2 - y3)*(x - x3) + (x3 - x2)*(y - y3)) / \
        ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
    lambda2 = ((y3 - y1)*(x - x3) + (x1 - x3)*(y - y3)) / \
        ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
    lambda3 = 1 - lambda1 - lambda2
    return np.array([lambda1, lambda2, lambda3]).reshape(-1, 1)
def render_phong(vertices, faces, camera_position, light_position, width, height, material):
    # Compute the normal vector for each vertex
    vertices_normals = compute_vertices_normals(vertices, faces)
    # Transform the vertices to screen space
    transformed_vertices, camera_vertices = transform_to_screen_space(
        vertices, camera_position, width, height)
    # Create an empty image
    img = Image.new('RGB', (width, height), (0, 0, 0))
    pixels = img.load()
    pixel_depth = np.ones((width, height)) * np.inf
    for face in faces:
        v1 = transformed_vertices[face[0]]
        v2 = transformed_vertices[face[1]]
        v3 = transformed_vertices[face[2]]
        if area_triangle(v1, v2, v3) == 0:
            continue
        # Get the normal vectors of the face's vertices
        normal = vertices_normals[face]
        # Calculate the light and view direction vectors for each vertex
        light_direction = normalize(light_position - vertices[face])
        view_direction = normalize(camera_position - vertices[face])
        # Rasterize the triangle
        for x, y in rasterize_triangle(transformed_vertices[face]):
            for i in range(20):
                tubx = random.uniform(0, 1.0) + x
                tuby = random.uniform(0, 1.0) + y
                # Calculate the barycentric coordinates of the jittered sample
                barycentric = barycentric_coords(
                    transformed_vertices[face], tubx, tuby)
                if np.min(barycentric) < 0:  # Check if the sample is outside of the triangle
                    continue
                # Interpolate the vertex attributes to get per-pixel attributes
                interpolated_normal = (barycentric * normal).sum(axis=0)
                interpolated_light_direction = (
                    barycentric * light_direction
                ).sum(axis=0)
                interpolated_view_direction = (
                    barycentric * view_direction
                ).sum(axis=0)
                interpolated_camera_vertices = (
                    barycentric * camera_vertices[face]).sum(axis=0)
                # Calculate the color of the pixel
                color = phong_shading(interpolated_light_direction,
                                      interpolated_view_direction, interpolated_normal, material)
                if x >= 0 and x < width and y >= 0 and y < height:
                    oldr, oldg, oldb = pixels[x, y]
                    newr, newg, newb = (np.clip(color, 0, 1)
                                        * 255).astype(np.uint8)
                    # newr = newr if newr > oldr else oldr
                    # newg = newg if newg > oldg else oldg
                    # newb = newb if newb > oldb else oldb
                    depth = interpolated_camera_vertices[2]
                    if depth < pixel_depth[x, y]:
                        # print(depth, pixel_depth[x, y])
                        pixel_depth[x, y] = depth
                        pixels[x, y] = (newr, newg, newb)
                        # if x < 453 and x > 415 and y > 255 and y < 265:
                        #     img.save(f"debug/f_{face}_x_{x}_y_{y}_d_{depth}.jpg")
    return img

class PhongShader():
    def __init__(self, light_position, camera_position, image_width=512, image_height=512):
        # Assert the camera position is not along the z axis.
        self.light_position = light_position
        self.camera_position = camera_position
        self.image_width = image_width
        self.image_height = image_height

    def render(self, vertices, faces, material):
        return render_phong(vertices, faces, self.camera_position, self.light_position, self.image_width, self.image_height, material)
class Material():
    def __init__(self) -> None:
        self.ambient_color = np.array([0.1, 0.1, 0.1])
        self.diffuse_color = np.array([1., 0.0, 0.5])
        self.specular_color = np.array([0.5, 0.5, 0.5])
        self.shininess = 50

def main():
    # Load the mesh
    mesh = trimesh.load('teapot.obj')
    vertices, faces = mesh.vertices, mesh.faces
    # Create a shader
    shader = PhongShader(light_position=np.array(
        [8, 0, 0]), camera_position=np.array([8, 0, 0]))
    # Render the image
    material = Material()
    img = shader.render(vertices, faces, material)
    img.save("output.jpg")

if __name__ == '__main__':
    main()
A possible reason could be discretization in the sampling, but I am not sure how to fix it.
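One thing worth noting (a minimal standalone sketch, my own addition rather than part of the renderer above): render_phong takes 20 jittered subsamples per pixel, but each passing subsample overwrites the pixel instead of being averaged, which can produce speckle at edges. The sketch below shows the averaging idea in isolation; the hard-edged disc is a stand-in for a triangle edge and is not part of the original code.

import numpy as np
from PIL import Image

def coverage(px, py, n_samples=20):
    # Average many jittered subsamples instead of keeping only the last one.
    hits = 0
    for _ in range(n_samples):
        sx = px + np.random.uniform(0, 1)
        sy = py + np.random.uniform(0, 1)
        if sx * sx + sy * sy < 64 * 64:  # inside a disc of radius 64
            hits += 1
    return hits / n_samples  # averaged contribution, not last-sample-wins

size = 128
img = Image.new('L', (size, size))
pix = img.load()
for py in range(size):
    for px in range(size):
        pix[px, py] = int(255 * coverage(px, py))
img.save('antialiased_disc.png')  # smooth edge instead of speckle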
I want to draw a line on an image. I am given only the angle and the end point of the line. How can I do this with Python?
I think it can be done by identifying the line that passes through the given point and plotting it according to the angle; the line should end at the given point.
I tried it with this code, but it didn't work.
import math
import cv2

def get_coords(x, y, angle, imwidth, imheight):
    #img = cv2.imread('contours_none_image2.jpg', 1)
    x1_length = (x - imwidth) / math.cos(angle)
    y1_length = (y - imheight) / math.sin(angle)
    length = max(abs(x1_length), abs(y1_length))
    endx1 = x + length * math.cos(math.radians(angle))
    endy1 = y + length * math.sin(math.radians(angle))
    x2_length = (x - imwidth) / math.cos(angle + 45)
    y2_length = (y - imheight) / math.sin(angle + 45)
    length = max(abs(x2_length), abs(y2_length))
    endx2 = x + length * math.cos(math.radians(angle + 45))
    endy2 = y + length * math.sin(math.radians(angle + 45))
    cv2.line(img, (int(endx1), int(endy1)), (int(endx2), int(endy2)), (0, 255, 255), 3)
    cv2.imshow("contours_none_image2.jpg", img)
    #cv2.imshow("contours_none_image2.jpg", result)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return endx1, endy1, endx2, endy2
An interesting way of finding the intersection point between the Y axis and the line is by using three cross products with homogeneous coordinates.
Ways of finding line intersections are described in Wikipedia.
The cross-products solution using homogeneous coordinates is described here.
Start by finding a very "far" point (x0, y0) on the line - outside the image:
length = cv2.norm(np.array([imwidth, imheight])) # Maximum possible length: sqrt(imwidth**2 + imheight**2)
x0 = x - length * math.cos(math.radians(angle))
y0 = y + length * math.sin(math.radians(angle)) # Reverse signs because the y axis in the image points down
Finding intersection with the Y axis:
The Y axis may be described as a line from (0,0) to (0, imheight-1).
We may find the line representation in homogeneous coordinates using cross product:
p0 = np.array([0, 0, 1])
p1 = np.array([0, imheight-1, 1])
l0 = np.cross(p0, p1) # [-107, 0, 0]
In the same way we may find the representation of the line from (x0, y0) to (x, y):
p0 = np.array([x0, y0, 1])
p1 = np.array([x, y, 1])
l1 = np.cross(p0, p1)
Finding the intersection point using cross product between the lines, and "normalizing" the homogeneous coordinate:
p = np.cross(l0, l1)
p = p / p[2]
Code sample:
import math
import cv2
import numpy as np
img = np.zeros((108, 192, 3), np.uint8)
x, y, angle = 150, 20, 80
imheight, imwidth = img.shape[0], img.shape[1]
angle = 90 - angle # Usually the angle is relative to the horizontal axis - use 90 - angle for swapping axes
length = cv2.norm(np.array([imwidth, imheight])) # Apply maximum possible length: length = sqrt(imwidth**2 + imheight**2)
x0 = x - length * math.cos(math.radians(angle))
y0 = y + length * math.sin(math.radians(angle)) # Reverse signs because the y axis in the image points down
# http://robotics.stanford.edu/~birch/projective/node4.html
# Find lines in homogeneous coordinates (using cross product):
# l0 represents a line of Y axis.
p0 = np.array([0, 0, 1])
p1 = np.array([0, imheight-1, 1])
l0 = np.cross(p0, p1) # [-107, 0, 0]
# l1 represents the line from (x0, y0) to (x, y).
p0 = np.array([x0, y0, 1])
p1 = np.array([x, y, 1])
l1 = np.cross(p0, p1)
# https://en.wikipedia.org/wiki/Line%E2%80%93line_intersection
# Lines intersection in homogeneous coordinates (using cross product):
p = np.cross(l0, l1)
# Convert from homogeneous coordinates to Euclidean coordinates (divide by the last element).
p = p / p[2]
x0, y0 = p[0], p[1]
cv2.line(img, (int(x0),int(y0)), (int(x),int(y)), (0, 255, 255), 3)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Sample output:
More conventional solution:
We may simply assign x0 = 0, and find length:
x0 = x - length * cos(alpha)
y0 = y + length * sin(alpha)
Assign x0 = 0:
x - length * cos(alpha) = 0
=> x = length * cos(alpha)
=> length = x/cos(alpha)
Code:
length = x / math.cos(math.radians(angle)) # We better verify that math.cos(math.radians(angle)) != 0
x0 = 0
y0 = y + length * math.sin(math.radians(angle))
cv2.line(img, (int(x0),int(y0)), (int(x),int(y)), (255, 0, 0), 3)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
I am trying to invert a set of maps using this answer here. I used two of the author's methods, so there is more detail on how they work in his answer. I also left some comments out to shorten the code.
I have my own camera matrix and distortion coefficients that I use to create an x and y map with cv2.initUndistortRectifyMap(), but when I pass them to invert_map() I get the out-of-bounds error shown below.
None of this (except the bottom part) is my code and it's pretty advanced stuff, so I have no clue how to debug it. And I don't have enough reputation to comment on the original answer. Anyone got a solution?
import numpy as np
import cv2
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import glob

def bilinear_inverse(p, vertices, numiter=4):
    # Newton iteration for the local coordinates (s0, s1) of point p inside
    # the bilinear patch defined by the four vertices.
    p = np.asarray(p)
    v = np.asarray(vertices)
    sh = p.shape[1:]
    if v.ndim == 2:
        v = np.expand_dims(v, axis=tuple(range(2, 2 + len(sh))))
    # Start in the center
    s = .5 * np.ones((2,) + sh)
    s0, s1 = s
    for k in range(numiter):
        # Residual
        r = v[0] * (1 - s0) * (1 - s1) + v[1] * s0 * (1 - s1) + v[2] * s0 * s1 + v[3] * (1 - s0) * s1 - p
        # Jacobian
        J11 = -v[0, 0] * (1 - s1) + v[1, 0] * (1 - s1) + v[2, 0] * s1 - v[3, 0] * s1
        J21 = -v[0, 1] * (1 - s1) + v[1, 1] * (1 - s1) + v[2, 1] * s1 - v[3, 1] * s1
        J12 = -v[0, 0] * (1 - s0) - v[1, 0] * s0 + v[2, 0] * s0 + v[3, 0] * (1 - s0)
        J22 = -v[0, 1] * (1 - s0) - v[1, 1] * s0 + v[2, 1] * s0 + v[3, 1] * (1 - s0)
        inv_detJ = 1. / (J11 * J22 - J12 * J21)
        s0 -= inv_detJ * (J22 * r[0] - J12 * r[1])
        s1 -= inv_detJ * (-J21 * r[0] + J11 * r[1])
    return s

def invert_map(xmap, ymap, diagnostics=False):
    """
    Generate the inverse of the deformation map defined by (xmap, ymap) using inverse bilinear interpolation.
    """
    # Generate quadrilaterals from mapped grid points.
    quads = np.array([[ymap[:-1, :-1], xmap[:-1, :-1]],
                      [ymap[1:, :-1], xmap[1:, :-1]],
                      [ymap[1:, 1:], xmap[1:, 1:]],
                      [ymap[:-1, 1:], xmap[:-1, 1:]]])
    # Range of indices possibly within each quadrilateral
    x0 = np.floor(quads[:, 1, ...].min(axis=0)).astype(int)
    x1 = np.ceil(quads[:, 1, ...].max(axis=0)).astype(int)
    y0 = np.floor(quads[:, 0, ...].min(axis=0)).astype(int)
    y1 = np.ceil(quads[:, 0, ...].max(axis=0)).astype(int)
    # Quad indices
    i0, j0 = np.indices(x0.shape)
    # Offset of destination map
    x0_offset = x0.min()
    y0_offset = y0.min()
    # Index range in x and y (per quad)
    xN = x1 - x0 + 1
    yN = y1 - y0 + 1
    # Shape of destination array
    sh_dest = (1 + x1.max() - x0_offset, 1 + y1.max() - y0_offset)
    # Coordinates of destination array
    yy_dest, xx_dest = np.indices(sh_dest)
    xmap1 = np.zeros(sh_dest)
    ymap1 = np.zeros(sh_dest)
    TN = np.zeros(sh_dest, dtype=int)
    # Smallish number to avoid missing points lying on edges
    epsilon = .01
    # Loop through indices possibly within quads
    for ix in range(xN.max()):
        for iy in range(yN.max()):
            # Work only with quads whose bounding box contains the indices
            valid = (xN > ix) * (yN > iy)
            # Local points to check
            p = np.array([y0[valid] + ix, x0[valid] + iy])
            # Map the position of the point in the quad
            s = bilinear_inverse(p, quads[:, :, valid])
            # s out of the unit square means p out of the quad
            # Keep some epsilon around to avoid missing edges
            in_quad = np.all((s > -epsilon) * (s < (1 + epsilon)), axis=0)
            # Add found indices
            ii = p[0, in_quad] - y0_offset
            jj = p[1, in_quad] - x0_offset
            ymap1[ii, jj] += i0[valid][in_quad] + s[0][in_quad]
            xmap1[ii, jj] += j0[valid][in_quad] + s[1][in_quad]
            # Increment count
            TN[ii, jj] += 1
    ymap1 /= TN + (TN == 0)
    xmap1 /= TN + (TN == 0)
    if diagnostics:
        diag = {'x_offset': x0_offset,
                'y_offset': y0_offset,
                'mask': TN > 0}
        return xmap1, ymap1, diag
    else:
        return xmap1, ymap1

# Camera matrix and distortion coefficients that I brought
cam_matrix = np.array([[1223.07784, 0, 926.80065],
                       [0, 1231.71291, 546.10496],
                       [0, 0, 1]], dtype='float32')
distortion_profile = np.array([-0.32077, 0.15041, 0.001004, 0.00028, -0.04252], dtype='float32')

# Get the forward maps
mapx, mapy = cv2.initUndistortRectifyMap(cam_matrix, distortion_profile, None, cam_matrix, (1920, 1080), 5)

# Invert the maps
mapx_invert, mapy_invert = invert_map(mapx, mapy)

# Apply the mapping to the image
inversed = cv2.remap(img, mapx_invert, mapy_invert, cv2.INTER_LINEAR)
cv2.imwrite('inversed.png', inversed)
Error:
File "c:\Users\...\redist_image2.py", line 121, in invert_map
ymap1[ii, jj] += i0[valid][in_quad] + s[0][in_quad]
IndexError: index 1382 is out of bounds for axis 1 with size 1020
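For what it's worth, a hedged diagnostic sketch (my own addition, not from the original answer): with barrel distortion, the maps produced by cv2.initUndistortRectifyMap() can reference coordinates outside the 1920x1080 frame, which would make the destination indices computed inside invert_map() exceed the allocated array. The range check below probes for that, and the clip is only a possible workaround, not a verified fix:

# Diagnostic: do the forward maps reach outside the frame?
print('mapx range:', mapx.min(), mapx.max())  # in-frame would be within [0, 1919]
print('mapy range:', mapy.min(), mapy.max())  # in-frame would be within [0, 1079]

# Possible workaround (an assumption): clip the maps to the frame before
# inverting, at the cost of distorting the border quads.
mapx_clipped = np.clip(mapx, 0, 1920 - 1)
mapy_clipped = np.clip(mapy, 0, 1080 - 1)
mapx_invert, mapy_invert = invert_map(mapx_clipped, mapy_clipped)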
I am trying to colour the Mandelbrot set using HSV values and the PIL library.
Even after multiple tries fiddling with the HSV values, I could not achieve the desired effect.
Here is what I currently have:
Here is the desired effect:
This is the code that I am trying. It would also be helpful if you could add some tips to optimise the code below to compute the set faster; I am new to Python.
from PIL import Image

# Size of the image canvas
HEIGHT = 500
ZOOM = 0.0
Y_PAN = 0.0

# Range of the complex plane
MIN_X = -2.0 + ZOOM
MAX_X = 2.0 - ZOOM
MAX_Y = 2.0 + Y_PAN - ZOOM
MIN_Y = -2.0 + Y_PAN + ZOOM

DATA = []

def map_to_scale_d(x, in_min, in_max, out_min, out_max):
    # returns float
    return float((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

def map_to_scale(x, in_min, in_max, out_min, out_max):
    # returns int
    return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

# Max iterations till Zn
ITER = 200

# Loop to traverse every single point in the canvas
for y in range(HEIGHT):
    for x in range(HEIGHT):
        # Convert to the scale of the complex plane
        a = map_to_scale_d(x, 0, HEIGHT, MIN_X, MAX_X)
        b = map_to_scale_d(y, 0, HEIGHT, MAX_Y, MIN_Y)
        # Original values
        _a = a
        _b = b
        counter = 0
        # Start the iteration at (a, b) in the complex plane
        # and calculate z^2 + c
        while counter < ITER:
            aa = a * a - b * b
            bb = 2 * a * b
            a = aa + _a
            b = bb + _b
            if abs(aa + bb) > 4:
                break
            counter = counter + 1
        # Initialise the colour
        h = 0
        s = map_to_scale(counter, 0, ITER, 0, 100)
        v = map_to_scale(counter, 0, ITER, 0, 100)
        if counter == ITER:
            h = 0
            s = 0
            v = 0
        # Convert to 8-bit
        h = map_to_scale(h, 0, 360, 0, 255)
        s = map_to_scale(s, 0, 100, 0, 255)
        v = map_to_scale(v, 0, 100, 0, 255)
        DATA.append((h, s, v))

img = Image.new('HSV', (HEIGHT, HEIGHT))
img.putdata(DATA)
img.show()
img.convert('RGB').save('test.png')
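On the speed question, a hedged optimisation sketch (my own addition, independent of the colouring issue): the escape-time loop can be vectorised with NumPy so all pixels iterate in parallel. The same 500x500 canvas and [-2, 2] range are assumed; the hue/value mapping here is only an illustration.

import numpy as np
from PIL import Image

N = 500
ITER = 200
xs = np.linspace(-2.0, 2.0, N)
ys = np.linspace(2.0, -2.0, N)
c = xs[np.newaxis, :] + 1j * ys[:, np.newaxis]  # one complex c per pixel
z = np.zeros_like(c)
counts = np.zeros(c.shape, dtype=int)
alive = np.ones(c.shape, dtype=bool)  # points that have not escaped yet
for _ in range(ITER):
    z[alive] = z[alive] ** 2 + c[alive]
    escaped = alive & (np.abs(z) > 2)
    alive &= ~escaped  # freeze escaped points
    counts[alive] += 1

hsv = np.zeros((N, N, 3), dtype=np.uint8)
t = counts / ITER
hsv[..., 0] = (t * 255).astype(np.uint8)        # hue from iteration count
hsv[..., 1] = 255                               # full saturation
hsv[..., 2] = np.where(counts == ITER, 0, 255)  # interior stays black
Image.fromarray(hsv, 'HSV').convert('RGB').save('mandelbrot_fast.png')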
Now that my Perlin generator is 'working', I used it to create noise, only to find that it is nothing like what I see on the internet...
My noise:
Notice the streaks:
What I am aiming to get (obviously with the corresponding colour):
Why does mine look so noisy and nasty?
Code (sorry for the lack of a minimal stub; the Perlin noise makes up most of the program, so it's important to include the full program):
from PIL import Image
from tkinter import filedialog
from random import randint, random

# Initialise width / height
width = 625
height = 625

# Import gradient picture - 200*1 image used to texture the perlin noise
# R, G, B, Alpha
gradient = Image.open("image.png")
gradlist = list(gradient.getdata())

# Create new image
img = Image.new('RGBA', (width, height), color=(255, 255, 255, 255))

# Perlin noise modules --------------------------------------------------------

# Modules
from random import sample
from math import floor

p = sample([x for x in range(0, (width * height))], (width * height)) * 2

# Antialiasing (smoothstep fade curve)
def fade(t):
    retval = 6*(t**5) - 15*(t**4) + 10*(t**3)
    return retval

# Linear interpolation
def lerp(t, a, b):
    retval = a + (t * (b - a))
    return retval

# Clever bitwise hash stuff - picks a unit vector from 12 possible:
# (1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0),(1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1),(0,1,1),(0,-1,1),(0,1,-1),(0,-1,-1)
def grad(hash, x, y, z):
    h = hash % 15
    if h < 8:
        u = x
    else:
        u = y
    if h < 4:
        v = y
    elif h in (12, 14):
        v = x
    else:
        v = z
    return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v)

# Perlin function
def perlin(x, y, z):
    ix = int(floor(x)) & 255
    iy = int(floor(y)) & 255
    iz = int(floor(z)) & 255
    x -= int(floor(x))
    y -= int(floor(y))
    z -= int(floor(z))
    u = fade(x)
    v = fade(y)
    w = fade(z)
    # Hash coordinates of the 8 cube corners
    A = p[ix] + iy
    AA = p[A] + iz
    AB = p[A + 1] + iz
    B = p[ix + 1] + iy
    BA = p[B] + iz
    BB = p[B + 1] + iz
    return -lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z), grad(p[BA], x - 1, y, z)),
                         lerp(u, grad(p[AB], x, y - 1, z), grad(p[BB], x - 1, y - 1, z))),
                 lerp(v, lerp(u, grad(p[AA + 1], x, y, z - 1), grad(p[BA + 1], x - 1, y, z - 1)),
                      lerp(u, grad(p[AB + 1], x, y - 1, z - 1), grad(p[BB + 1], x - 1, y - 1, z - 1))))

def octavePerlin(x, y, z, octaves, persistence):
    total = 0
    frequency = 1
    amplitude = 1
    maxValue = 0
    for x in range(octaves):  # note: this loop variable shadows the parameter x
        total += perlin(x * frequency, y * frequency, z * frequency) * amplitude
        maxValue += amplitude
        amplitude *= persistence
        frequency *= 2
    return total / maxValue

z = random()
img.putdata([gradlist[int(octavePerlin((x + random() - 0.5) / 1000, (y + random() - 0.5) / 1000, z, 4, 2) * 100 + 100)]
             for x in range(width) for y in range(height)])
img.save(filedialog.asksaveasfilename() + ".png", "PNG")
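For comparison, a sketch of the table setup in Ken Perlin's reference implementation (my own addition, shown for contrast with the width * height permutation table above, not as a verified fix for the streaks): the classic table is a shuffled copy of the integers 0..255, duplicated so that indices like p[A + 1] never overrun, and grad() masks the hash with & 15 rather than % 15.

from random import sample

# Classic permutation table: a shuffled 0..255, duplicated (512 entries total),
# consistent with the "& 255" masking of ix, iy, iz in perlin() above.
perm = sample(range(256), 256)
p = perm * 2

# The reference grad() masks with "& 15" (values 0..15) rather than "% 15" (0..14):
# h = hash & 15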