Perlin noise looks streaky and not coherent - Python

Now that my Perlin generator is 'working', I generated some noise, only to find that it looks nothing like the examples I see online.
My noise (notice the streaks): [image]
What I am aiming to get (obviously with a corresponding colour gradient): [image]
Why does mine look so noisy and nasty?
Code (sorry for the lack of a minimal stub; the Perlin noise makes up most of the program, so it's important to include the full program):
from PIL import Image
from tkinter import filedialog
from random import randint, random
#Initialise width / height
width = 625
height = 625
#Import gradient picture - 200*1 image used to texture perlin noise
#R,G,B,Alpha
gradient = Image.open("image.png")
gradlist = list(gradient.getdata())
#Create new image
img = Image.new('RGBA', (width, height), color=(255, 255, 255, 255))
#Perlin noise modules --------------------------------------------------------------------------------------------------------
#Modules
from random import sample
from math import floor
p = sample([x for x in range(0, (width * height))], (width * height)) * 2
#Anti-aliasing fade curve
def fade(t):
    retval = 6*(t**5) - 15*(t**4) + 10*(t**3)
    return retval
#Linear interpolation
def lerp(t, a, b):
    retval = a + (t * (b - a))
    return retval
#Clever bitwise hash stuff - picks a unit vector from 12 possible - (1,1,0),(-1,1,0),(1,-1,0),(-1,-1,0),(1,0,1),(-1,0,1),(1,0,-1),(-1,0,-1),(0,1,1),(0,-1,1),(0,1,-1),(0,-1,-1)
def grad(hash, x, y, z):
    h = hash % 15
    if h < 8:
        u = x
    else:
        u = y
    if h < 4:
        v = y
    elif h in (12, 14):
        v = x
    else:
        v = z
    return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v)
#Perlin function
def perlin(x, y, z):
    ix = int(floor(x)) & 255
    iy = int(floor(y)) & 255
    iz = int(floor(z)) & 255
    x -= int(floor(x))
    y -= int(floor(y))
    z -= int(floor(z))
    u = fade(x)
    v = fade(y)
    w = fade(z)
    #Hash coordinates of the 8 cube corners
    A = p[ix] + iy
    AA = p[A] + iz
    AB = p[A + 1] + iz
    B = p[ix + 1] + iy
    BA = p[B] + iz
    BB = p[B + 1] + iz
    return -lerp(w, lerp(v, lerp(u, grad(p[AA], x, y, z),
                                    grad(p[BA], x - 1, y, z)),
                            lerp(u, grad(p[AB], x, y - 1, z),
                                    grad(p[BB], x - 1, y - 1, z))),
                    lerp(v, lerp(u, grad(p[AA + 1], x, y, z - 1),
                                    grad(p[BA + 1], x - 1, y, z - 1)),
                            lerp(u, grad(p[AB + 1], x, y - 1, z - 1),
                                    grad(p[BB + 1], x - 1, y - 1, z - 1))))
def octavePerlin(x, y, z, octaves, persistence):
    total = 0
    frequency = 1
    amplitude = 1
    maxValue = 0
    for x in range(octaves):  # NB: this loop variable shadows the x coordinate argument
        total += perlin(x * frequency, y * frequency, z * frequency) * amplitude
        maxValue += amplitude
        amplitude *= persistence
        frequency *= 2
    return total / maxValue
z = random()
img.putdata([gradlist[int(octavePerlin((x + random() - 0.5) / 1000,
                                        (y + random() - 0.5) / 1000,
                                        z, 4, 2) * 100 + 100)]
             for x in range(width) for y in range(height)])
img.save(filedialog.asksaveasfilename() + ".png", "PNG")
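
For comparison, here is a minimal sketch of the corresponding setup in Ken Perlin's reference implementation, which this code appears to follow: the permutation table has 256 entries (doubled so that indices like p[A + 1] never go out of range), and the hash in grad is folded with & 15 rather than % 15 to select among the 12 gradient directions listed above. This is a sketch of the reference scheme, not a drop-in patch for the program above.

from random import sample

# 256-entry permutation table, doubled so p[A + 1] etc. stay in range
p = sample(range(256), 256) * 2

def grad(hash, x, y, z):
    h = hash & 15  # fold the hash into 0..15
    u = x if h < 8 else y
    v = y if h < 4 else (x if h in (12, 14) else z)
    return (u if h & 1 == 0 else -u) + (v if h & 2 == 0 else -v)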

Related

Something wrong with my Python implementation of Phong shading with only NumPy and PIL

Recently, I tried to implement Phong shading in Python using only NumPy and PIL, but there is some black-and-white noise in the rendered image. Can you point out what I should do to improve my code and fix the issue?
The resulting image is as follows: [image]
The mesh model can be downloaded from https://github.com/google/nerfactor/blob/main/third_party/xiuminglib/data/models/teapot.obj.
You can try the code below yourself.
import random
import numpy as np
import trimesh
from PIL import Image
def phong_shading(light_direction, view_direction, normal, material):
    # Calculate the ambient color
    ambient_color = material.ambient_color
    # Calculate the diffuse color
    diffuse_coefficient = max(np.dot(normal, light_direction), 0)
    diffuse_color = diffuse_coefficient * material.diffuse_color
    # Calculate the specular color (Blinn-Phong halfway vector)
    halfway_direction = normalize(light_direction + view_direction)
    specular_coefficient = max(np.dot(normal, halfway_direction), 0)
    specular_coefficient = specular_coefficient ** material.shininess
    specular_color = specular_coefficient * material.specular_color
    # Combine the ambient, diffuse and specular colors
    final_color = specular_color + diffuse_color + ambient_color
    return final_color
def normalize(v, axis=-1, epsilon=1e-12):
    square_sum = np.sum(np.square(v), axis, keepdims=True)
    v_inv_norm = 1. / np.sqrt(np.maximum(square_sum, epsilon))
    return v * v_inv_norm
def rasterize_triangle(vertices):
    # calculate the bounding box of the triangle
    min_x = int(min(vertices[:, 0]))
    max_x = int(max(vertices[:, 0])) + 1
    min_y = int(min(vertices[:, 1]))
    max_y = int(max(vertices[:, 1])) + 1
    for x in range(min_x, max_x):
        for y in range(min_y, max_y):
            if point_in_triangle(vertices, x, y):
                yield (x, y)
def is_point_in_triangle(vertices, x, y):
    v0, v1, v2 = vertices
    A = 1/2 * (-v1[1]*v2[0] + v0[1]*(-v1[0] + v2[0]) +
               v0[0]*(v1[1] - v2[1]) + v1[0]*v2[1])
    s = v0[1]*v2[0] - v0[0]*v2[1] + (v2[1] - v0[1])*x + (v0[0] - v2[0])*y
    t = v0[0]*v1[1] - v0[1]*v1[0] + (v0[1] - v1[1])*x + (v1[0] - v0[0])*y
    return 0 <= s and s <= A and 0 <= t and t <= A and (s + t) <= A
def point_in_triangle(vertices, x, y):
    v0, v1, v2 = vertices
    x1, y1, x2, y2, x3, y3 = v0[0], v0[1], v1[0], v1[1], v2[0], v2[1]
    # Compute barycentric coordinates
    denom = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3)
    l1 = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / denom
    l2 = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / denom
    l3 = 1 - l1 - l2
    # Check if point is inside the triangle
    return 0 <= l1 <= 1 and 0 <= l2 <= 1 and 0 <= l3 <= 1
def world_to_camera_coordinates(vertices, camera_position):
    ''' Convert from world coordinates to camera coordinates.
    This function assumes the camera is looking at the origin
    and that the camera's y axis points down towards the ground.
    Args:
        vertices (np.array): the vertices of the mesh in world coordinates.
    Returns:
        the vertices in camera coordinates.
    '''
    camera_z_axis = -normalize(camera_position)  # (3,)
    world_z_axis = np.array([0, 0, 1])
    project_y_on_z = -(-world_z_axis @ camera_z_axis.T) * camera_z_axis
    camera_y_axis = project_y_on_z - world_z_axis  # (3,)
    camera_x_axis = np.cross(camera_y_axis, camera_z_axis)  # (3,)
    camera_matrix = np.stack([camera_x_axis, camera_y_axis, camera_z_axis])
    return (camera_matrix @ (vertices - camera_position).T).T
def camera_to_screen_coordinates(vertices, width, height, fov, near_clip, far_clip):
    aspect_ratio = width / height
    # Create the perspective projection matrix
    projection_matrix = perspective(fov, aspect_ratio, near_clip, far_clip)
    # Create a matrix to store the transformed vertices
    transformed_vertices = np.ones((len(vertices), 4))
    transformed_vertices[:, :3] = vertices
    # Multiply each vertex by the projection matrix
    transformed_vertices = np.matmul(transformed_vertices, projection_matrix.T)
    # Convert from homogeneous coordinates to screen coordinates
    transformed_vertices[:, 0] = (
        transformed_vertices[:, 0] / transformed_vertices[:, 3]) * (width / 2) + (width / 2)
    transformed_vertices[:, 1] = (
        transformed_vertices[:, 1] / transformed_vertices[:, 3]) * (height / 2) + (height / 2)
    return transformed_vertices[:, :2]
def perspective(fov, aspect_ratio, near_clip, far_clip):
    fov = np.radians(fov)
    t = np.tan(fov / 2) * near_clip
    b = -t
    r = t * aspect_ratio
    l = -r
    projection_matrix = np.array(
        [
            [(2 * near_clip) / (r - l), 0, (r + l) / (r - l), 0],
            [0, (2 * near_clip) / (t - b), (t + b) / (t - b), 0],
            [0, 0, -(far_clip + near_clip) / (far_clip - near_clip),
             -(2 * far_clip * near_clip) / (far_clip - near_clip)],
            [0, 0, -1, 0]
        ]
    )
    return projection_matrix
def transform_to_screen_space(vertices, camera_position, img_width, img_height):
    assert img_width == img_height, 'The image must be square'
    # Transform the vertices to camera space
    camera_vertices = world_to_camera_coordinates(vertices, camera_position)
    # Transform the vertices to perspective space
    fov = 45
    focal = img_width / (2 * np.tan(np.radians(fov / 2)))
    screen_vertices = camera_vertices / camera_vertices[:, 2].reshape(-1, 1)
    screen_vertices[:, :2] = screen_vertices[:, :2] * focal + img_height / 2
    return screen_vertices, camera_vertices
def area_triangle(v1, v2, v3):
    ''' Compute the area of a triangle. '''
    return 0.5 * np.linalg.norm(np.cross(v2 - v1, v3 - v1))
def compute_vertices_normals(vertices, faces):
    ''' Compute the normal vector for each vertex.
    Args:
        vertices (np.array): the vertices of the mesh in world coordinates.
        faces (np.array): the triangle vertex indices of the mesh.
    '''
    # Method with trimesh
    mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
    vertices_normals = normalize(mesh.vertex_normals, epsilon=1e-160)
    # Method with plain numpy, kept for reference:
    '''
    vertices_normals = np.zeros_like(vertices).astype(np.float128)
    v1 = vertices[faces][:, 0]
    v2 = vertices[faces][:, 1]
    v3 = vertices[faces][:, 2]
    normal_before_normalization = np.cross(v2 - v1, v3 - v1)
    per_face_area = 0.5 * np.linalg.norm(
        normal_before_normalization, axis=-1, keepdims=True
    )
    per_face_area_enlarged = per_face_area * \
        per_face_area.shape[0] / per_face_area.sum()
    per_face_normal = normalize(normal_before_normalization, epsilon=1e-160)
    weighted_normal = per_face_normal * per_face_area_enlarged
    weighted_normal_broadcast = np.reshape(
        np.repeat(np.expand_dims(weighted_normal, 1), 3, axis=1), (-1, 3)
    )
    np.add.at(vertices_normals, faces.ravel(), weighted_normal_broadcast)
    vertices_normals = normalize(vertices_normals, epsilon=1e-160)
    '''
    return vertices_normals
def barycentric_coords(triangle_vertices, x, y):
    x1, y1, z1 = triangle_vertices[0]
    x2, y2, z2 = triangle_vertices[1]
    x3, y3, z3 = triangle_vertices[2]
    # Calculate barycentric coordinates
    lambda1 = ((y2 - y3)*(x - x3) + (x3 - x2)*(y - y3)) / \
        ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
    lambda2 = ((y3 - y1)*(x - x3) + (x1 - x3)*(y - y3)) / \
        ((y2 - y3)*(x1 - x3) + (x3 - x2)*(y1 - y3))
    lambda3 = 1 - lambda1 - lambda2
    return np.array([lambda1, lambda2, lambda3]).reshape(-1, 1)
def render_phong(vertices, faces, camera_position, light_position, width, height, material):
    # Compute the normal vector for each vertex
    vertices_normals = compute_vertices_normals(vertices, faces)
    # Transform the vertices to screen space
    transformed_vertices, camera_vertices = transform_to_screen_space(
        vertices, camera_position, width, height)
    # Create an empty image and a depth buffer
    img = Image.new('RGB', (width, height), (0, 0, 0))
    pixels = img.load()
    pixel_depth = np.ones((width, height)) * np.inf
    for face in faces:
        v1 = transformed_vertices[face[0]]
        v2 = transformed_vertices[face[1]]
        v3 = transformed_vertices[face[2]]
        if area_triangle(v1, v2, v3) == 0:
            continue
        # Normal vectors at the face's vertices
        normal = vertices_normals[face]
        # Light and view direction vectors for each vertex
        light_direction = normalize(light_position - vertices[face])
        view_direction = normalize(camera_position - vertices[face])
        # Rasterize the triangle
        for x, y in rasterize_triangle(transformed_vertices[face]):
            for i in range(20):
                tubx = random.uniform(0, 1.0) + x
                tuby = random.uniform(0, 1.0) + y
                # Barycentric coordinates of the jittered subpixel sample
                barycentric = barycentric_coords(
                    transformed_vertices[face], tubx, tuby)
                if np.min(barycentric) < 0:  # sample is outside the triangle
                    continue
                # Interpolate the vertex attributes to get per-pixel attributes
                interpolated_normal = (barycentric * normal).sum(axis=0)
                interpolated_light_direction = (
                    barycentric * light_direction
                ).sum(axis=0)
                interpolated_view_direction = (
                    barycentric * view_direction
                ).sum(axis=0)
                interpolated_camera_vertices = (
                    barycentric * camera_vertices[face]).sum(axis=0)
                # Calculate the color of the sample
                color = phong_shading(interpolated_light_direction,
                                      interpolated_view_direction, interpolated_normal, material)
                if 0 <= x < width and 0 <= y < height:
                    newr, newg, newb = (np.clip(color, 0, 1)
                                        * 255).astype(np.uint8)
                    depth = interpolated_camera_vertices[2]
                    if depth < pixel_depth[x, y]:
                        pixel_depth[x, y] = depth
                        pixels[x, y] = (newr, newg, newb)
    return img
class PhongShader():
    def __init__(self, light_position, camera_position, image_width=512, image_height=512):
        # Assumes the camera position is not along the z axis.
        self.light_position = light_position
        self.camera_position = camera_position
        self.image_width = image_width
        self.image_height = image_height

    def render(self, vertices, faces, material):
        return render_phong(vertices, faces, self.camera_position, self.light_position, self.image_width, self.image_height, material)
class Material():
    def __init__(self) -> None:
        self.ambient_color = np.array([0.1, 0.1, 0.1])
        self.diffuse_color = np.array([1., 0.0, 0.5])
        self.specular_color = np.array([0.5, 0.5, 0.5])
        self.shininess = 50
def main():
    # Load the mesh
    mesh = trimesh.load('teapot.obj')
    vertices, faces = mesh.vertices, mesh.faces
    # Create a shader
    shader = PhongShader(light_position=np.array(
        [8, 0, 0]), camera_position=np.array([8, 0, 0]))
    # Render the image
    material = Material()
    img = shader.render(vertices, faces, material)
    img.save("output.jpg")

if __name__ == '__main__':
    main()
The cause could be discretization in the code, but I am not sure how to fix it.
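
One detail worth checking (a guess, not a confirmed diagnosis): render_phong takes 20 jittered samples per pixel, but every accepted sample overwrites the pixel directly, so the last sample wins instead of the samples being averaged. Below is a minimal sketch of per-pixel supersample averaging; shade_sample is a hypothetical callback standing in for the barycentric-interpolation-plus-phong_shading step above, returning an RGB triple in [0, 1] or None when the sample falls outside the triangle.

import numpy as np

def supersample_pixel(shade_sample, x, y, n_samples=20, rng=None):
    # Jitter n_samples positions inside pixel (x, y), shade each one, and
    # average the accepted results instead of letting the last sample win.
    rng = np.random.default_rng() if rng is None else rng
    samples = []
    for _ in range(n_samples):
        color = shade_sample(x + rng.random(), y + rng.random())
        if color is not None:  # None means the sample fell outside the triangle
            samples.append(np.clip(color, 0, 1))
    if not samples:
        return None
    mean = np.mean(samples, axis=0)
    return tuple((mean * 255).astype(np.uint8))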

Fitting arcs on a contour using OpenCV?

I am working on a project in OpenCV where I have a contour and have to fit arcs of defined arc length, radius, chord length, and angle to it. By "fit" I mean I have to choose the best arc, the one that would cover a piece of the contour.
What I have tried so far: in each contour I find corners, which act as my segment endpoints; then, using some linear algebra, I calculate the radius and the centre of the circle that best fits each segment, and compare that radius and central angle against all the candidate arcs, choosing the one with the least difference in values. This approach is not helping much, as the arcs I get don't lie on the contour. The code is attached.
for contours in final_contours:
    iter = 0
    dict_sorted, sorted_corners = sortCorners(contours, corners)
    keys_dict_sorted = sorted(dict_sorted.keys())
    for key in keys_dict_sorted:
        [x, y] = dict_sorted[key]
        bl_img = cv2.circle(bl_img, (x, y), 3, (255, 0, 0), -1)
    for i in range(len(keys_dict_sorted)):
        if i != len(keys_dict_sorted) - 1:
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[i+1]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[i+1]) / 2)
        else:
            idx1 = keys_dict_sorted[i]
            idx3 = keys_dict_sorted[0]
            idx2 = math.floor((keys_dict_sorted[i] + keys_dict_sorted[0]) / 2)
        radius, [c_x, c_y] = betterCircleFinder(contours[idx1][0], contours[idx2][0], contours[idx3][0])
        if radius != -1:
            radius = round(truncate(radius, -1))
            c_x = round(truncate(c_x, -1))
            c_y = round(truncate(c_y, -1))
            pt1_angle = 180*(np.arctan2(np.array([contours[idx1][0][1] - c_y], dtype=float), np.array([contours[idx1][0][0] - c_x], dtype=float))/np.pi)
            pt2_angle = 180*(np.arctan2(np.array([contours[idx3][0][1] - c_y], dtype=float), np.array([contours[idx3][0][0] - c_x], dtype=float))/np.pi)
            angle = abs(pt1_angle - pt2_angle)
            actualRadius = radius * inchPerPixel
            b = random.randint(0, 255)
            g = random.randint(0, 255)
            r = random.randint(0, 255)
            error = [0 for i in range(len(copings))]
            idx = 0
            for coping in copings:
                err = abs(actualRadius - coping.radius)
                err1 = abs(angle - coping.centralAngle)
                error[idx] = (0.5 * err/actualRadius) + (0.5 * err1[0]/coping.centralAngle)
                idx += 1
            index_min = min(range(len(error)), key=error.__getitem__)
            bl_img = draw_coris(bl_img, int(copings[index_min].radius * pixelPerInch), 0, pt1_angle, pt1_angle + copings[index_min].centralAngle, (int(c_x), int(c_y)), (b, g, r), 5)
            if abs(c_x) > (2 ** 31 - 1) or abs(c_y) > (2 ** 31 - 1):
                print("OVERFLOW VALUES")
            continue
def betterCircleFinder(pt1, pt2, pt3):
    A = np.array([[2*pt1[0], 2*pt1[1], 1],
                  [2*pt2[0], 2*pt2[1], 1],
                  [2*pt3[0], 2*pt3[1], 1]])
    B = np.array([[-1 * (pt1[0] ** 2 + pt1[1] ** 2)],
                  [-1 * (pt2[0] ** 2 + pt2[1] ** 2)],
                  [-1 * (pt3[0] ** 2 + pt3[1] ** 2)]])
    det = np.linalg.det(A)
    if det == 0:
        return -1, [-1, -1]
    C = np.linalg.inv(A) @ B
    C = np.squeeze(C)
    c_x = -1 * C[0]
    c_y = -1 * C[1]
    c = C[2]
    radius = math.sqrt(c_x ** 2 + c_y ** 2 - c)
    return radius, [c_x, c_y]
This is the code that finds the parameters of the circle that best fits a segment.
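
For reference, the linear algebra behind betterCircleFinder: writing the circle as x² + y² + 2Dx + 2Ey + F = 0 makes it linear in (D, E, F), so three points give a 3x3 system with centre (-D, -E) and radius sqrt(D² + E² - F). A minimal standalone sketch of the same fit, using np.linalg.solve, which is generally more numerically stable than inverting A:

import numpy as np

def circle_from_three_points(p1, p2, p3):
    # Circle x^2 + y^2 + 2Dx + 2Ey + F = 0 is linear in (D, E, F):
    # each point contributes the row [2x, 2y, 1] . [D, E, F] = -(x^2 + y^2).
    (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
    A = np.array([[2*x1, 2*y1, 1.0],
                  [2*x2, 2*y2, 1.0],
                  [2*x3, 2*y3, 1.0]])
    b = -np.array([x1**2 + y1**2, x2**2 + y2**2, x3**2 + y3**2])
    if abs(np.linalg.det(A)) < 1e-12:  # collinear points: no unique circle
        return None
    D, E, F = np.linalg.solve(A, b)
    centre = (-D, -E)
    radius = np.sqrt(D*D + E*E - F)
    return centre, radius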

Perlin noise generator isn't working, doesn't look smooth

I watched some tutorials and tried to create a Perlin noise generator in Python.
It takes a tuple for the number of gradient vectors in the x and y directions and a scale for the distance in pixels between them, calculates the dot product between each pixel's offset and each of the 4 gradient vectors surrounding it, and then interpolates the results bilinearly to get the pixel's value.
Here's the code:
from PIL import Image
import numpy as np

scale = 16
size = np.array([8, 8])
vectors = []
for i in range(size[0]):
    for j in range(size[1]):
        rand = np.random.rand() * 2 * np.pi
        vectors.append(np.array([np.cos(rand), np.sin(rand)]))
interpolated_map = np.zeros(size * scale)

def interpolate(x1, x2, w):
    t = (w % scale) / scale
    return (x2 - x1) * t + x1

def dot_product(a, b):
    return a[0] * b[0] + a[1] * b[1]

for i in range(size[1] * scale):
    for j in range(size[0] * scale):
        dot_products = []
        for m in range(4):
            corner_vector_x = round(i / scale) + (m % 2)
            corner_vector_y = round(j / scale) + int(m / 2)
            x = i - corner_vector_x * scale
            y = j - corner_vector_y * scale
            if corner_vector_x >= size[0]:
                corner_vector_x = 0
            if corner_vector_y >= size[1]:
                corner_vector_y = 0
            corner_vector = vectors[corner_vector_x + corner_vector_y * size[0]]
            distance_vector = np.array([x, y])
            dot_products.append(dot_product(corner_vector, distance_vector))
        x1 = interpolate(dot_products[0], dot_products[1], i)
        x2 = interpolate(dot_products[2], dot_products[3], i)
        interpolated_map[i][j] = (interpolate(x1, x2, j) / 2 + 1) * 255

img = Image.fromarray(interpolated_map)
img.show()
I'm getting this image: [image]
but I should be getting this: [image]
I don't know what's going wrong; I've tried watching multiple different tutorials and reading a bunch of different articles, but the result is always the same.
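
One thing the tutorials usually include (offered as a hint, not a verified fix for this exact code): the interpolation weight is smoothed with Perlin's quintic fade curve before lerping; with a raw linear weight, the cell boundaries tend to show through. A sketch that drops into the interpolate function above (scale is the variable from the code):

def fade(t):
    # Perlin's quintic: zero first and second derivatives at t = 0 and t = 1
    return t * t * t * (t * (6 * t - 15) + 10)

def interpolate(x1, x2, w):
    t = fade((w % scale) / scale)
    return (x2 - x1) * t + x1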

Implementing smooth colouring in the Mandelbrot set

I am trying to colour the Mandelbrot set using HSV values and the PIL library.
Even after multiple tries fiddling with the HSV values, I could not achieve the desired effect.
Here is what I currently have: [image]
Here is the desired effect: [image]
This is the code I am trying. It would also be helpful if you could add some tips to optimise the code below to compute the set faster; I am new to Python.
from PIL import Image
import random
import math
from decimal import Decimal

# Size of the image canvas
HEIGHT = 500
ZOOM = 0.0
Y_PAN = 0.0

# Range of the complex plane
MIN_X = -2.0 + ZOOM
MAX_X = 2.0 - ZOOM
MAX_Y = 2.0 + Y_PAN - ZOOM
MIN_Y = -2.0 + Y_PAN + ZOOM

DATA = []

def map_to_scale_d(x, in_min, in_max, out_min, out_max):
    # returns float
    return float((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)

def map_to_scale(x, in_min, in_max, out_min, out_max):
    # returns int
    return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

# Max iterations till Zn
ITER = 200

# Loop to traverse every single point in the canvas
for y in xrange(HEIGHT):
    for x in xrange(HEIGHT):
        # Convert to the complex-plane scale
        a = map_to_scale_d(x, 0, HEIGHT, MIN_X, MAX_X)
        b = map_to_scale_d(y, 0, HEIGHT, MAX_Y, MIN_Y)
        # Original values
        _a = a
        _b = b
        counter = 0
        # Start the iteration at (a, b) in the complex plane,
        # calculating z^2 + c
        while counter < ITER:
            aa = a * a - b * b
            bb = 2 * a * b
            a = aa + _a
            b = bb + _b
            if abs(aa + bb) > 4:
                break
            counter = counter + 1
        # Initialise colour
        h = 0
        s = map_to_scale(counter, 0, ITER, 0, 100)
        v = map_to_scale(counter, 0, ITER, 0, 100)
        if counter == ITER:
            h = 0
            s = 0
            v = 0
        # Convert to 8-bit
        h = map_to_scale(h, 0, 360, 0, 255)
        s = map_to_scale(s, 0, 100, 0, 255)
        v = map_to_scale(v, 0, 100, 0, 255)
        DATA.append((h, s, v))

img = Image.new('HSV', (HEIGHT, HEIGHT))
img.putdata(DATA)
img.show()
img.convert('RGB').save('test.png')
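
For the smooth banding in the target image, the standard trick is the continuous (fractional) escape count, n + 1 - log2(log|z|), fed into the hue instead of the integer counter. A minimal standalone sketch of that calculation (not wired into the code above; escape_radius = 2 is the usual choice):

import math

def smooth_escape_count(ca, cb, max_iter=200, escape_radius=2.0):
    # Iterate z -> z^2 + c from z = 0 and return a fractional escape
    # count n + 1 - log2(log|z|), which varies smoothly between bands.
    a, b = 0.0, 0.0
    for n in range(max_iter):
        a, b = a * a - b * b + ca, 2 * a * b + cb
        if a * a + b * b > escape_radius * escape_radius:
            modulus = math.sqrt(a * a + b * b)
            return n + 1 - math.log(math.log(modulus)) / math.log(2)
    return float(max_iter)  # point is treated as inside the set

# The hue can then be derived from the fractional count,
# e.g. h = int(255 * smooth_escape_count(a, b) / ITER)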

Perlin noise artifacts

I've taken the Wikipedia Perlin noise algorithm and implemented it in Python; here is the code:
import random
import math
from PIL import Image
from decimal import Decimal

IMAGE_SIZE = 200
PERLIN_RESOLUTION = 10

GRADIENT = []
for x in range(PERLIN_RESOLUTION + 1):
    GRADIENT.append([])
    for y in range(PERLIN_RESOLUTION + 1):
        angle = random.random() * 2 * math.pi
        vector = (
            Decimal(math.cos(angle)),
            Decimal(math.sin(angle))
        )
        GRADIENT[x].append(vector)

def lerp(a0, a1, w):
    return (1 - w)*a0 + w*a1

def dotGridGradient(ix, iy, x, y):
    dx = x - Decimal(ix)
    dy = y - Decimal(iy)
    return (dx*GRADIENT[iy][ix][0] + dy*GRADIENT[iy][ix][1])

def perlin(x, y):
    if x > 0.0:
        x0 = int(x)
    else:
        x0 = int(x) - 1
    x1 = x0 + 1
    if y > 0.0:
        y0 = int(y)
    else:
        y0 = int(y) - 1
    y1 = y0 + 1
    sx = x - Decimal(x0)
    sy = y - Decimal(y0)
    n0 = dotGridGradient(x0, y0, x, y)
    n1 = dotGridGradient(x1, y0, x, y)
    ix0 = lerp(n0, n1, sx)
    n0 = dotGridGradient(x0, y1, x, y)
    n1 = dotGridGradient(x1, y1, x, y)
    ix1 = lerp(n0, n1, sx)
    value = lerp(ix0, ix1, sy)
    return value

image = Image.new('RGB', (IMAGE_SIZE, IMAGE_SIZE))
pixels = image.load()
for i in range(IMAGE_SIZE):
    x = Decimal(i) / IMAGE_SIZE
    for j in range(IMAGE_SIZE):
        y = Decimal(j) / IMAGE_SIZE
        value = perlin(x * 10, y * 10)
        greyscale = (value + 1) * 255 / 2
        pixels[i, j] = (greyscale, greyscale, greyscale)
image.save('artifacts.png', 'PNG')
Here is the resulting image created by the script: [image]
I must be missing something here; you can very clearly see the vertices of the grid. Can anyone let me know what is going wrong?
You need to use smoothstep instead of linear interpolation:

def smoothstep(a0, a1, w):
    value = w*w*w*(w*(w*6 - 15) + 10)
    return a0 + value*(a1 - a0)
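
Applied to the perlin function in the question, that means replacing the three lerp calls (names as in the question's code):

ix0 = smoothstep(n0, n1, sx)
n0 = dotGridGradient(x0, y1, x, y)
n1 = dotGridGradient(x1, y1, x, y)
ix1 = smoothstep(n0, n1, sx)
value = smoothstep(ix0, ix1, sy)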
