Trouble implementing Perlin noise in Python

I'm attempting to implement the algorithm for generating 2D Perlin noise described here, but I'm having some trouble doing it in Python (which I am relatively new to).
I was expecting the final noise values ('z' in the linked example) to be somewhere between 0.0 and 1.0, but that's not what I'm getting. My code is below; I'd really appreciate any input.
Thanks!
perlin.py:
import math
import numpy
import random
import vector as vctr
from PIL import Image

def dot(v1, v2):
    """
    Returns the dot product of the two input vectors.
    Args:
    v1 - First vector
    v2 - Second vector
    Return:
    Resulting dot product
    """
    return (v1.x * v2.x) + (v1.y * v2.y)

def fade(t):
    """
    Fade 3t^2 - 2t^3
    Args:
    t - Value to fade.
    Return:
    Faded value.
    """
    return (3 * (t ** 2)) - (2 * (t ** 3))

def lerp(minVal, maxVal, term):
    """
    Linearly interpolates between minVal and maxVal by term.
    Args:
    minVal - Value returned when term is 0.
    maxVal - Value returned when term is 1.
    term - Interpolation parameter.
    Return:
    Interpolated value.
    """
    return (maxVal - minVal) * term + minVal

def generateImage(noises, file="perlin.png"):
    """
    Generates an image on disc of the resulting noise values
    Args:
    noises (list) - 2d list of noise values
    file (str) - location of file to write to
    """
    pixels = numpy.zeros((height, width, 3), dtype=numpy.uint8)
    for x in range(0, width):
        for y in range(0, height):
            rgb = 255 * noises[x][y]
            pixels[x, y] = [rgb, rgb, rgb]

    # Print pixels as image
    img = Image.fromarray(pixels, 'RGB')
    img.save(file)

# Define the noise region
width = 300
height = 300

# Column ordered array of generated gradient vectors
g = numpy.zeros((width + 1, height + 1)).tolist()

# List of final noise values
z = numpy.zeros((width, height)).tolist()

# Fill list with randomly directed unit vectors (one for each grid point)
for x in range(0, width + 1):
    for y in range(0, height + 1):
        randX = random.uniform(-1.0, 1.0)
        randY = random.uniform(-1.0, 1.0)
        length = math.sqrt(randX**2 + randY**2)
        g[x][y] = vctr.vector(randX / length, randY / length)

# For each cell in the sampling space (i.e. each pixel)
for x in range(0, width):
    for y in range(0, height):
        # Generate random point (p) within and relative to current cell
        pX = random.uniform(0.0, 1.0)
        pY = random.uniform(0.0, 1.0)

        # Get the gradient vectors for each cell corner
        g_tl = g[x][y]
        g_tr = g[x + 1][y]
        g_bl = g[x][y + 1]
        g_br = g[x + 1][y + 1]

        # Vectors from each cell corner to the generated point
        # X axis is positive going right, Y is positive going down
        tl = vctr.vector(pX, pY)
        tr = vctr.vector(pX - 1, pY)
        bl = vctr.vector(pX, pY - 1)
        br = vctr.vector(pX - 1, pY - 1)

        # Dot product these vectors to get gradient values
        u = dot(tl, g_tl)
        v = dot(tr, g_tr)
        s = dot(bl, g_bl)
        t = dot(br, g_br)

        # Interpolate the gradient values
        sX = fade(pX)
        sY = fade(pY)
        a = s + (sX * (t - s))
        b = u + (sX * (v - u))
        value = a + (sY * (a - b))

        if (value < 0.0) or (value > 1.0):
            print("VALUE IS OUT OF BOUNDS??? " + str(value))

        z[x][y] = value

generateImage(z)
print("Completed Perlin noise generation!")
vector.py:
class vector:
    def __init__(self, x, y):
        """
        Initialise a new vector in 2D space with the input X and Y values.
        x: X value of vector
        y: Y value of vector
        """
        self.x = x
        self.y = y
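(Note: raw 2D Perlin noise with unit-length gradient vectors does not naturally fall in [0.0, 1.0]; its theoretical range is [-sqrt(2)/2, sqrt(2)/2], so negative values alone are not a bug. A minimal sketch of the usual remap into [0.0, 1.0], assuming values in that range:)

# Sketch: remap a raw 2D Perlin value from [-sqrt(2)/2, sqrt(2)/2]
# into [0.0, 1.0] before converting it to a pixel intensity.
# Assumes unit-length gradient vectors, as in the code above.
import math

HALF_SQRT2 = math.sqrt(2) / 2.0

def normalise(value):
    return (value + HALF_SQRT2) / (2.0 * HALF_SQRT2)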

Related

Perlin noise generator isn't working, doesn't look smooth

I watched some tutorials and tried to create a Perlin noise generator in python.
It takes in a tuple for the number of vectors in the x and y directions and a scale for the distance in pixels between the vectors, then calculates the dot product between each pixel and each of the 4 vectors surrounding it, and finally interpolates them bilinearly to get the pixel's value.
here's the code:
from PIL import Image
import numpy as np

scale = 16
size = np.array([8, 8])
vectors = []

for i in range(size[0]):
    for j in range(size[1]):
        rand = np.random.rand() * 2 * np.pi
        vectors.append(np.array([np.cos(rand), np.sin(rand)]))

interpolated_map = np.zeros(size * scale)

def interpolate(x1, x2, w):
    t = (w % scale) / scale
    return (x2 - x1) * t + x1

def dot_product(a, b):
    return a[0] * b[0] + a[1] * b[1]

for i in range(size[1] * scale):
    for j in range(size[0] * scale):
        dot_products = []
        for m in range(4):
            corner_vector_x = round(i / scale) + (m % 2)
            corner_vector_y = round(j / scale) + int(m / 2)
            x = i - corner_vector_x * scale
            y = j - corner_vector_y * scale
            if corner_vector_x >= size[0]:
                corner_vector_x = 0
            if corner_vector_y >= size[1]:
                corner_vector_y = 0
            corner_vector = vectors[corner_vector_x + corner_vector_y * size[0]]
            distance_vector = np.array([x, y])
            dot_products.append(dot_product(corner_vector, distance_vector))
        x1 = interpolate(dot_products[0], dot_products[1], i)
        x2 = interpolate(dot_products[2], dot_products[3], i)
        interpolated_map[i][j] = (interpolate(x1, x2, j) / 2 + 1) * 255

img = Image.fromarray(interpolated_map)
img.show()
I'm getting this image: [my output image, which doesn't look smooth]
but I should be getting this: [expected smooth Perlin noise image]
I don't know what's going wrong, I've tried watching multiple different tutorials, reading a bunch of different articles, but the result is always the same.
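One hedged guess (an editorial note, not a confirmed fix): round(i / scale) snaps to the nearest grid point instead of the cell's top-left corner, so the wrong gradient cell is selected for half of every cell; the corner index should be floored. A minimal sketch of that change, together with the usual smoothstep fade for the interpolation weight (both are assumptions based on the script above, not from the original poster):

# Sketch: smoothstep fade instead of a purely linear weight,
# plus floored cell indices (assumes the surrounding script).
def fade(t):
    return t * t * (3 - 2 * t)

def interpolate(x1, x2, w):
    t = fade((w % scale) / scale)
    return (x2 - x1) * t + x1

# inside the corner loop, floor the cell index rather than rounding:
#     corner_vector_x = i // scale + (m % 2)
#     corner_vector_y = j // scale + int(m / 2)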

How to implement 3D bilinear interpolation using numpy?

I have arrived at this bilinear interpolation code (added here), but I would like to extend it to 3D, meaning update it to work with an RGB image (3D instead of only 2D).
If you have any suggestions for how I can do that, I would love to know.
This was the one dimension linear interpolation:
import math

def linear1D_resize(in_array, size):
    """
    `in_array` is the input array.
    `size` is the desired size.
    """
    ratio = (len(in_array) - 1) / (size - 1)
    out_array = []
    for i in range(size):
        low = math.floor(ratio * i)
        high = math.ceil(ratio * i)
        weight = ratio * i - low
        a = in_array[low]
        b = in_array[high]
        out_array.append(a * (1 - weight) + b * weight)
    return out_array
And this for the 2D:
import math
import numpy as np

def bilinear_resize(image, height, width):
    """
    `image` is a 2-D numpy array
    `height` and `width` are the desired spatial dimension of the new 2-D array.
    """
    img_height, img_width = image.shape[:2]
    resized = np.empty([height, width])
    x_ratio = float(img_width - 1) / (width - 1) if width > 1 else 0
    y_ratio = float(img_height - 1) / (height - 1) if height > 1 else 0
    for i in range(height):
        for j in range(width):
            x_l, y_l = math.floor(x_ratio * j), math.floor(y_ratio * i)
            x_h, y_h = math.ceil(x_ratio * j), math.ceil(y_ratio * i)
            x_weight = (x_ratio * j) - x_l
            y_weight = (y_ratio * i) - y_l
            a = image[y_l, x_l]
            b = image[y_l, x_h]
            c = image[y_h, x_l]
            d = image[y_h, x_h]
            # pixel is the scalar computed by the interpolation
            pixel = (a * (1 - x_weight) * (1 - y_weight)
                     + b * x_weight * (1 - y_weight)
                     + c * y_weight * (1 - x_weight)
                     + d * x_weight * y_weight)
            resized[i][j] = pixel
    return resized
Check out some of the scipy ndimage interpolate functions. They will do what you're looking for and are 'using numpy'.
They are also very functional, fast and have been tested many times.
Richard
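A minimal sketch of that suggestion (an illustration, not from the answer itself: it assumes scipy.ndimage.zoom with order=1, i.e. bilinear, and a zoom factor of 1 on the channel axis so each RGB channel is resized independently):

import numpy as np
from scipy import ndimage

def bilinear_resize_rgb(image, height, width):
    # per-axis zoom factors; 1 on the channel axis leaves RGB intact
    factors = (height / image.shape[0], width / image.shape[1], 1)
    return ndimage.zoom(image, factors, order=1)  # order=1 -> bilinear

rgb = np.random.rand(64, 64, 3)
print(bilinear_resize_rgb(rgb, 128, 96).shape)  # (128, 96, 3)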

Spectral and Spatial Measures of Sharpness - How to calculate slope of magnitude spectrum?

I am trying to implement the S1 measure (Spectral Measure of Sharpness, Section III-A) from this paper. Here we have to calculate the slope (alpha) of the magnitude spectrum of an image in order to measure sharpness. I am able to write the other parts of the algorithm, but I am unable to calculate the slope. Here is my code. The function 'aplha' is where I calculate the magnitude_spectrum, and I think we can use it to calculate the slope, but I am not sure how to do that:
import cv2
import numpy as np

def aplha(image_block):
    img_float32 = np.float32(image_block)
    dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0], dft_shift[:,:,1]))
    return output (??)
Rest of the code:
def S1_calc(alpha):
    tou1 = -3
    tou2 = 2
    output = 1 - (1 / (1 + np.exp(tou1 * (alpha - tou2))))
    return output

def lx(image_block):
    b = 0.7656
    k = 0.0364
    y = 2.2
    return np.power((b + k * image_block), y)

def contrast(lx_val):
    T1 = 5
    T2 = 2
    max_val = np.max(lx_val)
    min_val = np.min(lx_val)
    mean_val = np.mean(lx_val)
    return (((max_val - min_val) < T1) or (mean_val < T2))

def image_gray(image_RGB):
    output = (0.2989 * image_RGB[:,:,0] +
              0.5870 * image_RGB[:,:,1] +
              0.1140 * image_RGB[:,:,2])
    return output

def S1(gray_image, m = 32, d = 24):
    ### SPECTRAL MEASURE OF SHARPNESS ###
    # m = each block size
    # d = overlapping pixels of neighbouring blocks
    h, w = gray_image.shape
    output = gray_image.copy()
    row = 0
    while (row < h):
        col = 0
        while (col < w):
            top = row
            bottom = min(row + m, h)
            left = col
            right = min(col + m, w)
            image_block = gray_image[top : bottom, left : right]
            lx_val = lx(image_block)
            contrast_bool = contrast(lx_val)
            if contrast_bool:
                output[top : bottom, left : right] = 0
            else:
                alpha_val = aplha(image_block)
                output[top : bottom, left : right] = S1_calc(alpha_val)
            col = col + m - d
        row = row + m - d
    return output
I am using Jupyter Notebook with Python 3.6.
You could check this MATLAB code. See also another MATLAB code.
According to the latter one, we need to know the freq and power values, and then we can fit these two variables with a linear function; the slope of the fitted line is what we need. We can get the slope with np.polyfit.
Now, our question is how to get the freq of an image. You could do this:
from skimage.data import camera
import numpy as np
image = camera()
height, width = image.shape
u, v = np.meshgrid(np.arange(height // 2), np.arange(width // 2))
freq = np.round(np.sqrt(u**2 + v**2)).astype(np.int64)
Now freq should have the same shape as the FFT of the input image. You need to sum all values of the magnitude_spectrum where they have the same freq, like this:
freq_uniq = np.unique(freq.flatten())
y = []
for value in freq_uniq:
    y.append(magnitude_spectrum[freq == value].sum())
y = np.array(y)
Finally, you could just fit freq_uniq and y and get the slope. You might need to scale them with np.log first.
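A minimal sketch of that final step (an illustration, assuming the freq_uniq and y arrays from above; the log scaling follows the suggestion here, and the sign convention assumes the spectrum falls off roughly as 1/f^alpha):

import numpy as np

valid = freq_uniq > 0                  # skip the DC term to avoid log(0)
log_f = np.log(freq_uniq[valid])
log_y = np.log(y[valid])
slope, intercept = np.polyfit(log_f, log_y, 1)
alpha = -slope                         # magnitude ~ 1 / f**alpha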

projecting points onto a parallel grid at a depth from camera (maya)

I am attempting to create a grid of locators that serve as projected points onto a parallel finite plane from a camera in maya at a specified depth. The grid should line up with a specified resolution so as to match rendered output.
At the moment my calculations are off, and I am looking for some help to work out where my formula for the projected points is incorrect.
I have a self contained python script and image showing the current position of locators that are spawned as an example.
[image: the currently spawned locators are off on the y and z axes]
import maya.cmds as mc
import maya.OpenMaya as om

res = [mc.getAttr('defaultResolution.width'),
       mc.getAttr('defaultResolution.height')]
print res

grid = [5, 5]

def projectedGridPoint(camera, coord, depth, res):
    selList = om.MSelectionList()
    selList.add(camera)
    dagPath = om.MDagPath()
    selList.getDagPath(0, dagPath)
    dagPath.extendToShape()
    camMtx = dagPath.inclusiveMatrix()
    fnCam = om.MFnCamera(dagPath)
    mFloatMtx = fnCam.projectionMatrix()
    projMtx = om.MMatrix(mFloatMtx.matrix)

    # center of camera
    eyePt = fnCam.eyePoint()

    # offset position
    z = eyePt.z - depth

    # calculated xy positions
    x = (2 * z * coord[0] / res[0]) - z
    y = (2 * z * coord[1] / res[1]) - z

    return om.MPoint(x, y, depth) * camMtx * projMtx.inverse()

for y in range(grid[1] + 1):
    for x in range(grid[0] + 1):
        coord = (x / float(grid[0]) * res[0], y / float(grid[1]) * res[1])
        pt = projectedGridPoint('camera1', coord, 10, res)
        mc.spaceLocator(a=1, p=[pt.x, pt.y, pt.z])
Once I adjusted Theodox's answer to account for all possible grid divisions, so that ndc_x and ndc_y were always in the range of -1 to 1, I was able to get a working solution.
import maya.api.OpenMaya as om
import maya.cmds as cmds

def projectedGridPoint(camera, coord, depth):
    selList = om.MGlobal.getSelectionListByName(camera)
    dagPath = selList.getDagPath(0)
    dagPath.extendToShape()
    view = dagPath.inclusiveMatrix()
    fnCam = om.MFnCamera(dagPath)
    projection = om.MMatrix(fnCam.projectionMatrix())
    viewProj = projection * view

    r = om.MPoint(coord[0], coord[1], -1 * depth) * projection.inverse()
    return r.homogenize() * view

xx, yy = (6, 6)

for y in range(yy + 1):
    for x in range(xx + 1):
        ndc_x = -1
        ndc_y = -1
        if x > 0:
            ndc_x = (x / float(xx) * 2) - 1
        if y > 0:
            ndc_y = (y / float(yy) * 2) - 1
        coord = (ndc_x, ndc_y)
        print coord
        pt = projectedGridPoint('camera1', coord, 0)
        c, _ = cmds.polyCube(w=0.1, d=0.1, h=0.1)
        cmds.xform(c, t=(pt[0], pt[1], pt[2]))
I think you want something a bit more like this (note: I converted it to API 2 to cut down on the boilerplate):
import maya.api.OpenMaya as om
import maya.cmds as cmds

def projectedGridPoint(camera, coord, depth):
    selList = om.MGlobal.getSelectionListByName(camera)
    dagPath = selList.getDagPath(0)
    dagPath.extendToShape()
    view = dagPath.inclusiveMatrix()
    fnCam = om.MFnCamera(dagPath)
    projection = om.MMatrix(fnCam.projectionMatrix())
    viewProj = projection * view

    r = om.MPoint(coord[0], coord[1], -1 * depth) * projection.inverse()
    return r.homogenize() * view

xx, yy = (2, 2)

for y in range(yy):
    for x in range(xx):
        ndc_x = 2.0 * x / float(xx - 1) - 1
        ndc_y = 2.0 * y / float(yy - 1) - 1
        coord = (ndc_x, ndc_y)
        pt = projectedGridPoint('camera1', coord, 0)
        c, _ = cmds.polyCube(w=0.1, d=0.1, h=0.1)
        cmds.xform(c, t=(pt[0], pt[1], pt[2]))
The coords are supplied as normalized device coordinates (from -1,-1 to 1, 1 at the corners of the view) and the depth goes from the near to far clip planes -- a depth of 1 is right on the near plane and a depth of 0 is on the far plane. I think in practice I'd lock the depth at 0 and use the clip plane setting on the camera to set the depth
Edit: I rationalized the original, wonky method of converting index values to NDC coordinates.

Drawing direction fields

Is there a way to draw direction fields in python?
My attempt is to modify http://www.compdigitec.com/labs/files/slopefields.py giving
#!/usr/bin/python
import math
from subprocess import CalledProcessError, call, check_call

def dy_dx(x, y):
    try:
        # declare your dy/dx here:
        return x**2 - x - 2
    except ZeroDivisionError:
        return 1000.0

# Adjust window parameters
XMIN = -5.0
XMAX = 5.0
YMIN = -10.0
YMAX = 10.0
XSCL = 0.5
YSCL = 0.5

DISTANCE = 0.1

def main():
    fileobj = open("data.txt", "w")
    for x1 in xrange(int(XMIN / XSCL), int(XMAX / XSCL)):
        for y1 in xrange(int(YMIN / YSCL), int(YMAX / YSCL)):
            x = float(x1 * XSCL)
            y = float(y1 * YSCL)
            slope = dy_dx(x, y)
            dx = math.sqrt( DISTANCE/( 1 + math.pow(slope, 2) ) )
            dy = slope * dx
            fileobj.write(str(x) + " " + str(y) + " " + str(dx) + " " + str(dy) + "\n")
    fileobj.close()
    try:
        check_call(["gnuplot","-e","set terminal png size 800,600 enhanced font \"Arial,12\"; set xrange [" + str(XMIN) + ":" + str(XMAX) + "]; set yrange [" + str(YMIN) + ":" + str(YMAX) + "]; set output 'output.png'; plot 'data.txt' using 1:2:3:4 with vectors"])
    except (CalledProcessError, OSError):
        print "Error: gnuplot not found on system!"
        exit(1)
    print "Saved image to output.png"
    call(["xdg-open","output.png"])
    return 0

if __name__ == '__main__':
    main()
However, the best image I get from this is: [output image]
How can I get an output that looks more like the first image? Also, how can I add the three solid lines?
You can use this matplotlib code as a base. Modify it for your needs.
I have updated the code to show same length arrows. The important option is to set the angles option of the quiver function, so that the arrows are correctly printed from (x,y) to (x+u,y+v) (instead of the default, which just takes into account of (u,v) when computing the angles).
It is also possible to change the axes from "boxes" to "arrows". Let me know if you need that change and I could add it.
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import numpy as np

fig = plt.figure()

def vf(x, t):
    dx = np.zeros(2)
    dx[0] = 1.0
    dx[1] = x[0] ** 2 - x[0] - 2.0
    return dx

# Solution curves
t0 = 0.0
tEnd = 10.0

# Vector field
X, Y = np.meshgrid(np.linspace(-5, 5, 20), np.linspace(-10, 10, 20))
U = 1.0
V = X ** 2 - X - 2

# Normalize arrows
N = np.sqrt(U ** 2 + V ** 2)
U = U / N
V = V / N
plt.quiver(X, Y, U, V, angles="xy")

t = np.linspace(t0, tEnd, 100)
for y0 in np.linspace(-5.0, 0.0, 10):
    y_initial = [y0, -10.0]
    y = odeint(vf, y_initial, t)
    plt.plot(y[:, 0], y[:, 1], "-")

plt.xlim([-5, 5])
plt.ylim([-10, 10])
plt.xlabel(r"$x$")
plt.ylabel(r"$y$")
plt.show()
I had a lot of fun making one of these as a hobby project using pygame. I plotted the slope at each pixel, using shades of blue for positive and shades of red for negative. Black is for undefined. This is dy/dx = log(sin(x/y)+cos(y/x)):
You can zoom in and out; here it is zoomed in on the middle upper part:
and also click on a point to graph the line going through that point:
It's just 440 lines of code, so here is the .zip of all the files. I guess I'll excerpt relevant bits here.
The equation itself is input as a valid Python expression in a string, e.g. "log(sin(x/y)+cos(y/x))". This is then compiled. This function here graphs the color field, where self.func.eval() gives the dy/dx at the given point. The code is a bit complicated here because I made it render in stages - first 32x32 blocks, then 16x16, etc. - to make it snappier for the user.
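The evaluator itself isn't shown above, so here is a minimal sketch of how a compiled-expression function like self.func could work (the class name and details are hypothetical, not taken from the ZIP):

import math

class ExprFunc:
    """Hypothetical stand-in for self.func: compile a dy/dx expression
    once, then evaluate it at a given (x, y)."""
    def __init__(self, expr):
        self.code = compile(expr, "<dy/dx>", "eval")

    def eval(self, x, y):
        # expose the math module's functions plus x and y
        names = dict(vars(math))
        names.update({"x": x, "y": y})
        return eval(self.code, {"__builtins__": {}}, names)

f = ExprFunc("log(sin(x/y)+cos(y/x))")
print(f.eval(1.0, 2.0))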
def graphcolorfield(self, sqsizes=[32, 16, 8, 4, 2, 1]):
    su = ScreenUpdater(50)
    lastskip = self.xscreensize
    quitit = False
    for squaresize in sqsizes:
        xsquaresize = squaresize
        ysquaresize = squaresize

        if squaresize == 1:
            self.screen.lock()
        y = 0
        while y <= self.yscreensize:
            x = 0
            skiprow = y % lastskip == 0
            while x <= self.xscreensize:
                if skiprow and x % lastskip == 0:
                    x += squaresize
                    continue
                color = (255, 255, 255)
                try:
                    m = self.func.eval(*self.ct.untranscoord(x, y))
                    if m >= 0:
                        if m < 1:
                            c = 255 * m
                            color = (0, 0, c)
                        else:
                            #c = 255 - 255 * (1.0/m)
                            #color = (c, c, 255)
                            c = 255 - 255 * (1.0/m)
                            color = (c/2.0, c/2.0, 255)
                    else:
                        pm = -m
                        if pm < 1:
                            c = 255 * pm
                            color = (c, 0, 0)
                        else:
                            c = 255 - 255 * (1.0/pm)
                            color = (255, c/2.0, c/2.0)
                except:
                    color = (0, 0, 0)
                if squaresize > 1:
                    self.screen.fill(color, (x, y, squaresize, squaresize))
                else:
                    self.screen.set_at((x, y), color)
                if su.update():
                    quitit = True
                    break
                x += xsquaresize
            if quitit:
                break
            y += ysquaresize

        if squaresize == 1:
            self.screen.unlock()
        lastskip = squaresize
        if quitit:
            break
This is the code which graphs a line through a point:
def _grapheqhelp(self, sx, sy, stepsize, numsteps, color):
    x = sx
    y = sy
    i = 0
    pygame.draw.line(self.screen, color, (x, y), (x, y), 2)
    while i < numsteps:
        lastx = x
        lasty = y
        try:
            m = self.func.eval(x, y)
        except:
            return
        x += stepsize
        y = y + m * stepsize
        screenx1, screeny1 = self.ct.transcoord(lastx, lasty)
        screenx2, screeny2 = self.ct.transcoord(x, y)
        #print "(%f, %f)-(%f, %f)" % (screenx1, screeny1, screenx2, screeny2)
        try:
            pygame.draw.line(self.screen, color,
                             (screenx1, screeny1),
                             (screenx2, screeny2), 2)
        except:
            return
        i += 1
    stx, sty = self.ct.transcoord(sx, sy)
    pygame.draw.circle(self.screen, color, (int(stx), int(sty)), 3, 0)
And it runs backwards & forwards starting from that point:
def graphequation(self, sx, sy, stepsize=.01, color=(255, 255, 127)):
    """Graph the differential equation, given the starting point sx and sy,
    using stepsize stepsize."""
    numstepsf = (self.xrange[1] - sx) / stepsize
    numstepsb = (sx - self.xrange[0]) / stepsize
    self._grapheqhelp(sx, sy, stepsize, numstepsf, color)
    self._grapheqhelp(sx, sy, -stepsize, numstepsb, color)
I never got around to drawing actual lines because the pixel approach looked too cool.
Try changing your values for the parameters to this:
XSCL = .2
YSCL = .2
These parameters determine how many points are sampled on the axes.
As per your comment, you'll also need to plot the functions to which the slope function dy_dx(x, y) corresponds.
Currently, you're only calculating and plotting the slope lines as calculated by your function dy_dx(x, y). You'll need to find (in this case 3) solution functions to plot in addition to the slope field.
Start by defining a solution function; for dy/dx = x**2 - x - 2 an antiderivative is x**3/3 - x**2/2 - 2*x (plus an arbitrary constant):
def f1_x(x):
    return x**3 / 3.0 - x**2 / 2.0 - 2.0 * x
and then, in your loop, you'll also have to write the desired values of these functions into the fileobj file.
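A minimal sketch of that extra output (hypothetical names; it reuses XMIN/XMAX from the script and separates curves with blank lines so gnuplot draws them as distinct solid lines):

def solution(x, C):
    # antiderivative of dy_dx plus an integration constant C
    return x**3 / 3.0 - x**2 / 2.0 - 2.0 * x + C

curvefile = open("curves.txt", "w")
for C in (-4.0, 0.0, 4.0):        # three example constants
    x = XMIN
    while x <= XMAX:
        curvefile.write("%f %f\n" % (x, solution(x, C)))
        x += 0.05
    curvefile.write("\n")         # blank line separates the curves
curvefile.close()

The gnuplot command would then gain an extra clause along the lines of: plot 'data.txt' using 1:2:3:4 with vectors, 'curves.txt' using 1:2 with lines.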
