import scipy.ndimage.morphology as m
import numpy as np
import cv2
def skeletonize(img):
    # hit/miss structuring-element pairs for morphological thinning
    h1 = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 1]])
    m1 = np.array([[1, 1, 1], [0, 0, 0], [0, 0, 0]])
    h2 = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0]])
    m2 = np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]])
    hit_list = []
    miss_list = []
    # the four 90-degree rotations of each pair cover all eight directions
    for k in range(4):
        hit_list.append(np.rot90(h1, k))
        hit_list.append(np.rot90(h2, k))
        miss_list.append(np.rot90(m1, k))
        miss_list.append(np.rot90(m2, k))
    img = img.copy()
    # repeatedly peel off boundary pixels until nothing changes
    while True:
        last = img
        for hit, miss in zip(hit_list, miss_list):
            hm = m.binary_hit_or_miss(img, hit, miss)
            img = np.logical_and(img, np.logical_not(hm))
        if np.all(img == last):
            break
    return img
img = cv2.imread("e_5.jpg",0)
ret,img = cv2.threshold(img,127,255,0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
img = 255 - img
img = cv2.dilate(img, element, iterations=3)
skel = skeletonize(img)
imshow(skel, cmap="gray", interpolation="nearest")
I have been trying this code in order to skeletonize an image without any gaps between the skeleton lines. But whenever I run the program, I get the error "imshow is not defined".
I tried plt.imshow; at that point there was neither an error nor an output image. Can anyone tell me where I am going wrong?
You may have forgotten to import matplotlib.pyplot under the short alias plt (the alias can be any name you want) and to call the show() command, so add the following to your code:
import matplotlib.pyplot as plt
plt.imshow()
plt.show()
see for more information: https://stackoverflow.com/a/3497922/4716013
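Applied to the snippet in the question, the final display step would then look like this (a minimal sketch; skel is the boolean array returned by skeletonize):
import matplotlib.pyplot as plt

# display the skeleton returned by skeletonize()
plt.imshow(skel, cmap="gray", interpolation="nearest")
plt.show()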
Related
I'm trying to get the coordinates of every end point on every line, but I couldn't come up with a solution. This is what I currently have, but it's finding the outline of the lines rather than the lines themselves:
import cv2
import numpy as np
img = cv2.imread('out copy.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
kernel_size = 5
blur_gray = cv2.GaussianBlur(gray,(kernel_size, kernel_size),0)
low_threshold = 50
high_threshold = 150
edges = cv2.Canny(blur_gray, low_threshold, high_threshold)
rho = 1 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 50 # minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
# Run Hough on edge detected image
# Output "lines" is an array containing endpoints of detected line segments
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),
                        min_line_length, max_line_gap)
for line in lines:
    for x1, y1, x2, y2 in line:
        cv2.line(line_image, (x1, y1), (x2, y2), (0, 255, 0), 5)
lines_edges = cv2.addWeighted(img, 0.8, line_image, 1, 0)
cv2.imshow('out copy.png', lines_edges)
cv2.waitKey(0)
The hit-or-miss transform can be used to find end points of a line after skeletonization.
Code:
import cv2
import numpy as np

img = cv2.imread('image.png')
img2 = img.copy()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# inverse binary image, to make the lines in white
th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# obtain binary skeleton
sk = cv2.ximgproc.thinning(th, None, 1)
# kernels to find endpoints in all 4 directions
k1 = np.array(([0, 0, 0], [-1, 1, -1], [-1, -1, -1]), dtype="int")
k2 = np.array(([0, -1, -1], [0, 1, -1], [0, -1, -1]), dtype="int")
k3 = np.array(([-1, -1, 0], [-1, 1, 0], [-1, -1, 0]), dtype="int")
k4 = np.array(([-1, -1, -1], [-1, 1, -1], [0, 0, 0]), dtype="int")
# perform hit-miss transform for every kernel
o1 = cv2.morphologyEx(sk, cv2.MORPH_HITMISS, k1)
o2 = cv2.morphologyEx(sk, cv2.MORPH_HITMISS, k2)
o3 = cv2.morphologyEx(sk, cv2.MORPH_HITMISS, k3)
o4 = cv2.morphologyEx(sk, cv2.MORPH_HITMISS, k4)
# add results of all the above 4
out = o1 + o2 + o3 + o4
# find points in white (255) and draw them on original image
pts = np.argwhere(out == 255)
for pt in pts:
    img2 = cv2.circle(img2, (pt[1], pt[0]), 15, (0, 255, 0), -1)
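Since the question asks for the coordinates themselves, the detected points can also be printed and the annotated image displayed (a small addition to the answer above; note that np.argwhere returns (row, col), i.e. (y, x), pairs):
# print endpoint coordinates as (x, y) and show the annotated image
for y, x in pts:
    print((x, y))
cv2.imshow("endpoints", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()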
Why does this mask layer not mask the image?
import matplotlib.image as mpimg
import numpy
from PIL import Image

path = 'inp.jpg'
arr = numpy.array(Image.open(path))
img = mpimg.imread(path)
black_pixels_mask = numpy.all(img == [0, 0, 0], axis=-1)
img[black_pixels_mask] = [255, 255, 255]
The resulting img should be the masked one, and the code should replace black with white (just as a sample colour).
Try this code:
arr[numpy.all(arr == [0, 0, 0], axis=-1)] = [255, 255, 255]
data = Image.fromarray(arr)
data.save(path)
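One caveat worth adding (not part of the original answer): JPEG compression is lossy, so pixels that look black are rarely exactly (0, 0, 0) and an exact comparison may match nothing. A small tolerance makes the mask more forgiving; the threshold of 10 and the output filename below are arbitrary assumptions:
from PIL import Image
import numpy

arr = numpy.array(Image.open('inp.jpg'))
# treat any pixel whose R, G and B values are all below 10 as "black"
near_black = numpy.all(arr < 10, axis=-1)
arr[near_black] = [255, 255, 255]
Image.fromarray(arr).save('inp_masked.jpg')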
I am new to Python, and my problem is about editing a grayscale image. I want to binarize it: pixels with values greater than 100 should take the value 1 (white), and pixels with values lower than 100 should take the value 0 (black).
Any suggestions are welcome (sorry for my bad English).
My code:
import numpy as np
import cv2

image = cv2.imread('Image3.png', 0)
dimension = image.shape
height = dimension[0]
width = dimension[1]
#finalimage = np.zeros((height, width))
for i in range(height):
    for j in range(width):
        if image[i, j] > 100:
            image[i][j] = 1
        else:
            image[i][j] = 0
cv2.imshow('binarizedImage',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can try the OpenCV function cv2.threshold for binarization.
import cv2
img = cv2.imread('Image3.png', cv2.IMREAD_GRAYSCALE)
thresh = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY)[1]
cv2.imshow('binarizedImage',thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
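Note that this uses 255 rather than 1 for the white value: cv2.imshow renders a 0/1 uint8 image as almost entirely black, which is likely why the original loop seemed to produce no visible output. If values of exactly 0 and 1 are needed afterwards, the result can simply be rescaled:
# convert the 0/255 result to 0/1 if required
binary01 = thresh // 255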
I think you just want to use np.where():
import numpy as np
image = np.array([[200, 50, 200],[50, 50 ,50],[10, 255, 10]]) #this will be your image instead
In [11]: image
Out[11]:
array([[200, 50, 200],
[ 50, 50, 50],
[ 10, 255, 10]])
In [12]: np.where(image > 100, 1, 0)
Out[12]:
array([[1, 0, 1],
[0, 0, 0],
[0, 1, 0]])
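Applied to the actual image from the question, a sketch could look like this (255 is used instead of 1 so that the white pixels are visible in cv2.imshow):
import cv2
import numpy as np

image = cv2.imread('Image3.png', 0)
# vectorised binarization: values > 100 become 255 (white), the rest 0 (black)
binarized = np.where(image > 100, 255, 0).astype(np.uint8)
cv2.imshow('binarizedImage', binarized)
cv2.waitKey(0)
cv2.destroyAllWindows()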
I have been trying to reduce the noise in the attached image. Basically, I want to remove the background dust. Currently, I have tried looking for small points throughout the image (anything that fits within a 10 by 10 grid with low green pixel intensity) and then blacking out that 10 by 10 region. However, I was hoping to remove more noise. Is there possibly a way to run some filters in OpenCV to do so?
A simple approach: convert the image to grayscale, threshold it, and then apply a morphological opening to get an approximate result.
img = cv2.imread("commitdust.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, th = cv2.threshold(gray, 80, 255, cv2.THRESH_BINARY)
k = np.array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]], dtype=np.uint8)
th = cv2.morphologyEx(th, cv2.MORPH_OPEN, k)
cv2.imshow("th", th)
cv2.waitKey(0)
cv2.destroyAllWindows()
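If the goal is a cleaned-up colour image rather than a binary mask, the opened mask can be applied back to the original (an extra step, not part of the answer above, assuming the white pixels of th correspond to the content to keep):
# keep only the pixels that survived the opening; everything else goes black
cleaned = cv2.bitwise_and(img, img, mask=th)
cv2.imshow("cleaned", cleaned)
cv2.waitKey(0)
cv2.destroyAllWindows()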
I am using cv2.VideoWriter() as an intermediate step in a larger image-processing workflow. Basically, I have a stack of images that need to be turned into a timelapse; the frames in that timelapse are then processed and used downstream to mask the original imagery. My masking isn't working because the array sizes do not correspond with one another, and I've diagnosed the problem as arising from cv2.VideoWriter(). My timelapse assembly process came from here.
There are a ton of posts about cv2.VideoWriter() not working because the frame size is wrong, etc., but my problem is not that the video won't write - it's that the dimensions of my imagery are being changed. In fact, I'm not even sure whether it is the top row or the bottom row that is being cut off, or whether there is some underlying resampling step.
import cv2
import numpy as np
import glob
imgs = glob.glob('*.jpg')
img_array = []
for filename in imgs:
    img = cv2.imread(filename)
    height, width, layers = img.shape
    size = (width, height)
    img_array.append(img)
size # calling `size` returns (250,187)
out = cv2.VideoWriter('project.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, size)
for i in range(len(img_array)):
    out.write(img_array[i])
out.release()
cap = cv2.VideoCapture('project.avi')
mycap = cap.read()
mycap[1].shape # this returns (186,250,3)
I would have expected mycap[1].shape to correspond to size, but while size indicates I have a 250-pixel-wide and 187-pixel-tall array, mycap[1].shape shows that the video has dimensions 250x186.
After some testing I confirmed that cv2.VideoWriter() is not simply clipping an image with odd dimension values, but is instead altering values in the arrays while changing dimensions:
import numpy as np
import pylab as plt
import cv2 as cv
# Create RGB layers
r = np.array([[255, 0, 255, 0, 255, 0, 255, 0, 255], [255, 0, 255, 0, 255, 0, 255, 0, 255], [255, 0, 255, 0, 255, 0, 255, 0, 255]],dtype=np.uint8)
g = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]],dtype=np.uint8)
b = np.array([[10, 0, 10, 0, 10, 0, 255, 0, 255], [10, 0, 10, 0, 10, 0, 255, 0, 255], [10, 0, 10, 0, 10, 0, 255, 0, 255]],dtype=np.uint8)
# Create a few image layers
rgb1 = np.dstack((r,g,b))
rgb2 = np.dstack((r,g,b))
rgb3 = np.dstack((r,g,b))
rgb4 = np.dstack((r,g,b))
plt.imshow(rgb1)
imgs = [rgb1,rgb2,rgb3,rgb4]
# Create timelapse
img_array = []
for img in imgs:
    height, width, layers = img.shape
    size = (width, height)
    img_array.append(img)
out = cv.VideoWriter('SO_question.avi',cv.VideoWriter_fourcc(*'DIVX'), 15, size)
for i in range(len(img_array)):
    out.write(img_array[i])
out.release()
# Read video in
cap = cv.VideoCapture('SO_question.avi')
cap.read()[1].shape
plt.imshow(cap.read()[1])
plt.imshow(rgb1) produces the following image:
But plt.imshow(cap.read()[1]) produces the following image:
Furthermore, using print(cap.read()[1]) shows that array values are not maintained across the process. Thus, I conclude that a resampling process is occurring (rather than a simple crop step) when width and height are an odd number of pixels.
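A possible workaround, based on the diagnosis above (a sketch rather than a confirmed fix; the output filename is arbitrary): pad each frame to even dimensions before writing, so the codec never has to resample, and pass the padded size to cv2.VideoWriter.
import cv2 as cv

def pad_to_even(frame):
    # add one replicated row/column whenever the height/width is odd
    h, w = frame.shape[:2]
    return cv.copyMakeBorder(frame, 0, h % 2, 0, w % 2, cv.BORDER_REPLICATE)

padded = [pad_to_even(f) for f in img_array]
height, width = padded[0].shape[:2]
out = cv.VideoWriter('SO_question_even.avi', cv.VideoWriter_fourcc(*'DIVX'),
                     15, (width, height))
for frame in padded:
    out.write(frame)
out.release()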