How to rotate all the images to the same orientation? - python

I am trying to do an image classification task and want to make sure my input data all have the same orientation.
The code below did not rotate all the images to the same direction, and some were flipped wrongly.
I would be thankful if anyone can help me with this matter. Thank you.
original image 1
original image 2
import cv2
import numpy as np
import matplotlib.pyplot as plt

def getSubImage(rect, image):
    center, size, theta = rect
    center, size = tuple(map(int, center)), tuple(map(int, size))
    M = cv2.getRotationMatrix2D(center, theta, 1)
    # warpAffine expects the destination size as (width, height)
    dst = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
    out = cv2.getRectSubPix(dst, size, center)
    return out

image = cv2.imread('orginal1.png')
im_bw = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(im_bw, (5, 5), 0)
im_bw = cv2.Canny(blur, 10, 90)
contours, hierarchy = cv2.findContours(im_bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rect = cv2.minAreaRect(contours[0])
out = getSubImage(rect, image)
cv2.imwrite('rotedorginal1.jpg', out)
plt.imshow(out)
plt.show()

You just need to build the matrix that rotates the image by the right angle using cv2.getRotationMatrix2D, and then apply it with cv2.warpAffine:
(x, y), (w, h), angle = cv2.minAreaRect(contours[0])
M = cv2.getRotationMatrix2D((image.shape[1] // 2, image.shape[0] // 2), angle - 90, 1)
result = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
The code above transforms this image:
into this one:
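One caveat worth adding, since some of your images flipped wrongly: cv2.minAreaRect does not return a unique angle. Which side of the box it reports as the width is arbitrary, so visually identical objects can come back with angles 90 degrees apart, and a 180-degree flip is invisible to a bounding box entirely. Here is a minimal sketch of a common normalization (an assumption on my part, not part of the answer above), which forces the longer side to play one fixed role:

(x, y), (w, h), angle = cv2.minAreaRect(contours[0])
# Make the angle consistent: treat the longer side as the "height",
# so the same object always yields the same rotation angle
if w > h:
    angle -= 90
M = cv2.getRotationMatrix2D((image.shape[1] // 2, image.shape[0] // 2), angle, 1)
result = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))

A constant offset (like the -90 in the answer above) can then be added to pick the final upright orientation; a 180-degree flip still has to be resolved from the image content itself.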

Related

Fit theoretical dartboard in image containing dartboard

I have the following image containing a dartboard
After processing, the image looks as follows:
In addition, I have a function that creates a theoretical dartboard:
import cv2
import numpy as np

def draw_dartboard():
    IMG = np.ones((400, 400), 'uint8') * 255
    center = (int(IMG.shape[0] // 2), int(IMG.shape[1] // 2))
    size_dartboard = int(340)
    r_board = int(170)
    r_db = int(6.35)
    r_sb = int(15.9)
    r_doubles = int(162)
    r_triples = int(99)
    width_rings = int(8)
    cv2.circle(IMG, center, r_doubles + width_rings, (0, 0, 0), -1)
    cv2.circle(IMG, center, r_doubles, (255, 255, 255), -1)
    cv2.circle(IMG, center, r_triples + width_rings, (0, 0, 0), -1)
    cv2.circle(IMG, center, r_triples, (255, 255, 255), -1)
    thetas_min = np.radians([(18 * t - 9) for t in range(20)])
    thetas_max = np.radians([(18 * t + 9) for t in range(20)])
    for idx, (theta_min, theta_max) in enumerate(zip(thetas_min, thetas_max)):
        if (idx % 2) == 0:
            x_min = int(center[0] + r_board * np.cos(theta_min))
            y_min = int(center[1] + r_board * np.sin(theta_min))
            x_max = int(center[0] + r_board * np.cos(theta_max))
            y_max = int(center[1] + r_board * np.sin(theta_max))
            cv2.fillPoly(IMG, np.array([(center, (x_min, y_min), (x_max, y_max))]), (0, 0, 0))
    cv2.circle(IMG, center, r_sb, (0, 0, 0), -1)
    return IMG
The output of this function looks as follows:
How can I “fit” the theoretical dartboard in the real image? Clearly, there is a mismatch in orientation and scale. What's the best way to do this?
You can register your dartboard image (i.e. source image) to the one you processed (i.e. destination image) by using affine transformations.
Here is my approach, and the outcome.
import cv2
import matplotlib.pyplot as plt
import numpy as np
# read images and remove matplotlib axes
src = cv2.imread('source.png',0)
src = src[20:-30,40:-20]
dest = cv2.imread('dest.png',0)
dest = dest[40:-40,40:-40]
# find matching points manually
dest_pts = np.array([[103,29],[215,13],[236,125]]).astype(np.float32) # x,y
src_pts = np.array([[19,175],[145,158],[176,284]]).astype(np.float32) #x,y
# calculate the affine transformation matrix
warp_mat = cv2.getAffineTransform(src_pts, dest_pts)
# get the registered source image
warp_dst = cv2.warpAffine(src, warp_mat, (dest.shape[1], dest.shape[0]))
fig,ax = plt.subplots(1,3)
ax[0].imshow(src,'gray')
ax[0].scatter(src_pts[:,0],src_pts[:,1],s=1,c='r')
ax[0].set_title('src')
ax[1].imshow(dest,'gray')
ax[1].scatter(dest_pts[:,0],dest_pts[:,1],s=1,c='r')
ax[1].set_title('dest')
ax[2].imshow(warp_dst,'gray')
ax[2].set_title('registered src')
plt.savefig('result.png')
fig, ax = plt.subplots(1)
ax.imshow(dest,'gray')
ax.imshow(warp_dst,cmap='jet',alpha=0.5)
plt.savefig('overlayed_result.png')
# plt.show()
In order to calculate the affine transformation matrix, you will need 3 matching points on both images. I highlighted the points I chose on both images. FYI, you can develop a way to automate finding matching points; let us know in your question if you need that.
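For reference, here is a minimal sketch of one way to automate the matching step, using ORB features with a brute-force matcher and a RANSAC-estimated affine transform. The filenames are the same assumptions as above, and feature detectors usually need parameter tuning on binary or synthetic images like a rendered dartboard:

import cv2
import numpy as np

# Same assumed filenames as above
src = cv2.imread('source.png', 0)
dest = cv2.imread('dest.png', 0)

# Detect keypoints and binary descriptors in both images
orb = cv2.ORB_create(nfeatures=1000)
kp_src, des_src = orb.detectAndCompute(src, None)
kp_dest, des_dest = orb.detectAndCompute(dest, None)

# Brute-force matching with cross-check; keep the 50 best matches
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(des_src, des_dest), key=lambda m: m.distance)[:50]

src_pts = np.float32([kp_src[m.queryIdx].pt for m in matches])
dest_pts = np.float32([kp_dest[m.trainIdx].pt for m in matches])

# RANSAC rejects the remaining bad correspondences
warp_mat, inliers = cv2.estimateAffine2D(src_pts, dest_pts, method=cv2.RANSAC)
warp_dst = cv2.warpAffine(src, warp_mat, (dest.shape[1], dest.shape[0]))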
As you have already done the image processing, I will take it from there. So just to be clear, this is the image I will be working with (I cropped out the matplotlib axes, as I'm sure they aren't present in your actual image):
The concept is really simple:
Find the bounding box of the contour of the target.
With the bounding box, we can find the radius of the target by selecting the greatest among the dimensions (width and height) of the bounding box, and dividing it by 2.
With the radius of the target and the top-left corner coordinates of the target (returned when finding the bounding box of the target), we can find the center of the target with the expressions x + r and y + h - r.
With the radius of the target, you can scale your theoretical target accordingly, and with the center of the target, you can draw your theoretical target at the right coordinates.
Here is how the code goes, where Image.png is the above image. Note that I only draw one circle onto the image; the rest of them can be drawn in the same way, with just some added scaling:
import cv2
import numpy as np
img = cv2.imread("Image.png")
img_processed = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
contours, _ = cv2.findContours(img_processed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
cnt = sorted(contours, key=cv2.contourArea)[-2]
x, y, w, h = cv2.boundingRect(cnt)
r = max(w, h) // 2
center_x = x + r
center_y = y + h - r
cv2.circle(img, (center_x, center_y), r, (0, 255, 0), 5)
cv2.imshow("Image", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
Note that at this line:
cnt = sorted(contours, key=cv2.contourArea)[-2]
I am getting the contour with the second-greatest area, as the one with the greatest area would be the border of the image.
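As a sketch of that scaling idea: the theoretical radii come from the draw_dartboard function above (r_board = 170, r_triples = 99, r_sb = 15.9), and r, center_x and center_y are the values computed in the code above. The ratio r / 170 maps theoretical radii to image radii:

# Scale factor from the theoretical board (radius 170) to the detected target
scale = r / 170

# Draw the triples ring and the semi-bull at the scaled radii
cv2.circle(img, (center_x, center_y), int(99 * scale), (0, 255, 0), 5)
cv2.circle(img, (center_x, center_y), int(15.9 * scale), (0, 255, 0), 5)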

Finding the coordinates of the edges on a rectangular object

I am trying to build a document scanner application from scratch using OpenCV and Python. So far I have done the following:
re-scaled the image
preprocessed the image, that is, converted it to greyscale, applied a Gaussian blur, applied an adaptive threshold and finally used Canny edge detection
found the largest contour and drew it
detected the edges of the contour and drew them
Step 4 is where the problem is: I'm getting two of the points in the correct location, but two seem to be slightly offset.
I can't seem to understand what I'm doing wrong. Additionally, could this problem be due to the way I have preprocessed the image?
import cv2
import numpy as np

# Function to resize the image
def Re_scaleImg(img):
    scale_percent = 50
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize the image
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    return resized

# Function to process the image
def process(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    edged = cv2.Canny(thresh, 75, 200)
    #cv2.imshow("blur", blur)
    #cv2.imshow("edged", thresh)
    return edged

# Function to find the areas of contours
def find_contourArea(contours):
    areas = []
    for cnt in contours:
        cont_area = cv2.contourArea(cnt)
        areas.append(cont_area)
    return areas

image = cv2.imread("receipt.jpeg")
resized = Re_scaleImg(image)
processed_img = process(resized)

# finding the contours
contours, hierarchy = cv2.findContours(processed_img.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
resized_copy1 = resized.copy()

# sorting the contours
sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
largest_contour = sorted_contours[0]
epsilon = 0.01 * cv2.arcLength(largest_contour, True)
approximation = cv2.approxPolyDP(largest_contour, epsilon, True)
cv2.drawContours(resized_copy1, [approximation], -1, (0, 255, 0), 3)

# Obtaining the corners of the rectangle
rot_rect = cv2.minAreaRect(largest_contour)
box = cv2.boxPoints(rot_rect)
box = np.int0(box)
for p in box:
    pt = (p[0], p[1])
    cv2.circle(resized_copy1, pt, 10, (255, 0, 0), -1)
    print(pt)

cv2.imshow("contours", resized_copy1)
cv2.waitKey(0)
Both the images are shown below:
the original image:
output image:
Instead of finding the full contour, you could try to find the lines along the edges of the document instead.
With the Hough Line Transform you can find the four most prominent lines (the ones with the most votes).
From these lines you can then calculate the intersection points and use the four points closest to the center of the full shape as corner points.
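A minimal sketch of that idea, assuming edged is the Canny output of the process function above; the vote threshold of 100 is a guess that needs tuning per image:

import numpy as np

# Hough transform on the Canny edge image (rho/theta parameterization)
lines = cv2.HoughLines(edged, 1, np.pi / 180, 100)

def intersection(line1, line2):
    # Each line is (rho, theta): x*cos(theta) + y*sin(theta) = rho.
    # Solving the 2x2 linear system gives the intersection point.
    rho1, theta1 = line1
    rho2, theta2 = line2
    A = np.array([[np.cos(theta1), np.sin(theta1)],
                  [np.cos(theta2), np.sin(theta2)]])
    if abs(np.linalg.det(A)) < 1e-6:
        return None  # (nearly) parallel lines, e.g. opposite document edges
    x, y = np.linalg.solve(A, np.array([rho1, rho2]))
    return (int(x), int(y))

corners = []
if lines is not None:
    strongest = [l[0] for l in lines[:4]]  # strongest lines come first
    for i in range(len(strongest)):
        for j in range(i + 1, len(strongest)):
            p = intersection(strongest[i], strongest[j])
            if p is not None:
                corners.append(p)
# From the (up to six) intersections, keep the four closest to the
# center of the shape, as described above.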

How can I apply the contours of a downsized image to the original image?

I have working code for finding contours with OpenCV, but it processes a downsized image to improve computational speed. How can I apply the contours found in the downsized image to the original image?
This is my Python code:
import cv2 as cv
import imutils

# Image Read and Resizing
source_image = cv.imread(image_path)
copied_image = source_image.copy()
copied_image = imutils.resize(copied_image, height=500)
# Apply GaussianBlur + OTSU-Thresholding
grayscale_image = cv.cvtColor(copied_image, cv.COLOR_BGR2GRAY)
grayscale_image = cv.GaussianBlur(grayscale_image, (5, 5), 0)
ret, grayscale_image = cv.threshold(grayscale_image, 200, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
# Find Contours
contours, hierarchy = cv.findContours(grayscale_image, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contour_sizes = [(cv.contourArea(contour), contour) for contour in contours]
biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
# Crop Image
x, y, w, h = cv.boundingRect(biggest_contour)
cropped_image = copied_image[y:y + h, x:x + w]
copied_image is smaller than source_image. I only used the largest contour. Now, I want to apply the found contour to source_image. However, in my code, the acquired contour is based on copied_image.
If you can live with an (in)accuracy of 1 or 2 pixels, a quite simple solution would be to just multiply the x, y, w, h values of your bounding rectangle with the corresponding scaling factors:
import cv2
import numpy as np
# Set up some test image
image = np.zeros((400, 400), np.uint8)
image = cv2.circle(image, (160, 160), 80, 255, cv2.FILLED)
# Find contour, and determine original bounding rectangle
cnt_orig = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]
x, y, w, h = cv2.boundingRect(cnt_orig[0])
print('Original bounding rectangle: ', x, y, w, h)
# Downsize image
image_small = cv2.resize(image.copy(), (124, 287))
# Determine scaling factors
scale_x = image.shape[1] / image_small.shape[1]
scale_y = image.shape[0] / image_small.shape[0]
# Find contour, and determine reconstructed bounding rectangle w.r.t. the scaling factors
cnt_small = cv2.findContours(image_small, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]
x, y, w, h = cv2.boundingRect(cnt_small[0]) * np.array([scale_x, scale_y, scale_x, scale_y])
print('Reconstructed bounding rectangle: ', x, y, w, h)
Output:
Original bounding rectangle: 80 80 161 161
Reconstructed bounding rectangle: 80.64... 79.44... 161.29... 161.67...
Notice: The used test image is very simple. The (in)accuracy might increase when finding more complex contours in more complex images.
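If you need the full contour rather than just its bounding rectangle, the same idea can be applied to every contour point. A minimal sketch, reusing cnt_small and the scaling factors from above (the same rounding caveat applies):

# Scale every (x, y) point of the small contour back to the original size
cnt_reconstructed = (cnt_small[0] * np.array([scale_x, scale_y])).astype(np.int32)

# The scaled contour can now be drawn on (a color copy of) the original image
vis = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
cv2.drawContours(vis, [cnt_reconstructed], -1, (0, 0, 255), 2)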
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.16299-SP0
Python: 3.8.5
NumPy: 1.19.4
OpenCV: 4.4.0
----------------------------------------

Image watermarking on specific position in an image in python

Currently I am working on an image processing project in which I need to split the image into several segments and then apply a watermark to each of the segments.
I have written code which divides the image into segments by masking. You may find the code here. Now I want to apply a watermark to each of these segments. The tutorial for watermarking can be found here.
How am I supposed to do that?
Please help as I am new to OpenCV and Python.
Feel free to ask for any further information needed to solve this.
Thank you!
EDIT
I am adding some code for reference:
import cv2
import numpy as np
from skimage.segmentation import slic
from skimage.util import img_as_float

segment = 'segment storing location'
image = cv2.imread(image_path)
segments = slic(img_as_float(image), compactness=100.0, n_segments=10, sigma=5) # segmentation of image
row, col, _ = image.shape
for (i, segVal) in enumerate(np.unique(segments)):
    # construct a mask for the segment
    print("[x] inspecting segment %d" % (i))
    mask = np.zeros(image.shape[:2], dtype="uint8")
    mask[segments == segVal] = 255 # masking image with different mask to create unique segments
    bb = cv2.bitwise_and(image, image, mask=mask)
    cv2.imwrite(segment + str(i) + ".png", bb) # save image segments created
Now after saving the segments, I need to watermark each one of them by calling them one after another. This is the code for watermarking:
import numpy as np
import cv2
import os
wk= 'D:\\watermark\\wm.png'
input_im= 'D:\\watermark\\input\\image_01.jpg'
op= 'D:\\watermark\\output'
alpha = 0.25
watermark = cv2.imread(wk, cv2.IMREAD_UNCHANGED)
(wH, wW) = watermark.shape[:2]
image = cv2.imread(input_im)
(h, w) = image.shape[:2]
image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
overlay = np.zeros((h, w, 4), dtype="uint8")
overlay[h - wH - 500:h - 500, w - wW - 500:w - 500] = watermark #This is the line where we can set the watermark's coordinates
output = image.copy()
cv2.addWeighted(overlay,alpha, output, 1.0, 0, output)
filename = input_im[input_im.rfind(os.path.sep) + 1:]
p = os.path.sep.join((op, filename))
cv2.imwrite(p, output)
Now how can I extract the coordinates of this segment in order to watermark it?
Edit
This is what I get when the lines
cv2.circle(im, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(im, "center", (cX - 20, cY - 20),
    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
are kept outside the loop:
And this is what I get when they are executed within the loop:
You need to find the contour of the image (I've downloaded your segment image to try this), then compute the center of the contour.
To find the contour, you need to convert the image to grayscale and threshold it, separating the totally black pixels (the black background) from the non-black ones (your segment).
Finding the center of the segment
The only assumption I've made is that the pixel values of your segments are different from 0 (total black). This assumption may be invalid, but since you're working with photos of natural landscapes (like the one you posted), this should not be a problem.
Feel free to ask for further details.
import numpy as np
import cv2

im = cv2.imread('try.png')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 1, 255, 0) # Threshold to highlight non black pixels
# Note: in OpenCV 3, findContours returned (image, contours, hierarchy)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    # compute the center of the contour
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue # skip degenerate contours with zero area
    cX = int(M["m10"] / M["m00"])
    cY = int(M["m01"] / M["m00"])
    # draw the contour and center of the shape on the image
    cv2.drawContours(im, [c], -1, (0, 255, 0), 2)
    cv2.circle(im, (cX, cY), 7, (255, 255, 255), -1)
    cv2.putText(im, "center", (cX - 20, cY - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

# show the image
cv2.imshow("Image", im)
cv2.waitKey(0)
This is what I get:
Placing the watermark
Let's say you have the coordinates of the center of the segment region. Knowing the size of the watermark, you can convert those coordinates into the point of the image where to put the upper-left corner of the watermark. In this example I assume that they are (x=10, y=10).
I've reused the last image you posted (I'm not drawing the contours, just the watermark).
import numpy as np
import cv2 as cv
# Coordinates where to put the watermark (left upper corner)
cy = 10
cx = 10
# Reading the image
image = cv.imread("try.png")
(h,w) = image.shape[:2]
image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
# Reading the watermark
watermark = cv.imread("watermark.png", cv.IMREAD_UNCHANGED)
(wH, wW) = watermark.shape[:2]
(B, G, R, A) = cv.split(watermark)
B = cv.bitwise_and(B, B, mask=A)
G = cv.bitwise_and(G, G, mask=A)
R = cv.bitwise_and(R, R, mask=A)
watermark = cv.merge([B, G, R, A])
# Creating the image's overlay with the watermark
overlay = np.zeros((h, w, 4), dtype="uint8")
overlay[cy:wH + cy, cx:wW + cx] = watermark
# Applying the overlay
output = image.copy()
cv.addWeighted(overlay, 0.4, output, 1.0, 0, output)
cv.imshow("out", output)
cv.waitKey()
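To tie the two parts together: the center (cX, cY) computed in the first snippet can be turned into the watermark's upper-left corner by subtracting half the watermark's size. A minimal sketch, assuming the watermark fits inside the image at that position:

# Center the watermark on the segment center (cX, cY)
cx = max(0, cX - wW // 2)
cy = max(0, cY - wH // 2)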

Having trouble with orientation detection in OpenCV

I am trying to make a computer vision script that detects the orientation of objects. It works the majority of the time, but for certain images it does not have the same success.
This script relies on blurring and Canny edge detection to find the contours.
Working example:
Part on which it fails:
For the part where it fails, it draws two lines for one of the shapes and completely ignores one of the other shapes.
Main code:
import cv2
from imgops import imutils
import CVAlgo

z = 'am'
path = 'images/pca.jpg'
#path = 'images/pca2.jpg'
img = cv2.imread(path)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, height=600)
imgray = imutils.resize(imgray, height=600)
final = img.copy()
thresh, imgray = CVAlgo.filtering(img, imgray, z)
__, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Iterate through all contours
test = CVAlgo.cnt_gui(final, contours)
#cv2.imwrite('1.jpg', final)
cv2.imshow('thresh', thresh)
cv2.imshow('contours', final)
cv2.waitKey(0)
CVAlgo.py
import cv2
from numpy import *
from pylab import *
from imgops import imutils
import math

def invert_img(img):
    img = (255 - img)
    return img

def canny(imgray):
    imgray = cv2.GaussianBlur(imgray, (11, 11), 200)
    canny_low = 0
    canny_high = 100
    thresh = cv2.Canny(imgray, canny_low, canny_high)
    return thresh

def cnt_gui(img, contours):
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)
    for i in range(0, len(cnts)):
        sel_cnts = sorted(contours, key=cv2.contourArea, reverse=True)[i]
        area = cv2.contourArea(sel_cnts)
        if area < 1000:
            continue
        # get orientation angle and center coord
        center, axis, angle = cv2.fitEllipse(sel_cnts)
        hyp = 100 # length of the orientation line
        # Find coordinates of the 2nd point, given the line length and center coord
        linex = int(center[0]) + int(math.sin(math.radians(angle)) * hyp)
        liney = int(center[1]) - int(math.cos(math.radians(angle)) * hyp)
        # Draw orientation
        cv2.line(img, (int(center[0]), int(center[1])), (linex, liney), (0, 0, 255), 5)
        cv2.circle(img, (int(center[0]), int(center[1])), 10, (255, 0, 0), -1)
    return img

def filtering(img, imgray, mode):
    imgray = cv2.medianBlur(imgray, 11)
    thresh = cv2.Canny(imgray, 75, 200)
    return thresh, imgray
Does anyone know what the problem is? Anyone know how I can improve this script?
The shape that has not been detected is too close to the black background, and as such its contour has been merged with the contour of the white object area. The second orientation you find in one of the objects is in fact the orientation of the outer contour. To circumvent some of this, you can dilate or close the binary image after thresholding, using cv2.dilate or cv2.morphologyEx.
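A minimal sketch of that cleanup step, applied to the thresh image from the main code above; the kernel size and iteration count are guesses to tune:

import numpy as np

# Grow the white edge pixels so broken fragments merge into one contour
kernel = np.ones((5, 5), np.uint8)
thresh = cv2.dilate(thresh, kernel, iterations=1)
# Alternatively, closing fills gaps without thickening as much:
# thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)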
I have a suggestion. Since you have extracted each of the objects in the image as a contour, try fitting an ellipse to each of them.
Then find the major axis of each ellipse.
Now find the angle of orientation of these major axes.
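A minimal sketch of this suggestion, assuming contours comes from findContours as in the main code; cv2.fitEllipse returns the rotation angle directly, which is the same angle the cnt_gui function above uses to draw its orientation line:

for cnt in contours:
    if len(cnt) < 5: # fitEllipse needs at least 5 contour points
        continue
    (cx, cy), (ax1, ax2), angle = cv2.fitEllipse(cnt)
    # angle is the ellipse rotation in degrees (clockwise from vertical),
    # i.e. the orientation of the fitted ellipse's major axis
    print("center=(%.1f, %.1f), orientation=%.1f degrees" % (cx, cy, angle))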
