Having trouble with orientation detection in OpenCV - python

I am trying to make a computer vision script that detects the orientation of objects. It works most of the time, but it fails on certain images.
This script relies on blurring and Canny edge detection to find the contours.
Working example:
Failing example:
In the failing case, it draws two lines for one of the shapes and completely ignores one of the other shapes.
Main code:
import cv2
from imgops import imutils
import CVAlgo
z = 'am'
path = 'images/pca.jpg'
#path = 'images/pca2.jpg'
img = cv2.imread(path)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, height = 600)
imgray = imutils.resize(imgray, height = 600)
final = img.copy()
thresh, imgray = CVAlgo.filtering(img, imgray, z)
_, contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Iterate through all contours
test = CVAlgo.cnt_gui(final, contours)
#cv2.imwrite('1.jpg', final)
cv2.imshow('thresh', thresh)
cv2.imshow('contours', final)
cv2.waitKey(0)
CVAlgo.py
import cv2
from numpy import *
from pylab import *
from imgops import imutils
import math

def invert_img(img):
    img = (255 - img)
    return img

def canny(imgray):
    imgray = cv2.GaussianBlur(imgray, (11, 11), 200)
    canny_low = 0
    canny_high = 100
    thresh = cv2.Canny(imgray, canny_low, canny_high)
    return thresh

def cnt_gui(img, contours):
    cnts = sorted(contours, key=cv2.contourArea, reverse=True)
    for i in range(0, len(cnts)):
        sel_cnts = cnts[i]
        area = cv2.contourArea(sel_cnts)
        if area < 1000:
            continue
        # get orientation angle and center coord
        center, axis, angle = cv2.fitEllipse(sel_cnts)
        hyp = 100  # length of the orientation line
        # Find the coordinates of the 2nd point given the line length and the center coord
        linex = int(center[0]) + int(math.sin(math.radians(angle)) * hyp)
        liney = int(center[1]) - int(math.cos(math.radians(angle)) * hyp)
        # Draw orientation
        cv2.line(img, (int(center[0]), int(center[1])), (linex, liney), (0, 0, 255), 5)
        cv2.circle(img, (int(center[0]), int(center[1])), 10, (255, 0, 0), -1)
    return img

def filtering(img, imgray, mode):
    imgray = cv2.medianBlur(imgray, 11)
    thresh = cv2.Canny(imgray, 75, 200)
    return thresh, imgray
Does anyone know what the problem is, or how I can improve this script?

The shape that has not been detected is too close to the black background, so its contour has been merged with the contour of the white object area. The second orientation you found in one of the objects is in fact the orientation of its outer contour. To work around some of this, you can dilate or close the binary image after thresholding, using cv2.dilate or cv2.morphologyEx.
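For example, a minimal sketch of that closing/dilation step (the kernel size is a guess you would tune; thresh is the binary image from the filtering step):

import cv2
import numpy as np

kernel = np.ones((5, 5), np.uint8)
# close small gaps so neighbouring contours do not merge into one
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# or grow the foreground directly
dilated = cv2.dilate(thresh, kernel, iterations=1)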

I have a suggestion. Since you have extracted each of the objects in the image as a contour, try fitting an ellipse to each of them. Then find the major axis of each ellipse, and finally the angle of orientation of each major axis.
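A minimal sketch of that idea (assuming contours already holds the detected objects; note that cv2.fitEllipse needs a contour with at least 5 points):

for cnt in contours:
    if len(cnt) >= 5:  # fitEllipse requires at least 5 points
        (cx, cy), (major, minor), angle = cv2.fitEllipse(cnt)
        # angle is in degrees; the direction of the major axis
        # gives the orientation of the object
        print(cx, cy, angle)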

Related

Is there any way to crop an image inside a box?

I want to crop the image only inside the box or rectangle. I tried so many approaches but nothing worked.
import cv2
import numpy as np
img = cv2.imread("C:/Users/hp/Desktop/segmentation/add.jpeg", 0);
h, w = img.shape[:2]
# print(img.shape)
kernel = np.ones((3,3),np.uint8)
img2 = img.copy()
img2 = cv2.medianBlur(img2,5)
img2 = cv2.adaptiveThreshold(img2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY, 11, 2)
img2 = 255 - img2
img2 = cv2.dilate(img2, kernel)
img2 = cv2.medianBlur(img2, 9)
img2 = cv2.medianBlur(img2, 9)
cv2.imshow('anything', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
position = np.where(img2 !=0)
x0 = position[0].min()
x1 = position[0].max()
y0 = position[1].min()
y1 = position[1].max()
print(x0,x1,y0,y1)
result = img[x0:x1,y0:y1]
cv2.imshow('anything', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output should be the image inside the square.
You can use contour detection for this. If your image basically contains only a hand-drawn rectangle, I think it's good enough to assume it's the largest closed contour in the image. From that contour, we can figure out a polygon/quadrilateral approximation and then finally get an approximate rectangle. I'll define some utilities at the beginning which I generally use to make life easier when messing around with images:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def load_image(filename):
    return cv2.imread(filename)

def bnw(image):
    return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

def col(image):
    return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

def fixrgb(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def show_image(image, figsize=(7, 7), cmap=None):
    cmap = cmap if len(image.shape) == 3 else 'gray'
    plt.figure(figsize=figsize)
    plt.imshow(image, cmap=cmap)
    plt.show()

def AdaptiveThresh(gray):
    blur = cv2.medianBlur(gray, 5)
    adapt_type = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
    thresh_type = cv2.THRESH_BINARY_INV
    return cv2.adaptiveThreshold(blur, 255, adapt_type, thresh_type, 11, 2)

def get_rect(pts):
    # pts come from cv2 contours, shaped (N, 1, 2) in (x, y) order
    xmin = pts[:, 0, 1].min()
    ymin = pts[:, 0, 0].min()
    xmax = pts[:, 0, 1].max()
    ymax = pts[:, 0, 0].max()
    return (ymin, xmin), (ymax, xmax)
Let's load the image and convert it to grayscale:
image_name = 'test.jpg'
image_original = fixrgb(load_image(image_name))
image_gray = 255-bnw(image_original)
show_image(image_gray)
Use some morph ops to enhance the image:
kernel = np.ones((3,3),np.uint8)
d = 255-cv2.dilate(image_gray,kernel,iterations = 1)
show_image(d)
Find the edges and enhance/denoise:
e = AdaptiveThresh(d)
show_image(e)
m = cv2.dilate(e,kernel,iterations = 1)
m = cv2.medianBlur(m,11)
m = cv2.dilate(m,kernel,iterations = 1)
show_image(m)
Contour detection:
contours, hierarchy = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
total_area = np.prod(image_gray.shape)
max_area = 0
for cnt in contours:
    # Simplify contour
    perimeter = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.03 * perimeter, True)
    area = cv2.contourArea(approx)
    # Shape is rectangular, so approximately 4 points, and it's convex
    if len(approx) == 4 and cv2.isContourConvex(approx) and max_area < area < total_area:
        max_area = cv2.contourArea(approx)
        quad_polygon = approx
img1 = image_original.copy()
img2 = image_original.copy()
cv2.polylines(img1,[quad_polygon],True,(0,255,0),10)
show_image(img1)
tl, br = get_rect(quad_polygon)
cv2.rectangle(img2, tl, br, (0,255,0), 10)
show_image(img2)
So you can see the approximate polygon and the corresponding rectangle, from which you can get your crop. I suggest you play around with median blur and morphological ops like erosion, dilation, opening, and closing, and see which set of operations suits your images best; I can't really say what's good from just one image (a quick sketch of those ops follows the crop line below). You can crop using the top-left and bottom-right coordinates:
show_image(image_original[tl[1]:br[1],tl[0]:br[0],:])
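If you want a starting point for that experimentation, here is a quick sketch applying opening and closing to the mask m from above (the kernel shape and size are arbitrary choices to tune):

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
opened = cv2.morphologyEx(m, cv2.MORPH_OPEN, kernel)   # erode then dilate: removes small specks
closed = cv2.morphologyEx(m, cv2.MORPH_CLOSE, kernel)  # dilate then erode: fills small gaps
show_image(opened)
show_image(closed)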
Draw the square with a different color (e.g. red) so it is distinguishable from the other writing and the background. Then threshold the image so you get a black-and-white image in which the red line appears white. Get the coordinates of the white pixels, and from this set select the two pairs (minX, minY) and (maxX, maxY). They are the top-left and bottom-right points of the box (remember that in an image, the (0,0) point is at the top left), and you can use them to crop the image.
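A rough sketch of that idea (untested; the red threshold values are guesses you would tune, and 'add.jpeg' stands in for your image):

import cv2
import numpy as np

img = cv2.imread('add.jpeg')
b, g, r = cv2.split(img)  # OpenCV loads images in BGR order
# keep only strongly red pixels
mask = (r > 150) & (g < 100) & (b < 100)
ys, xs = np.where(mask)
# top-left and bottom-right corners of the drawn box
x0, y0, x1, y1 = xs.min(), ys.min(), xs.max(), ys.max()
crop = img[y0:y1, x0:x1]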

How to crop the biggest object in image with python opencv?

I want to crop the biggest object in the image (the characters). This code only works if there is no line (shown in the first image), but I need it to ignore the line and produce the second image: a crop of only the biggest object.
import cv2
import numpy as np

x1, y1, w1, h1 = (0, 0, 0, 0)
points = 0
# load image
img = cv2.imread('Image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)
# find where the signature is and make a cropped region
points = np.argwhere(thresh_gray == 0) # find where the black pixels are
points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
x, y, w, h = cv2.boundingRect(points) # create a rectangle around those points
crop = img[y:y+h, x:x+w]
cv2.imshow('save.jpg', crop)
cv2.waitKey(0)
Input:
Output:
You can use the findContours function to do this. For example, like this:
#!/usr/bin/env python
import cv2
import numpy as np

# load image
img = cv2.imread('Image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale

# threshold to get just the signature (INVERTED)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)

image, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)

# Find object with the biggest bounding box
mx = (0, 0, 0, 0)  # biggest bounding box so far
mx_area = 0
for cont in contours:
    x, y, w, h = cv2.boundingRect(cont)
    area = w * h
    if area > mx_area:
        mx = x, y, w, h
        mx_area = area
x, y, w, h = mx

# Output to files
roi = img[y:y+h, x:x+w]
cv2.imwrite('Image_crop.jpg', roi)
cv2.rectangle(img, (x, y), (x+w, y+h), (200, 0, 0), 2)
cv2.imwrite('Image_cont.jpg', img)
Note that I used THRESH_BINARY_INV instead of THRESH_BINARY, since findContours looks for white objects on a black background.
Image_cont.jpg:
Image_crop.jpg:
You can also use this with skewed rectangles, as @Jello pointed out. Unlike the simpler solution above, this will correctly filter out diagonal lines.
For example:
#!/usr/bin/env python
import cv2
import numpy as np

# load image
img = cv2.imread('Image2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale

# threshold to get just the signature (INVERTED)
retval, thresh_gray = cv2.threshold(gray, 100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)

image, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)

def crop_minAreaRect(img, rect):
    # Source: https://stackoverflow.com/questions/37177811/
    # rotate img
    angle = rect[2]
    rows, cols = img.shape[0], img.shape[1]
    matrix = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
    img_rot = cv2.warpAffine(img, matrix, (cols, rows))
    # rotate bounding box
    rect0 = (rect[0], rect[1], 0.0)
    box = cv2.boxPoints(rect)
    pts = np.int0(cv2.transform(np.array([box]), matrix))[0]
    pts[pts < 0] = 0
    # crop and return
    return img_rot[pts[1][1]:pts[0][1], pts[1][0]:pts[2][0]]

# Find object with the biggest bounding box
mx_rect = (0, 0, 0, 0)  # biggest skewed bounding box
mx_area = 0
for cont in contours:
    arect = cv2.minAreaRect(cont)
    area = arect[1][0] * arect[1][1]
    if area > mx_area:
        mx_rect, mx_area = arect, area

# Output to files
roi = crop_minAreaRect(img, mx_rect)
cv2.imwrite('Image_crop.jpg', roi)
box = cv2.boxPoints(mx_rect)
box = np.int0(box)
cv2.drawContours(img, [box], 0, (200, 0, 0), 2)
cv2.imwrite('Image_cont.jpg', img)
Image2.png (the input image):
Image_cont.jpg:
Image_crop.jpg:
If you use opencv-python 4.x, change image, contours, hierarchy to just contours, hierarchy.
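If the same script has to run on both versions, a small version-agnostic wrapper is one option (a sketch; it relies on the contours always being the second-to-last returned value):

def find_contours_compat(binary, mode, method):
    # OpenCV 3.x returns (image, contours, hierarchy),
    # OpenCV 4.x returns (contours, hierarchy)
    res = cv2.findContours(binary, mode, method)
    return res[-2], res[-1]

contours, hierarchy = find_contours_compat(thresh_gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)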

Is there a function similar to OpenCV findContours that detects curves and replaces points with a spline?

I am trying to take the image below, trace the white shape, and export the resulting path to PDF. The problem I have is that findContours seemingly only finds points along the edge of the shape. Is there a solution out there, similar to findContours, that detects curves in a shape and replaces its points with a spline wherever there is a curve? If I use scipy.interpolate, it ignores straight lines and turns the entire contour into one big curved shape, which is no good either. I need something that does both things.
import numpy as np
import cv2
from scipy.interpolate import splprep, splev
from pyx import *
import matplotlib.pyplot as plt
#read in image file
original = cv2.imread('test.jpg')
#blur the image to smooth edges
im = cv2.medianBlur(original,5)
#threshold the image
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,170,255,cv2.THRESH_BINARY)
#findContours
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#drawContours
cv2.drawContours(original, [approx], -1, (0,255,0), 3)
cv2.imshow("Imageee", original)
cv2.waitKey(0)
Instead of using cv2.findContours with the flag cv2.CHAIN_APPROX_SIMPLE to approximate the contours, we can do it manually:
use cv2.findContours with flag cv2.CHAIN_APPROX_NONE to find contours.
use cv2.arcLength to calculate the contour length.
use cv2.approxPolyDP to approximate the contour manually with epsilon = eps * arclen.
Here is one of the results when eps=0.005:
More results:
#!/usr/bin/python3
# 2018.01.04 13:01:24 CST
# 2018.01.04 14:42:58 CST
import cv2
import numpy as np

img = cv2.imread("test.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshed = cv2.threshold(gray, 170, 255, cv2.THRESH_BINARY)

# find contours without approx
cnts = cv2.findContours(threshed, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[-2]

# get the max-area contour
cnt = sorted(cnts, key=cv2.contourArea)[-1]

# calc arc length
arclen = cv2.arcLength(cnt, True)

# do approx
eps = 0.0005
epsilon = arclen * eps
approx = cv2.approxPolyDP(cnt, epsilon, True)

# draw the result
canvas = img.copy()
for pt in approx:
    cv2.circle(canvas, (pt[0][0], pt[0][1]), 7, (0, 255, 0), -1)
cv2.drawContours(canvas, [approx], -1, (0, 0, 255), 2, cv2.LINE_AA)

# save
cv2.imwrite("result.png", canvas)
I think your problem actually consists of two issues.
The first issue is to extract the contour, which you can achieve using the findContours function:
import cv2
import numpy as np

print cv2.__version__
rMaskgray = cv2.imread('test.jpg', 0)
(thresh, binRed) = cv2.threshold(rMaskgray, 200, 255, cv2.THRESH_BINARY)

_, Rcontours, hier_r = cv2.findContours(binRed, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
r_areas = [cv2.contourArea(c) for c in Rcontours]
max_rarea = np.argmax(r_areas)
CntExternalMask = np.ones(binRed.shape[:2], dtype="uint8") * 255

contour = Rcontours[max_rarea]
cv2.drawContours(CntExternalMask, [contour], -1, 0, 1)

print "These are the contour points:"
print contour
print
print "shape: ", contour.shape

for p in contour:
    print p[0][0]
    cv2.circle(CntExternalMask, (p[0][0], p[0][1]), 5, (0, 255, 0), -1)

cv2.imwrite("contour.jpg", CntExternalMask)
cv2.imshow("Contour image", CntExternalMask)
cv2.waitKey(0)
If you execute the program, the contour points are printed as a list of point coordinates.
The contour approximation method you choose influences the interpolation which is actually used (and the number of points found), as described here. I have added small dots at the points found with the approximation method cv2.CHAIN_APPROX_SIMPLE. You can see that the straight lines are already approximated.
I may not fully have understood your second step, though. You want to omit some of those points, replacing the point lists partially with splines. There might be different ways to do this, depending on your final intention. Do you just want to replace the straight lines? If you replace curved parts, what is the margin of error you are allowing?
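If the goal is to replace only the curved runs, one possible direction is to fit a parametric spline to each curved segment with scipy.interpolate.splprep. A sketch, assuming you have already located a segment's start and end indices (the 100:200 slice is a placeholder):

import numpy as np
from scipy.interpolate import splprep, splev

segment = contour[100:200, 0, :].astype(float)  # (N, 2) slice of contour points, placeholder indices
# s controls how much smoothing error is allowed; tune it to your tolerance
tck, u = splprep([segment[:, 0], segment[:, 1]], s=5.0)
u_fine = np.linspace(0, 1, 200)
x_s, y_s = splev(u_fine, tck)  # resampled smooth curve through the segment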
# import the necessary packages
import numpy as np
import argparse
import glob
import cv2

# For saving pdf
def save_pdf(imagename):
    import img2pdf
    # opening from filename
    with open("output.pdf", "wb") as f:
        f.write(img2pdf.convert(imagename))

# for finding the biggest contour
def bigercnt(contours):
    max_area = 0
    cnt = []
    for ii in contours:
        area = cv2.contourArea(ii)
        if area > max_area:
            max_area = area  # keep track of the biggest area seen so far
            cnt = ii
    return cnt

# STARTING
print("Reading img.jpg file")
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread('img.jpg')
image = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
print("Converting it to gray scale")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
print("Blurring")
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
print("Looking for edges")
# apply Canny edge detection
tight = cv2.Canny(blurred, 255, 250)
print("Looking for contours")
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
close = cv2.morphologyEx(tight, cv2.MORPH_CLOSE, kernel)
_, contours, hierarchy = cv2.findContours(close.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print("Looking for the biggest contour")
cnt = bigercnt(contours)
print("Cropping found contour")
x, y, w, h = cv2.boundingRect(cnt)
croped_image = image[y:y+h, x:x+w]
img2 = np.zeros((h, w, 4), np.uint8)
print("Taking only pixels inside the contour and creating png")
for i in range(h):
    for j in range(w):
        #print(x+j, y+i)
        #print(cv2.pointPolygonTest(cnt, (x+j, y+i), False))
        if cv2.pointPolygonTest(cnt, (x+j, y+i), False) == 1:
            img2[i, j] = [croped_image[i, j][0], croped_image[i, j][1], croped_image[i, j][2], 255]
        else:
            img2[i, j] = [255, 255, 255, 0]
print("Showing output image")
# Show the output image
#cv2.imshow('croped', croped_image)
cv2.imshow('output', img2)
params = list()
params.append(cv2.IMWRITE_PNG_COMPRESSION)
params.append(8)
print("Saving output image")
cv2.imwrite("output.png", img2, params)
print("Finish: converted")
cv2.waitKey(0)
cv2.destroyAllWindows()

Fill area marked in black line

I am trying to find the area contained within a black line in an image.
Here is the sample starting image "photo.jpg":
I have used OpenCV and SimpleCV for this.
Here is the code:
from SimpleCV import Camera, Display, Image, Color
import time
import cv2
import numpy as np
n_image = Image('photo.jpg')
n_image2 = n_image.crop(55, 72, 546, 276) #Crop X,Y,W,H
n_image2.save('photo_2.jpg')
imagea = Image("photo_2.jpg")
greya = imagea.stretch(50).invert() #50=Blackness level of Black
greya.show()
greya.save('photo_2-GREY.jpg')
im = cv2.imread('photo_2-GREY.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,220,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(im, [largest_areas[-2]], 0, (255,255,255,255), -1)
cv2.drawContours(im,contours,-1,(255,255,255),-1)
cv2.imshow('Image Window',im)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('photo_3.jpg',im)
n_image = Image('photo_3.jpg')
mask = n_image.colorDistance((127, 127, 127))
mask.show()
mask.save('mask.jpg')
time.sleep(3)
binarised = mask.binarize()
blobs = binarised.findBlobs()
blobs.show(width=3)
time.sleep(60)
individualareaofholes = blobs.area()
compositeareaofholes = sum(individualareaofholes)
orig_area = 132432
finalarea = (orig_area - compositeareaofholes)
res = round((float(finalarea) / orig_area) * 100, 0)  # float division, since both operands are ints in Python 2
print "Area is %d" % res
Here is the image "mask.jpg" which is used for area calculation:
Generated image "mask.jpg"
Observe:
1. the black patches inside the white area in "mask.jpg"
2. the white portion in the bottom left corner with the word "TAXI"
How do I eliminate them?
I just want everything enclosed within the black line to be gobbled up and everything outside the line not to be accounted for while calculating the area.
I think you are overcomplicating your solution (I may be wrong). I tried to modify your code to get the area within the black boundary. I am not sure if the area is correct, but it should give you a way to fine-tune it.
import cv2
import numpy as np

n_image = cv2.imread('5GZ6X.jpg') # Your original image
imgray = cv2.cvtColor(n_image, cv2.COLOR_BGR2GRAY)
im_new = np.zeros_like(imgray)
ret, thresh = cv2.threshold(imgray, 10, 255, 0)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(im_new, [largest_areas[-2]], 0, (255, 255, 255, 255), -1)
image_masked = cv2.bitwise_and(imgray, imgray, mask=im_new)
area = cv2.contourArea(largest_areas[-2])
for contour in largest_areas:
    areas = cv2.contourArea(contour)
    if areas > 300:
        print areas
print 'Complete area :' + str(n_image.shape[0] * n_image.shape[1])
print 'Area of selected region : ' + str(area)
cv2.imshow('main', image_masked)
cv2.waitKey(1000)
The result I got from this is
113455.5
135587.0
303849.0
Complete area :307200
Area of selected region : 135587.0
I got this image result after masking the image with the contour generated(largest contour)
Hope this helps! Good luck :)

How to get rid of transparent background using OpenCV 3 and Python 2.7?

I'm trying to remove the transparent background (the excess whitespace which is not visible here) from the last image. It looks like this:
The code which I'm using is as follows:
import cv2
import numpy as np
import os
from matplotlib import pyplot as plt
##Change directory to desktop
os.chdir("/home/meh/Desktop/")
##Reading the image
img_gray_scale = cv2.imread('img2.jpg',0)
img_colored = cv2.imread('img2.jpg',1)
###CONTOURS FOR IMAGE SEGMENTATION####
##Gray scale image must be used
ret, thresh = cv2.threshold(img_gray_scale,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
####Extracting just the ROI
###First argument img is the source of image
###Second is the contours, which should be passed as a python list
###Third is index of contours (to draw all contours pass -1)
####remaining are color and thickness
mask2 = cv2.drawContours(thresh, contours, 0, (255,0,0), -1)
masked_data = cv2.bitwise_and(img_gray_scale,img_gray_scale, mask = mask2)
b,g,r = cv2.split(img_colored)
rgba = [b,g,r, thresh]
dst = cv2.merge(rgba,4)
cv2.imwrite('phone_original_without_background.png',dst)
dst = cv2.cvtColor(dst,cv2.COLOR_BGR2GRAY)
cv2.imwrite('phone_grayscale_without_background.png',dst)
My question is, how do I remove the transparent background and just keep the phone's image?
I tried your code and it seems to do nothing. Assuming that you want to crop out all the outer color pixels, here's my solution:
Get all points of interest:
height, width = img_gray_scale.shape
fg = []
for col in range(width):
    for row in range(height):
        if thresh[row][col] < 255:
            fg.append((col, row))
Get the minimal rectangle:
rotatedRect = cv2.minAreaRect(np.array(fg))
Use warpAffine to crop out the region of interest:
def subimage2(image, rotatedRect):
    center, size, angle = rotatedRect
    width, height = int(size[0]), int(size[1])
    # convert angle to radians and build the affine transformation matrix
    theta = angle * np.pi / 180
    cosine, sine = np.cos(theta), np.sin(theta)
    mapping = np.array([[cosine, sine, -center[0] + width/2],
                        [-sine, cosine, -center[1] + height/2]])
    # write output
    return cv2.warpAffine(image, mapping, (width, height))
cropped = subimage2(dst, rotatedRect)
And here's what we get:
