Python Computer Vision Contours: Too many values to unpack?

I took source code from pyimagesearch.com to build a mobile document scanner and tried to test it. The edge detection part works, but whenever the program reaches the part that finds the contours of the image, it exits with an error saying there are too many values to unpack, even though the code works on the author's side.
What's the problem and how do I fix it?
Blog Post about the source code:
http://www.pyimagesearch.com/2014/09/01/build-kick-ass-mobile-document-scanner-just-5-minutes/?__vid=c35c22a06af30132982122000b2a88d7
Youtube video about the program:
https://www.youtube.com/watch?v=yRer1GC2298
Terminal Command in Ubuntu
python scan.py --image images/page.jpg
Result:
STEP 1: Edge Detection
Traceback (most recent call last):
  File "scan.py", line 40, in <module>
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ValueError: too many values to unpack
Code:
# USAGE
# python scan.py --image images/page.jpg
# import the necessary packages
from pyimagesearch.transform import four_point_transform
from pyimagesearch import imutils
from skimage.filter import threshold_adaptive
import numpy as np
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True,
    help = "Path to the image to be scanned")
args = vars(ap.parse_args())
# load the image and compute the ratio of the old height
# to the new height, clone it, and resize it
image = cv2.imread(args["image"])
ratio = image.shape[0] / 500.0
orig = image.copy()
image = imutils.resize(image, height = 500)
# convert the image to grayscale, blur it, and find edges
# in the image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(gray, 75, 200)
# show the original image and the edge detected image
print "STEP 1: Edge Detection"
cv2.imshow("Image", image)
cv2.imshow("Edged", edged)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find the contours in the edged image, keeping only the
# largest ones, and initialize the screen contour
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
# loop over the contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # if our approximated contour has four points, then we
    # can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
# show the contour (outline) of the piece of paper
print "STEP 2: Find contours of paper"
cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
cv2.imshow("Outline", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# apply the four point transform to obtain a top-down
# view of the original image
warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
# convert the warped image to grayscale, then threshold it
# to give it that 'black and white' paper effect
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
warped = threshold_adaptive(warped, 250, offset = 10)
warped = warped.astype("uint8") * 255
# show the original and scanned images
print "STEP 3: Apply perspective transform"
cv2.imshow("Original", imutils.resize(orig, height = 650))
cv2.imshow("Scanned", imutils.resize(warped, height = 650))
cv2.waitKey(0)

This is the answer that at least works for me. In OpenCV 3 the function returns 3 values, so:
_,contours,hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)

(_, cnts, hierarchy) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)

In OpenCV 3.0.0 (beta) they added a return value. This works:
derp,contours,hierarchy = cv2.findContours(dilation.copy(),cv2.RETR_LIST ,cv2.CHAIN_APPROX_SIMPLE)
The first value (derp here) is simply the image that findContours operated on, and it can be ignored.
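If you want the script to run on any OpenCV version, you can also unpack by position instead of by count, since the contour list is always the second-to-last return value. A minimal sketch (the helper name grab_contours is my own; recent versions of imutils ship an equivalent function):
# works on OpenCV 2.4.x, 3.x and 4.x alike
import cv2
def grab_contours(ret):
    # cv2.findContours returns (contours, hierarchy) on 2.4/4.x
    # and (image, contours, hierarchy) on 3.x; in every case the
    # contour list is the second-to-last element
    return ret[-2]
# usage with the question's edged image:
# cnts = grab_contours(cv2.findContours(edged.copy(), cv2.RETR_LIST,
#                                       cv2.CHAIN_APPROX_SIMPLE))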

Related

openCV: Warp Perspective for various size of images

I am learning computer vision and trying to warp the perspective of pictures of a single sheet of paper for OCR. The sample picture is below.
I succeeded in binarizing the image and detecting contours, yet I am having difficulty warping the perspective based on those contours.
def display_cv_image(image, format='.png'):
    """
    Display image from 2d array
    """
    decoded_bytes = cv2.imencode(format, image)[1].tobytes()
    display(Image(data=decoded_bytes))
def get_contour(img, original, thresh):
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 10000:
            epsilon = 0.1*cv2.arcLength(cnt,True)
            approx = cv2.approxPolyDP(cnt,epsilon,True)
            areas.append(approx)
    cv2.drawContours(original,areas,-1,(0,255,0),3)
    display_cv_image(original)
    return areas[0]
def perspective(original, target):
    dst = []
    pts1 = np.float32(target)
    pts2 = np.float32([[1000,2000],[1000,0],[0,0],[0,2000]])
    M = cv2.getPerspectiveTransform(pts1,pts2)
    dst = cv2.warpPerspective(original,M,(1000,2000))
    display_cv_image(dst)
# Driver codes
original = cv2.imread('image.jpg')
thresh, grey = binarize(original)
target = get_contour(grey,original, thresh)
perspective(original, target)
The problem is pts2 in the perspective function. I have tried multiple values for the variable, but none of them work. I want to back-calculate the mapping matrix and, if possible, make the function adapt to various image sizes.
A good description of the four point perspective transform can be found in Adrian's tutorial: https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
There is a four_point_transform function in the imutils module.
As far as the above picture is concerned, the following code snippet warps and binarizes the image for use as OCR input.
import cv2
import numpy as np
from imutils.perspective import four_point_transform
import imutils
original = cv2.imread('image.jpg')
blurred = cv2.GaussianBlur(original, (3, 3), 0)
blurred_float = blurred.astype(np.float32) / 255.0
edgeDetector = cv2.ximgproc.createStructuredEdgeDetection('model.yml')
edged = edgeDetector.detectEdges(blurred_float)
edged = (255 * edged).astype("uint8")
edged = cv2.threshold(edged, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break
if len(screenCnt) == 4:
    warped = four_point_transform(original, screenCnt.reshape(4, 2))
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    T = cv2.ximgproc.niBlackThreshold(warped, maxValue=255, type=cv2.THRESH_BINARY_INV, blockSize=81, k=0.1, binarizationMethod=cv2.ximgproc.BINARIZATION_WOLF)
    warped = (warped > T).astype("uint8") * 255
cv2.imshow("Original", imutils.resize(original, height = 650))
cv2.imshow("Edged", imutils.resize(edged, height = 650))
cv2.imshow("Warped", imutils.resize(warped, height = 650))
cv2.waitKey(0)
Following are the original, edged and final warped binarized outputs:
Please note that StructuredEdgeDetection is used for better edge detection. You can download the model.yml file from the link: https://cdn.rawgit.com/opencv/opencv_extra/3.3.0/testdata/cv/ximgproc/model.yml.gz
Also note that the Wolf & Jolion binarization technique is used for better output.
You need to install the opencv-contrib-python package through pip for StructuredEdgeDetection and niBlackThreshold.
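As a direct answer to the pts2 question: you can derive the output size from the detected quad itself instead of hard-coding (1000, 2000). A sketch, assuming target holds four corner points (the helper names are my own; this mirrors what four_point_transform does internally):
import cv2
import numpy as np
def order_points(pts):
    # order corners as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]   # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]   # bottom-right: largest x + y
    d = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(d)]   # top-right: smallest y - x
    rect[3] = pts[np.argmax(d)]   # bottom-left: largest y - x
    return rect
def adaptive_perspective(original, target):
    rect = order_points(np.float32(target).reshape(4, 2))
    (tl, tr, br, bl) = rect
    # let the output size follow the quad's own edge lengths
    w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
    pts2 = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
    M = cv2.getPerspectiveTransform(rect, pts2)
    return cv2.warpPerspective(original, M, (w, h))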

Find contours based on edges

I want to detect the contours of an equipment label. Although the code runs correctly, it never quite detects the contours of the label.
Original Image
Using this code:
import numpy as np
import cv2
import imutils #resizeimage
import pytesseract # convert img to string
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Read the image file
image = cv2.imread('Car Images/5.JPG')
# Resize the image - change width to 500
image = imutils.resize(image, width=500)
# Display the original image
cv2.imshow("Original Image", image)
cv2.waitKey(0)
# RGB to Gray scale conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("1 - Grayscale Conversion", gray)
cv2.waitKey(0)
# Noise removal with iterative bilateral filter(removes noise while preserving edges)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("2 - Bilateral Filter", gray)
cv2.waitKey(0)
# Find Edges of the grayscale image
edged = cv2.Canny(gray, 170, 200)
cv2.imshow("3 - Canny Edges", edged)
cv2.waitKey(0)
# Find contours based on Edges
cnts, new = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Create copy of original image to draw all contours
img1 = image.copy()
cv2.drawContours(img1, cnts, -1, (0,255,0), 3)
cv2.imshow("4- All Contours", img1)
cv2.waitKey(0)
# sort contours by area and keep only the 30 largest (anything smaller will not be considered)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:30]
NumberPlateCnt = None #we currently have no Number plate contour
# Top 30 Contours
img2 = image.copy()
cv2.drawContours(img2, cnts, -1, (0,255,0), 3)
cv2.imshow("5- Top 30 Contours", img2)
cv2.waitKey(0)
# loop over our contours to find the best possible approximate contour of number plate
count = 0
idx =7
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # print ("approx = ",approx)
    if len(approx) == 4: # Select the contour with 4 corners
        NumberPlateCnt = approx # This is our approx Number Plate Contour
        # Crop those contours and store it in Cropped Images folder
        x, y, w, h = cv2.boundingRect(c) # This will find out co-ord for plate
        new_img = gray[y:y + h, x:x + w] # Create new image
        cv2.imwrite('Cropped Images-Text/' + str(idx) + '.png', new_img) # Store new image
        idx += 1
        break
# Drawing the selected contour on the original image
#print(NumberPlateCnt)
cv2.drawContours(image, [NumberPlateCnt], -1, (0,255,0), 3)
cv2.imshow("Final Image With Number Plate Detected", image)
cv2.waitKey(0)
Cropped_img_loc = 'Cropped Images-Text/7.png'
cv2.imshow("Cropped Image ", cv2.imread(Cropped_img_loc))
# Use tesseract to convert image into string
text = pytesseract.image_to_string(Cropped_img_loc, lang='eng')
print("Equipment Number is :", text)
cv2.waitKey(0) #Wait for user input before closing the images displayed
Displayed output
Is there a better way to narrow down the contour to the equipment label?
Here is the code for your reference on github:
https://github.com/AjayAndData/Licence-plate-detection-and-recognition---using-openCV-only/blob/master/Car%20Number%20Plate%20Detection.py
I think this code may help you
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('C:/Users/DELL/Desktop/download (5).png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,60,0.001,10)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 0, 255, -1)
coord = np.where(np.all(img == (255, 0, 0),axis=-1))
plt.imshow(img)
plt.show()
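Another way to narrow the candidates down to the equipment label (my own suggestion, not part of the answer above) is to filter the four-corner contours by area and aspect ratio before cropping; the thresholds here are hypothetical starting points to tune:
import cv2
def label_candidates(cnts, min_area=1000, ar_range=(1.5, 6.0)):
    # keep 4-corner contours whose bounding box is label-shaped
    out = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) != 4:
            continue
        x, y, w, h = cv2.boundingRect(approx)
        ar = w / float(h)
        if w * h >= min_area and ar_range[0] <= ar <= ar_range[1]:
            out.append(approx)
    return out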

Opencv find Contours on image

I am writing code that detects a book in an image. The first step is to find the contours in the image, but I have problems with some books. Sometimes I can't detect the contours correctly (a book is a rectangle, so I just need to find 4 corners) because the edge line is not traced correctly and there are gaps between the segments, as shown in the image. Is there a way to extend the detected edges?
This is my code:
imgg = cv2.imread('book.jpg')
gray = cv2.cvtColor(imgg, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray , 10, 250)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
total = 0
#binary = cv2.bitwise_not(gray)
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.03 * peri, True)
    if (len(approx) == 4) and (area > 100000):
        cv2.drawContours(imgg, [approx], -1, (0, 255, 0), 4)
cv2.imshow('image',imgg)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is a quick example of thresholding; remember to have a test.png in the same folder as the following script. Use it before applying findContours and it should be a significant improvement. Otherwise, look up Otsu's binarization.
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
img = cv.imread('test.png',0)
img = cv.medianBlur(img,5)
ret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)
th2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\
cv.THRESH_BINARY,11,2)
th3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv.THRESH_BINARY,11,2)
titles = ['Original Image', 'Global Thresholding (v = 127)',
          'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
    plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
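To address the "extend the detected edges" question directly, a morphological closing of the edge map can bridge small gaps before findContours. A minimal sketch (my own suggestion; the kernel size is a starting point to tune):
import cv2
import numpy as np
img = cv2.imread('book.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edged = cv2.Canny(gray, 10, 250)
# a 5x5 closing (dilate then erode) bridges gaps a few pixels wide
kernel = np.ones((5, 5), np.uint8)
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)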
Edge detection often works poorly. Here you discard highly relevant information, which is the color contrast.
Binarization of the saturation component will be much more effective.
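A sketch of that suggestion (my own code, assuming a color photo named book.jpg): binarize the saturation channel instead of edge-detecting the grayscale image.
import cv2
img = cv2.imread('book.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
sat = cv2.medianBlur(hsv[:, :, 1], 5)  # saturation channel, denoised
# Otsu picks the threshold automatically
_, binary = cv2.threshold(sat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow('binarized saturation', binary)
cv2.waitKey(0)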

Rect Module Python

I recently downloaded some Python code that tries to scan a receipt (or prepare it for scanning). I tried to run the code, but there seems to be a problem: Python doesn't recognize the module 'rect'. I tried to download the module, but no such module is available. I'm stuck and wondering what to do.
Note: only one line of code uses the module
Code :
import cv2
import numpy as np
import rect
# add image here.
image = cv2.imread('test_pic.jpg')
# resize image
# choose optimal dimensions
image = cv2.resize(image, (1500, 880))
# create copy of original image
orig = image.copy()
# convert to grayscale and blur to smooth
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gaussian blur to smoothen texture
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
#blurred = cv2.medianBlur(gray, 5)
# apply Canny Edge Detection
edged = cv2.Canny(blurred, 0, 50)
orig_edged = edged.copy()
# find the contours in the edged image
# keep only the largest ones, and
# initialize screen contour
(contours, _) = cv2.findContours(edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
#x,y,w,h = cv2.boundingRect(contours[0])
#cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),0)
# get approximate contour
for c in contours:
    p = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * p, True)
    if len(approx) == 4:
        target = approx
        break
# map target points to 800x800 quadrilateral
approx = rect.rectify(target)
pts2 = np.float32([[0,0],[800,0],[800,800],[0,800]])
M = cv2.getPerspectiveTransform(approx,pts2)
dst = cv2.warpPerspective(orig,M,(800,800))
cv2.drawContours(image, [target], -1, (0, 255, 0), 2)
dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
# use thresholding on warped image to get scanned effect (If Required)
ret,th1 = cv2.threshold(dst,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(dst,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(dst,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
ret2,th4 = cv2.threshold(dst,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# show results
cv2.imshow("Original.jpg", orig)
cv2.imshow("Original Gray.jpg", gray)
cv2.imshow("Original Blurred.jpg", blurred)
cv2.imshow("Original Edged.jpg", orig_edged)
cv2.imshow("Outline.jpg", image)
cv2.imshow("Thresh Binary.jpg", th1)
cv2.imshow("Thresh mean.jpg", th2)
cv2.imshow("Thresh gauss.jpg", th3)
cv2.imshow("Otsu's.jpg", th4)
cv2.imshow("dst.jpg", dst)
# other thresholding methods
"""
ret,thresh1 = cv2.threshold(dst,127,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(dst,127,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(dst,127,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(dst,127,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(dst,127,255,cv2.THRESH_TOZERO_INV)
cv2.imshow("Thresh Binary", thresh1)
cv2.imshow("Thresh Binary_INV", thresh2)
cv2.imshow("Thresh Trunch", thresh3)
cv2.imshow("Thresh TOZERO", thresh4)
cv2.imshow("Thresh TOZERO_INV", thresh5)
"""
cv2.waitKey(0)
cv2.destroyAllWindows()
In the GitHub repo you presumably grabbed the code from, there's another file called rect.py with a single function, rectify(), that is used in the main program. In Python, you can create other .py modules and import them into your code for better encapsulation of certain functions, although in this code it seems unnecessary to keep the rectify() function in a separate file. A layout that's equally basic but more common is one .py file with all your functions and a main .py file that uses them.
Edit: so to be clear, the rect module is in the repo itself. A word of advice, generally clone a whole repo unless you know for sure that you don't need the other files in it.
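For reference, here is a minimal stand-in for rect.rectify, assuming it is the usual corner-ordering helper (the actual file in the repo may differ in details); it orders the four points to match pts2 above (top-left, top-right, bottom-right, bottom-left):
import numpy as np
def rectify(h):
    # order the 4 points as top-left, top-right, bottom-right, bottom-left
    h = h.reshape((4, 2))
    hnew = np.zeros((4, 2), dtype=np.float32)
    add = h.sum(1)
    hnew[0] = h[np.argmin(add)]   # top-left: smallest x + y
    hnew[2] = h[np.argmax(add)]   # bottom-right: largest x + y
    diff = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(diff)]  # top-right: smallest y - x
    hnew[3] = h[np.argmax(diff)]  # bottom-left: largest y - x
    return hnew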

Having trouble with orientation detection in OpenCV

I am trying to make a computer vision script that detects the orientation of objects. It works a majority of the time, but it seems that it is not able to have the same success for certain images.
This script relies on blurring and Canny edge detection to find the contours.
Working example:
Part where it fails:
For the part where it fails, it draws two lines for one and the same shape, and it completely ignores one of the other shapes.
Main code:
import cv2
from imgops import imutils
import CVAlgo
z = 'am'
path = 'images/pca.jpg'
#path = 'images/pca2.jpg'
img = cv2.imread(path)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = imutils.resize(img, height = 600)
imgray = imutils.resize(img, height = 600)
final = img.copy()
thresh, imgray = CVAlgo.filtering(img, imgray, z)
__ , contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Iterate through all contours
test = CVAlgo.cnt_gui(final, contours)
#cv2.imwrite('1.jpg', final)
cv2.imshow('thresh', thresh)
cv2.imshow('contours', final)
cv2.waitKey(0)
CVAlgo.py
import cv2
from numpy import *
from pylab import *
from imgops import imutils
import math
def invert_img(img):
    img = (255-img)
    return img
def canny(imgray):
    imgray = cv2.GaussianBlur(imgray, (11,11), 200)
    canny_low = 0
    canny_high = 100
    thresh = cv2.Canny(imgray,canny_low,canny_high)
    return thresh
def cnt_gui(img, contours):
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)
    for i in range(0,len(cnts)):
        sel_cnts = sorted(contours, key = cv2.contourArea, reverse = True)[i]
        area = cv2.contourArea(sel_cnts)
        if area < 1000:
            continue
        # get orientation angle and center coord
        center, axis, angle = cv2.fitEllipse(sel_cnts)
        hyp = 100 # length of the orientation line
        # Find coordinates of the 2nd point given the line length and center coord
        linex = int(center[0]) + int(math.sin(math.radians(angle))*hyp)
        liney = int(center[1]) - int(math.cos(math.radians(angle))*hyp)
        # Draw orientation
        cv2.line(img, (int(center[0]),int(center[1])), (linex, liney), (0,0,255),5)
        cv2.circle(img, (int(center[0]), int(center[1])), 10, (255,0,0), -1)
    return img
def filtering(img, imgray, mode):
    imgray = cv2.medianBlur(imgray, 11)
    thresh = cv2.Canny(imgray,75,200)
    return thresh, imgray
Does anyone know what the problem is? Anyone know how I can improve this script?
The shape that has not been detected is too close to the black background, so its contour has been merged with the contour of the white object area. The second orientation you find in one of the objects is in fact the orientation of the outer contour. To circumvent some of this, you can dilate or close the binary image after thresholding, using cv2.dilate or cv2.morphologyEx.
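A sketch of that step (my own code; the file name, kernel size and iteration count are hypothetical starting points), applied to the binary image before findContours:
import cv2
import numpy as np
thresh = cv2.imread('thresh.png', 0)  # hypothetical saved binary image
kernel = np.ones((3, 3), np.uint8)
# thicken the white regions so fragmented contours merge
dilated = cv2.dilate(thresh, kernel, iterations=2)
# closing (dilate then erode) keeps the object size roughly unchanged
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)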
I have a suggestion. Since you have extracted each of the objects in the image as a contour, try fitting an ellipse to each of them. Then find the major axis of each ellipse, and finally the angle of orientation of each major axis.
