Raspberry Pi OpenCV-2.4.X USB camera distance detection - python

newb question here... I have been following this guide to detect the distance between an object and the camera.
Here is the code I am currently running:
# import the necessary packages
import numpy as np
import cv2

def find_marker(image):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = max(cnts, key=cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 24.0

# initialize the known object width, which in this case, the piece of
# paper is 11 inches wide
KNOWN_WIDTH = 11.0

# initialize the list of images that we'll be using
IMAGE_PATHS = ["images/2ft.png", "images/3ft.png", "images/4ft.png"]

# load the first image that contains an object that is KNOWN TO BE 2 feet
# from our camera, then find the paper marker in the image, and initialize
# the focal length
image = cv2.imread(IMAGE_PATHS[0])
marker = find_marker(image)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

# loop over the images
for imagePath in IMAGE_PATHS:
    # load the image, find the marker in the image, then compute the
    # distance to the marker from the camera
    image = cv2.imread(imagePath)
    marker = find_marker(image)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # draw a bounding box around the image and display it
    box = np.int0(cv2.cv.BoxPoints(marker))
    cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
    cv2.putText(image, "%.2fft" % (inches / 12),
                (image.shape[1] - 200, image.shape[0] - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
    cv2.imshow("image", image)
    cv2.waitKey(0)
It works. However, I am unsure how to adapt the code to detect the distance between an object and the camera in real time (from video) instead of from still images.
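As a general pattern (a minimal sketch, not taken from the guide): open the camera with cv2.VideoCapture, run the same find_marker() and distance_to_camera() on every frame, and reuse the focal length calibrated from the reference image. The camera index 0 and the quit key are assumptions:

# minimal real-time sketch reusing find_marker()/distance_to_camera();
# assumes focalLength was already calibrated from a reference image as above
cap = cv2.VideoCapture(0)  # 0 = first USB camera (assumption)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # note: find_marker() raises if no contour is found, so a guard
    # would be needed on frames without the marker
    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    cv2.putText(frame, "%.2fft" % (inches / 12), (30, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()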

I am currently using a Tello drone with the same code. The difference is that I use a video feed, interpolate the contours into rectangles, and track a model rocket launch. I think what you are looking for is the OpenCV code that processes the video frames. This YouTube video uses the Tello video feed and OpenCV to calculate a contour box for a face: https://www.youtube.com/watch?v=LmEcyQnfpDA&t=7253s.
import cv2
from tracker import *  # provides EuclideanDistTracker (tracker.py)
import math, time, numpy as np

# global variables
_w, _h = 0, 0
pid = [.01, .01, 0]
pError = 0
cap = cv2.VideoCapture("rocketVideo.mp4")
loop = True
rocketPositionList = []
rocketPositionListArea = []

# initialize the known distance from the camera to the object
# (here 480, in the units used for calibration)
KNOWN_DISTANCE = 480

# initialize the known object width (here 5)
KNOWN_WIDTH = 5
inches = 0

# distance tracker from tracker.py
tracker = EuclideanDistTracker()

# object detector
object_detector = cv2.createBackgroundSubtractorMOG2(history=4000, varThreshold=330)

# calculate frame data to get rocket positions in frame
def getFrameCalculation(frame):
    if len(rocketPositionListArea) != 0:
        i = rocketPositionListArea.index(max(rocketPositionListArea))
        return frame, [rocketPositionList[i], rocketPositionListArea[i]]
    else:
        return frame, [[0, 0], 0]

def distance_to_camera(perWidth):
    # compute and return the distance from the marker to the camera
    return (KNOWN_WIDTH * focalLength) / perWidth

while loop:
    # start reading OpenCV video frames
    ret, frame = cap.read()
    if not ret:  # end of video
        break
    height, width, _ = frame.shape
    w = width
    h = height

    # extract region of interest
    roi = frame[0:1110, 0:720]

    # object detection
    mask = object_detector.apply(roi)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    detections = []
    for cnt in contours:
        # calculate area and remove small elements
        area = cv2.contourArea(cnt)
        # get relative area of rocket contour
        if area > 100:
            x, y, w, h = cv2.boundingRect(cnt)
            if x > 210:
                detections.append([x, y, w, h])

    # update box ids from tracker->detections
    boxes_ids = tracker.update(detections)

    # create visual data for box ids
    for boxes_id in boxes_ids:
        x, y, w, h, id = boxes_id
        cv2.putText(roi, str(id), (x, y - 15), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # get circle center and area
        cx = x + w // 2
        cy = y + h // 2
        area = w * h
        cv2.circle(roi, (cx, cy), 4, (0, 0, 255), cv2.FILLED)
        rocketPositionList.append([cx, cy])
        rocketPositionListArea.append(area)

    # get first known focal length (guard against frames with no detections yet)
    if rocketPositionListArea:
        focalLength = (rocketPositionListArea[0] * KNOWN_DISTANCE) / KNOWN_WIDTH

    # calculate img frame in video
    frame, info = getFrameCalculation(frame)
    feetOut = 0
    if info[1] > 0:
        # get inches from distance to camera
        inches = distance_to_camera(info[1])
        # give data to Tello to operate movement action
        feetOut = inches / 12
    cv2.putText(roi, str(int(feetOut)) + "ft.", (50, 50), cv2.FONT_HERSHEY_PLAIN, 4, (255, 0, 0), 2)

    # display cv2 windows (the original referenced undefined resizeR/resizeM/resizeF)
    cv2.imshow("ROI", roi)
    cv2.imshow("Mask", mask)
    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# clean up cv2 and exit
cap.release()
cv2.destroyAllWindows()
exit()

Related

Segmenting circle/ellipses from OMR bubble sheet image

I am trying to extract information about a single bubble from an OMR bubble sheet to determine whether it is correctly marked or not.
Here is the original bubble sheet:
What I did:
1. Got a warped (top-down) perspective of the section containing the bubbles (say, img_wrap).
2. Converted img_wrap to grayscale and applied an adaptive threshold (say, img_thresh).
3. For segmenting it further into a single bubble, I tried the blob detector method given here.
However, I did not get the expected results from step 3 and couldn't find any other method or workaround for this.
Complete code:
import cv2
import numpy as np

def extract_rect(contours):  # extract the rectangular area containing the bubbles
    rect_contours = []
    for c in contours:
        if cv2.contourArea(c) > 10000:
            perimeter = cv2.arcLength(c, True)
            # approxPolyDP approximates a curve or polygon with another one with
            # fewer vertices, so that the distance between them is less than or
            # equal to the specified precision (Douglas-Peucker algorithm)
            approx = cv2.approxPolyDP(c, 0.02 * perimeter, True)
            # if len(approx) == 4:  # cv2.boundingRect seems to take care of this automatically
            #     rect_contours.append(c)
            rect_contours.append(c)
    rect_contours = sorted(rect_contours, key=cv2.contourArea, reverse=True)
    return rect_contours

def findlen(p1, p2):
    # length = sqrt((x2-x1)**2 + (y2-y1)**2)
    length = ((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2) ** 0.5
    return length

def rect_points(rect_contour):  # find corner points of the extracted rectangle, used for the warped perspective in step 1
    perimeter = cv2.arcLength(rect_contour, True)
    approx = cv2.approxPolyDP(rect_contour, 0.02 * perimeter, True)
    print("APPROX")
    print(type(approx))
    print(approx)
    cv2.drawContours(img, approx, -1, (100, 10, 55), 18)
    cv2.drawContours(img, rect_contour, -1, (100, 10, 55), 1)
    x, y, w, h = cv2.boundingRect(rect_contour)
    print("printing x y w h")
    print(x, y, w, h)
    # corner points of the rectangle, later used to warp the rectangular section
    point_1 = np.array([x, y])
    point_2 = np.array([x + w, y])
    point_3 = np.array([x, y + h])
    point_4 = np.array([w, h])
    paper_length = findlen(point_1, point_3)
    paper_breadth = findlen(point_1, point_2)
    if paper_breadth > paper_length:  # here breadth should be smaller than length
        temp = point_1.copy()
        point_1 = point_3.copy()
        point_3 = point_4.copy()
        point_4 = point_2.copy()
        point_2 = temp
    # note: np.append returns a new array rather than filling corner_list in
    # place, so build the array directly from the four points
    corner_list = np.array([point_1, point_2, point_3, point_4], dtype=np.int32)
    print("corners list")
    print(corner_list)
    new_cornerlist = np.zeros((4, 1, 2), np.int32)
    add = corner_list.sum(1)
    # print(add)
    # print(np.argmax(add))
    new_cornerlist[0] = corner_list[np.argmin(add)]  # [0,0]
    new_cornerlist[3] = corner_list[np.argmax(add)]  # [w,h]
    diff = np.diff(corner_list, axis=1)
    new_cornerlist[1] = corner_list[np.argmin(diff)]  # [w,0]
    new_cornerlist[2] = corner_list[np.argmax(diff)]  # [h,0]
    print(new_cornerlist.shape)
    return new_cornerlist

def detect_blob(image):  # part of step 3
    # initialize filtering parameters for cv2.SimpleBlobDetector
    params = cv2.SimpleBlobDetector_Params()
    # area filtering parameters
    params.filterByArea = True
    params.minArea = 90
    # circularity filtering parameters
    params.filterByCircularity = True
    params.minCircularity = 0.2
    # convexity filtering parameters
    params.filterByConvexity = True
    params.minConvexity = 0.7
    # inertia filtering parameters
    params.filterByInertia = True
    params.minInertiaRatio = 0.05
    # create a detector with the parameters
    detector = cv2.SimpleBlobDetector_create(params)
    # detect blobs
    keypoints = detector.detect(image)
    # draw blobs on our image as red circles
    blank = np.zeros((1, 1))
    blobs = cv2.drawKeypoints(image, keypoints, blank, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    number_of_blobs = len(keypoints)
    text = "Number of Circular Blobs: " + str(number_of_blobs)
    cv2.putText(blobs, text, (20, 550), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)
    cv2.imshow("Filtering Circular Blobs Only", blobs)
    cv2.waitKey(0)

img_path = 'bubblesheet.jpg'  # path of the input bubble sheet image
img = cv2.imread(img_path)
print(img.shape)  # original size is 1600 x 1200 with 3 color channels
img_width = 600
img_height = 600
img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_AREA)

# grayscale, because for edge detection we only need intensity information
# and similar-intensity pixels to detect the contours
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (5, 5), 0)  # blurred image
cv2.imshow('Blurred', img_blur)

# edge detection with Canny; binary thresholding could have been an alternative
# (if the pixel value is smaller than the threshold it is set to 0, otherwise
# to a maximum value)
img_canny = cv2.Canny(img_blur, 20, 110)
cv2.imshow('Edge detection', img_canny)

# find contours; drawContours parameters are (image, contours, contour_idx,
# color, thickness), where contour_idx = -1 draws all contours
contours, hierarchy = cv2.findContours(img_canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
img_contours = img.copy()
cv2.drawContours(img_contours, contours, -1, (0, 255, 0), 1)
cv2.imshow('Contours', img_contours)

rect_contours = extract_rect(contours)
cv2.drawContours(img, rect_contours[1], -1, (0, 255, 0), 1)

wrap_points = rect_points(rect_contours[2])
if wrap_points.size != 0:
    cv2.drawContours(img, wrap_points, -1, (0, 0, 255), 12)

warp_img_width = int(img_width)
warp_img_height = int(img_height)
warp_from = np.float32(wrap_points)
warp_to = np.float32([[0, 0], [warp_img_width, 0], [0, warp_img_height], [warp_img_width, warp_img_height]])
transformation_matrix = cv2.getPerspectiveTransform(warp_from, warp_to)
img_warp = cv2.warpPerspective(img, transformation_matrix, (warp_img_width, warp_img_height))
cv2.imshow('Warped Perspective', img_warp)

img_warp_gray = cv2.cvtColor(img_warp, cv2.COLOR_BGR2GRAY)
# use an adaptive threshold to get the best threshold value
img_thresh = cv2.adaptiveThreshold(img_warp_gray, 200, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
cv2.imshow('img_thresh', img_thresh)

detect_blob(img_thresh)  # attempt to extract each bubble from the image

cv2.imshow('Original', img)
cv2.waitKey(0)
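If the blob detector keeps misfiring, a workaround for step 3 (my sketch, under the assumption that the bubbles sit in a regular, evenly spaced grid inside the warped region) is to split img_thresh into equal cells, one bubble per cell, and count the filled pixels in each; the grid shape below (rows, cols) is a placeholder to set to match your sheet:

def split_bubbles(thresh, rows=10, cols=5):
    # crop so the dimensions divide evenly, then split into a rows x cols grid
    h, w = thresh.shape[:2]
    thresh = thresh[:h - h % rows, :w - w % cols]
    cells = []
    for band in np.vsplit(thresh, rows):
        cells.extend(np.hsplit(band, cols))
    return cells

# after THRESH_BINARY_INV, a marked bubble has far more white pixels than an empty one
cells = split_bubbles(img_thresh)
counts = [cv2.countNonZero(c) for c in cells]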

cv2 findArucoMarkers sees marker from far away but doesn't draw center point until closer

The Aruco marker is detected in the img, but the detection isn't complete because the cv2 drawing functions don't appear on the img until I'm closer to the marker.
I also have white borders around the marker, which helps a lot, but this is a different issue.
My code works, but I don't understand why cv2 doesn't draw the center point of the marker from far away when there's already a green square detecting the marker from the findArucoMarkers() function.
Here's what I see using the simulation:
Doesn't see the center point
Sees the center point when a "little" closer
import cv2
import cv2.aruco as aruco

def findArucoMarkers(img, markerSize=4, totalMarkers=50, draw=True):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    key = getattr(aruco, f"DICT_{markerSize}X{markerSize}_{totalMarkers}")
    arucoDict = aruco.Dictionary_get(key)
    arucoParam = aruco.DetectorParameters_create()
    bboxs, ids, rejected = aruco.detectMarkers(imgGray, arucoDict, parameters=arucoParam)
    # print(ids)
    if draw:
        aruco.drawDetectedMarkers(img, bboxs)
    return [bboxs, ids]

def marker_finder():
    # gets img from the simulator for VideoCapture to read
    sendDeviceImage(robot, display)
    cap = cv2.VideoCapture("display.jpg")
    # load image
    ret, img = cap.read()
    findArucoMarkers(img, 4, 100, True)
    arucoFound = findArucoMarkers(img)
    # loop through all the markers and augments
    if len(arucoFound[0]) != 0:
        for bbox, ids in zip(arucoFound[0], arucoFound[1]):
            print(f"ID: {ids}")
            # get sides of marker
            x, y, w, h = cv2.boundingRect(bbox)
            # get center point of bounding rect
            c_x = x + w // 2
            c_y = y + h // 2
            center = [c_x, c_y]
            print(center)
            radius = 2
            cv2.circle(img, center, radius, (255, 255, 0), 2)
    cv2.imshow("Img", img)
    cv2.waitKey(33)
    # removes img from simulator
    cleanup()
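For reference, a minimal sketch (my addition, not the asker's code) of computing the marker center directly from the corner array returned by aruco.detectMarkers instead of going through cv2.boundingRect; the (1, 4, 2) corner layout is the standard one, but treat the indexing as an assumption to check against your OpenCV version:

import numpy as np

def marker_center(bbox):
    # bbox from aruco.detectMarkers has shape (1, 4, 2): four (x, y) corners
    corners = bbox.reshape(4, 2)
    c_x, c_y = corners.mean(axis=0)
    return int(c_x), int(c_y)

# usage inside the loop above, instead of cv2.boundingRect:
# center = marker_center(bbox)
# cv2.circle(img, center, 2, (255, 255, 0), 2)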

limit scan area in opencv python [duplicate]

I'm working on a project using Python (3.7) and OpenCV in which I have an image (captured using the camera) of a document with a QR code placed on it.
This QR code encodes six values:
Size of QR code image
Top
Right
Bottom
Left
Unit
Latest Update:
Here are the steps I need to perform in the same order:
Detect the QR code and decode it to read the size values.
So, if the size of the QR code in the image is not equal to the size mentioned inside it, scale the image until both size values are equal.
Then crop the image on all sides of the QR code according to the values mentioned inside it.
I have tried this code:
import cv2
import math

def decodeAndCrop(inputImage):
    print(str(inputImage))
    image = cv2.imread(str(inputImage))
    qrCodeDetector = cv2.QRCodeDetector()
    decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
    qr_data = decodedText.split(",")
    print("qr data from function: {}".format(qr_data))
    if points is not None:
        pts = len(points)
        # print(pts)
        for i in range(pts):
            nextPointIndex = (i + 1) % pts
            if str(inputImage) == "scaled_img.jpg":
                cv2.line(
                    image,
                    tuple(points[i][0]),
                    tuple(points[nextPointIndex][0]),
                    (255, 0, 0),
                    5,
                )
            print(points[i][0])
        width = int(
            math.sqrt(
                (points[0][0][0] - points[1][0][0]) ** 2
                + (points[0][0][1] - points[1][0][1]) ** 2
            )
        )
        height = int(
            math.sqrt(
                (points[1][0][0] - points[2][0][0]) ** 2
                + (points[1][0][1] - points[2][0][1]) ** 2
            )
        )
        print("height and width after scaling: {} {}".format(height, width))
        if not str(inputImage) == "scaled_img.jpg":
            scaled_img = None
            if width == qr_data[0] and height == qr_data[0]:
                print("Sizes are equal")
                # add the extension values to points and crop
                y = int(points[0][0][1]) - int(qr_data[1])
                x = int(points[0][0][0]) - int(qr_data[4])
                roi = image[
                    y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
                ]
                scaled_img = cv2.imwrite("scaled_img.jpg", roi)
                return scaled_img
            else:
                print(
                    "Width and height "
                    + str(width)
                    + "x"
                    + str(height)
                    + " not equal to "
                    + str(qr_data[0])
                    + "x"
                    + str(qr_data[0])
                )
                if height > int(qr_data[0]):
                    scale_width = int(width) - int(qr_data[0])
                    scale_height = int(height) - int(qr_data[0])
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                elif height < int(qr_data[0]):
                    scale_width = int(qr_data[0]) - width
                    scale_height = int(qr_data[0]) - height
                    print(f"scaled width: {scale_width} scaled height: {scale_height}")
                    dimension = (scale_width, scale_height)
                    scaled_img = cv2.resize(
                        image, dimension, interpolation=cv2.INTER_AREA
                    )
                    print("new img dims: {}".format(scaled_img.shape))
                    cv2.imshow("scaled image:", scaled_img)
                    cv2.imwrite("scaled_img.jpg", scaled_img)
                cv2.imshow("final output:", roi)
                return scaled_img
        else:
            y = int(points[0][0][1]) - int(qr_data[1])
            x = int(points[0][0][0]) - int(qr_data[4])
            print(" x and y")
            print(x)
            print(y)
            roi = image[
                y : y + height + int(qr_data[3]), x : x + width + int(qr_data[2])
            ]
            final_img = cv2.imwrite("finalized_image.jpg", roi)
            cv2.imshow("finalized image:", final_img)
            return final_img

if __name__ == "__main__":
    image_to_crop = decodeAndCrop("example_input_1.jpg")
    final_image = decodeAndCrop("scaled_img.jpg")
    cv2.imshow("Cropped:", image_to_crop)
    # cv2.imshow("Final: ", final_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
The code above gives the following error:
final_img = cv2.imwrite("finalized_image.jpg", roi)
cv2.error: OpenCV(4.2.0) /Users/travis/build/skvark/opencv-python/opencv/modules/imgcodecs/src/loadsave.cpp:715: error: (-215:Assertion failed) !_img.empty() in function 'imwrite'
End of Latest Update:
An example of the decoded information of a QR code is: 100, 20, 40, 60, 20, px
Now, I need to detect the QR code in this document image, and in the first step I need to compare the size of the QR code in the captured image with the size mentioned in the decoded information. For example, if in the captured image the QR code is 90x90px and the size from the decoded info is 100x100px, we need to compare those.
Then, in the second step, I have to crop the complete image using the Top, Right, Bottom and Left values accordingly. Per the example above, we need to crop the image from the position of the detected QR code by 20px Top, 40px Right, 60px Bottom and 20px Left. I have added an example image below.
I have managed to decode the QR code information, but how can I take the detected QR code area as a separate image, compare its size with the mentioned size, and then crop the image accordingly?
Here's what I have tried so far:
import cv2

image = cv2.imread('/Users/abdul/PycharmProjects/QScanner/images/second.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]

print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')

if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i + 1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255, 0, 0), 5)
        print(points[i][0])
    print(decodedText)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Here's an example image:
and here's a sample input image:
Here's a simple approach using thresholding, morphological operations, and contour filtering.
Obtain binary image. Load image, grayscale, Gaussian blur, Otsu's threshold
Connect individual QR contours. Create a rectangular structuring kernel with cv2.getStructuringElement() then perform morphological operations with cv2.MORPH_CLOSE.
Filter for QR code. Find contours and filter using contour approximation, contour area, and aspect ratio.
Detected QR code
Extracted QR code
From here you can compare the QR code with your reference information
Code
import cv2
import numpy as np

# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9, 9), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Morph close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)

# Find contours and filter for QR code
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    x, y, w, h = cv2.boundingRect(approx)
    area = cv2.contourArea(c)
    ar = w / float(h)
    if len(approx) == 4 and area > 1000 and (ar > .85 and ar < 1.3):
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 3)
        ROI = original[y:y+h, x:x+w]
        cv2.imwrite('ROI.png', ROI)

cv2.imshow('thresh', thresh)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('ROI', ROI)
cv2.waitKey()
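From here, one way to make the comparison concrete (a sketch, not part of the original answer; it assumes the payload format size,top,right,bottom,left,unit described in the question) is to decode the extracted ROI again and check its pixel size against the declared size:

# hypothetical follow-up: decode the extracted ROI and compare sizes
detector = cv2.QRCodeDetector()
data, pts, _ = detector.detectAndDecode(ROI)
if data:
    declared = int(data.split(',')[0])  # declared QR size in px (question's format)
    measured = ROI.shape[1]             # measured QR width in px
    print('declared %dpx vs measured %dpx' % (declared, measured))
    if measured != declared:
        # rescale the whole document so the QR matches its declared size
        scale = declared / float(measured)
        resized = cv2.resize(original, None, fx=scale, fy=scale)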
I got the width and height data using points, compared them with the qr_data size, and then cropped the QR code as needed.
import cv2
import math

image = cv2.imread('/ur/image/directory/qr.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = qr_data[0]
top = qr_data[1]
right = qr_data[2]
bottom = qr_data[3]
left = qr_data[4]

if points is not None:
    pts = len(points)
    print(pts)
    for i in range(pts):
        nextPointIndex = (i + 1) % pts
        cv2.line(image, tuple(points[i][0]), tuple(points[nextPointIndex][0]), (255, 0, 0), 5)
        print(points[i][0])
    width = int(math.sqrt((points[0][0][0] - points[1][0][0]) ** 2 + (points[0][0][1] - points[1][0][1]) ** 2))
    height = int(math.sqrt((points[1][0][0] - points[2][0][0]) ** 2 + (points[1][0][1] - points[2][0][1]) ** 2))

    # Compare the size (qr_data values are strings, so convert before comparing)
    if width == int(qr_data[0]) and height == int(qr_data[0]):
        print("Sizes are equal")
    else:
        print("Width and height " + str(width) + "x" + str(height) + " not equal to "
              + str(qr_data[0]) + "x" + str(qr_data[0]))

    # Add the extension values to points and crop
    y = int(points[0][0][1]) - int(qr_data[1])
    x = int(points[0][0][0]) - int(qr_data[4])
    roi = image[y:y + height + int(qr_data[3]), x:x + width + int(qr_data[2])]
    print(decodedText)
    cv2.imshow("Image", image)
    cv2.imshow("Crop", roi)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")
Result:
So, you mainly have three problems here:
1. The image may be rotated by an angle θ.
2. The sheet may not be flat. (In the images the top edge doesn't look straight, but that should not be a big deal.)
3. The black borders. Will you always have those, or may the background differ? This is important because without cropping them out, you won't be able to get a reasonable result.
I improved your code a little bit and removed the border pixels:
import cv2
import matplotlib.pyplot as plt
import math
import numpy as np

image = cv2.imread('/Users/samettaspinar/Public/im.jpg')
qrCodeDetector = cv2.QRCodeDetector()
decodedText, points, _ = qrCodeDetector.detectAndDecode(image)
qr_data = decodedText.split(',')
qr_size = int(qr_data[0])
top = int(qr_data[1])
right = int(qr_data[2])
bottom = int(qr_data[3])
left = int(qr_data[4])

print(f'Size: {qr_size}' + str(qr_data[5]))
print(f'Top: {top}')
print(f'Right: {right}')
print(f'Bottom: {bottom}')
print(f'Left: {left}')

plt.imshow(image)
plt.show()

# estimate distances between corner points; their average gives the ratio of
# pixels in the image vs qr_size (in the optimal case all dists are equal)
dists = []
if points is not None:
    pts = len(points)
    for i in range(pts):
        p1 = points[i][0]
        p2 = points[(i + 1) % pts][0]
        dists.append(math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2))
        print('line', tuple(p1), tuple(p2))
        # cast the float corner coordinates to int for cv2.line
        image = cv2.line(image, tuple(map(int, p1)), tuple(map(int, p2)), (255, 0, 0), 5)
else:
    print("QR code not detected")

print('distances: ', dists)

# Remove the black border pixels. I had a simple idea for this:
# take the average intensity of the gray image, then count the row/column
# averages in the first half that are below intensity/2; that approximately
# gives the number of black border pixels on the left, etc.
row, col, dim = image.shape
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
inten = np.mean(gray)
x = np.mean(gray, axis=0)  # finds the vertical (per-column) averages
y = np.mean(gray, axis=1)  # finds the horizontal (per-row) averages
bl_left = np.sum([x[:int(col/2)] < inten/2])
bl_right = np.sum([x[int(col/2)+1:] < inten/2])
bl_top = np.sum([y[:int(row/2)] < inten/2])
bl_bottom = np.sum([y[int(row/2)+1:] < inten/2])
print('black margins: ', bl_left, bl_right, bl_top, bl_bottom)

# Estimate how many pixels to crop out
ratio = np.mean(dists) / int(qr_size)
print('actual px / qr_size in px: ', ratio)

top, left, right, bottom = int(top*ratio), int(left*ratio), int(right*ratio), int(bottom*ratio)
top += bl_top
left += bl_left
right += bl_right
bottom += bl_bottom
print('num pixels to be cropped: ', top, left, right, bottom)

image2 = image[top:row-bottom, left:col-right, :]
plt.imshow(image2)
plt.show()
Notice that I ignored the rotation issue. If there is rotation, you can find the angle by calculating the tangent/arctangent where I calculated the distances.
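As a sketch of that idea (my addition, assuming the usual corner order where points[0][0] and points[1][0] are the two top corners): estimate the angle of the QR code's top edge with atan2, then undo it with a rotation about the image center:

# estimate rotation from the QR code's top edge (points[0] -> points[1])
p1, p2 = points[0][0], points[1][0]
angle = math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))

# rotate the image about its center to remove that angle
h, w = image.shape[:2]
M = cv2.getRotationMatrix2D((w / 2, h / 2), angle, 1.0)
deskewed = cv2.warpAffine(image, M, (w, h))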
For QR detection and parsing
import cv2
import sys

filename = sys.argv[1]

# read the QR code image
# if the QR code is not black/white, it is better to convert it to grayscale
img = cv2.imread(filename, 0)  # zero means grayscale
img_origin = cv2.imread(filename)

# initialize the cv2 QRCode detector
detector = cv2.QRCodeDetector()

# detect and decode
data, bbox, straight_qrcode = detector.detectAndDecode(img)

# if there is a QR code
if bbox is not None:
    print(f"QRCode data:\n{data}")
    # length of bounding box; bbox has shape (1, n, 2) with float coordinates,
    # so convert float to int and loop over the first element of the array
    n_lines = len(bbox[0])
    bbox1 = bbox.astype(int)  # float to int conversion
    for i in range(n_lines):
        # draw all lines
        point1 = tuple(bbox1[0][i])
        point2 = tuple(bbox1[0][(i + 1) % n_lines])
        cv2.line(img_origin, point1, point2, color=(255, 0, 0), thickness=2)
    # display the result (img_origin, which carries the drawn lines)
    cv2.imshow("img", img_origin)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
else:
    print("QR code not detected")

Python OpenCV sorting contours clockwise

I'm putting together an image processing tool to follow the deformation of a part using images. The part has rectangular markers that get detected with image segmentation and the cv2.findContours function. The contour centers are then used to calculate distances and bend radii. Everything seems to work fine, but I found that the contours aren't sorted the way I would like them to be when reviewing the results.
The part is repeatedly bent, and the contours are positioned in a circle.
I found this article that describes the sorting horizontally and vertically:
https://www.pyimagesearch.com/2015/04/20/sorting-contours-using-python-and-opencv/
Does anyone have any idea how to sort the contours in a clockwise direction?
The code is below.
import os
import exifread
import cv2
import numpy as np
import scipy
from matplotlib import pyplot as plt
import imutils
import pandas as pd

#---------- INPUT ----------

# Define the image filename
img_filename = 'frame397.jpg'
img_path = img_filename

# Define values for cropping
x = 0
y = 200
w = 1200
h = 800

# Define color values for segmentation
# the values can be probed with GIMP
h1 = 0
s1 = 70
v1 = 120
h2 = 255
s2 = 255
v2 = 255
red_lower = np.array([h1, s1, v1])
red_upper = np.array([h2, s2, v2])

# Define desired area size
# desired area size is pixel count - use GIMP to probe
s1 = 500
s2 = 10000

#---------- PROCESS IMAGES ----------

# Create an empty dataframe for storing results
# in shape of (image_name,time,angle,angle_smooth,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11)
# Define the results dataframe shape and column names
results_df = pd.DataFrame(columns=['image_name','alpha','r1','r2','r3','r4','r5','r6','r7','r8','r9','r10','r11',
                                   'center_dist1', 'center_dist2','center_dist3','center_dist4',
                                   'center_dist5','center_dist6','center_dist7','center_dist8',
                                   'center_dist9','center_dist10','center_dist11'])

# Open image, make it black and white and find contours
img = cv2.imread(img_path)
crop = img[y:y+h, x:x+w]
blur = cv2.blur(crop, (2, 2))
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, red_lower, red_upper)
mask_copy = mask.copy()
cnts = cv2.findContours(mask_copy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# print(cnts)

x = []
y = []

# Loop through contours, calculate the centers and prepare the
# contours and contour centers display
# define the font for the text on the image
font = cv2.FONT_HERSHEY_SIMPLEX
for cnt in cnts:
    area = cv2.contourArea(cnt)
    moment = cv2.moments(cnt)
    if s1 < area < s2:
        print(area)
        c_x = int(moment["m10"] / moment["m00"])
        c_y = int(moment["m01"] / moment["m00"])
        # draw contours
        cv2.drawContours(crop, cnt, -1, (0, 255, 0), 3)
        # draw a circle in the center of every contour; thickness -1 means
        # the circle gets filled in
        cv2.circle(crop, (c_x, c_y), 10, (0, 255, 0), -1)
        # display center coordinates on the image
        string = str(c_x) + ',' + str(c_y)
        cv2.putText(crop, string, (c_x, c_y), font, 0.5, (255, 255, 255), 2)
        x.append(float(c_x))
        y.append(float(c_y))
        print(c_x, c_y)

print(x)
print(y)

# Display image
cv2.namedWindow('Contours', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Contours', 1200, 900)
cv2.imshow('Contours', crop)

# Wait for windows closing
cv2.waitKey() & 0xFF
cv2.destroyAllWindows()
Image is here:
I used OpenCV's minEnclosingCircle to "fit" a circle to the points (it's not actually a fit, but it's good enough for finding a point inside the curvature of the markers). Marking each contour with the angle from its centroid to the circle's center gave me a set of angles that I could sort by.
import cv2
import numpy as np
import math

# 2d distance
def dist2D(one, two):
    dx = one[0] - two[0]
    dy = one[1] - two[1]
    return math.sqrt(dx*dx + dy*dy)

# angle between three points (the last point is the middle)
def angle3P(p1, p2, p3):
    # get distances
    a = dist2D(p3, p1)
    b = dist2D(p3, p2)
    c = dist2D(p1, p2)
    # calculate angle // assume a and b are nonzero
    # (law of cosines)
    numer = c**2 - a**2 - b**2
    denom = -2 * a * b
    if denom == 0:
        denom = 0.000001
    rads = math.acos(numer / denom)
    degs = math.degrees(rads)
    # check if past 180 degrees
    if p1[1] > p3[1]:
        degs = 360 - degs
    return degs

# load image
img = cv2.imread("slinky.jpg")

# rescale
scale = 0.5
h, w = img.shape[:2]
h = int(h * scale)
w = int(w * scale)
img = cv2.resize(img, (w, h))

# change color space
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)

# threshold
thresh = cv2.inRange(a, 140, 255)

# get rid of little dots
kernel = np.ones((3, 3), np.uint8)
thresh = cv2.erode(thresh, kernel, iterations=1)
thresh = cv2.dilate(thresh, kernel, iterations=1)

# contours
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# get centroids
centroids = []
centers = []
for con in contours:
    m = cv2.moments(con)
    cx = int(m['m10'] / m['m00'])
    cy = int(m['m01'] / m['m00'])
    centers.append([cx, cy])
    centroids.append([[cx, cy], con])
    img = cv2.circle(img, (cx, cy), 10, (0, 0, 255), -1)

# find circle around points
# NOTE: this doesn't "fit" a circle to the points;
# I'm just using this to find a "good enough" center
# that's in the direction of the curve
numped = np.array(centers)
(x, y), radius = cv2.minEnclosingCircle(numped)
img = cv2.circle(img, (int(x), int(y)), int(radius), (255, 0, 0), 2)
middle = [x, y]
offshoot = [x + 100, y]

# get angles
angles = []
for cen in centroids:
    center, contour = cen
    angle = angle3P(center, offshoot, middle)
    angles.append([angle, center, contour])

# sort by angle
final = sorted(angles, key=lambda a: a[0], reverse=True)

# pull out just the contours
contours = [clump[2] for clump in final]

# draw contours in order
marked = img.copy()
counter = 0
for con in contours:
    cv2.drawContours(marked, [con], -1, (0, 255, 0), 2)
    cv2.imshow("marked", marked)
    cv2.imwrite("marking_seq/" + str(counter) + ".png", marked)
    counter += 1
    cv2.waitKey(0)

# show
cv2.imshow("orig", img)
cv2.imshow("a", a)
cv2.imshow("thresh", thresh)
cv2.waitKey(0)
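A more direct variant of the same idea (my sketch, not part of the answer): once you have the centroids and the enclosing circle's center, math.atan2 gives each centroid's polar angle in one call, so the three-point angle helper can be skipped:

import math

def clockwise_order(contours, centers, x, y):
    # polar angle of each centroid about the circle center (x, y); because
    # image y grows downward, increasing atan2 angle sweeps clockwise on screen
    pairs = sorted(zip(contours, centers),
                   key=lambda p: math.atan2(p[1][1] - y, p[1][0] - x))
    return [c for c, _ in pairs]

# usage with the variables from the answer above:
# contours = clockwise_order(contours, centers, x, y)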

Self-varying array size in OpenCV tracking program

So, for a school project I have to create an app that makes a tattoo appear on your arm.
At the moment, OpenCV uses the color of the skin to detect which part of the image is skin.
My problem is this: in the last step of the code, where the mask with the tattoo is merged into the video feed, the size of the array changes.
# attempt to save the ROI coordinates
fy1 = y1
fy2 = y1 + tatHeight
fx1 = x1
fx2 = x1 + tatWidth
# create a ROI mask
roi = frame[fy1:fy2, fx1:fx2]
# merge the roi mask with the tattoo and the inverted tattoo masks
roi_bg = cv2.bitwise_and(roi, roi, mask=mask2inv)
roi_fg = cv2.bitwise_and(tatoo, tatoo, mask=mask2)
# merge the background and foreground ROI masks
dst = cv2.add(roi_bg, roi_fg)
# add the merged mask to the video feed
roiColor[fy1:fy2, fx1:fx2] = dst  # the problem is here
I get this error
ValueError: could not broadcast input array from shape (33,2,3) into shape (0,0,3)
Could someone help me figure out why the values of fx and fy change?
You can find the repo with the full code here.
Thanks to anyone who can help.
EDIT: This is the website where I found some inspiration for my code.
EDIT 2: Here is the code:
# USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py

# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

# load tattoo image
imgTatoo = cv2.imread('mustache.png', -1)
tatMask = imgTatoo[:, :, 3]
# create a mask from the image
invTatMask = cv2.bitwise_not(tatMask)
imgTatoo = imgTatoo[:, :, 0:3]

# define original sizes for the tattoo
tatOrigHeight, tatOrigWidth = imgTatoo.shape[:2]

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
                help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (0, 0, 73)
greenUpper = (35, 93, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    frame = cv2.bilateralFilter(frame, 11, 17, 17)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw contour of desired shape
            cv2.drawContours(frame, c, -1, (239, 0, 0), 6)
            # create the smallest box containing that contour
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # draw the box
            cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

            # save the box parameters (center, height, width and angle)
            areaCenter = rect[0]
            areaX, areaY = int(areaCenter[0]), int(areaCenter[1])
            areaSize = rect[1]
            areaHeight = int(areaSize[0])
            areaWidth = int(areaSize[1])
            areaAngle = rect[2]

            # define the tattoo size
            tatWidth = int(0.2 * areaWidth)
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth

            #face = cv2.rectangle(frame,(areaX-areaWidth//4,areaY-areaHeight//4),(areaX+areaWidth//4,areaY+areaHeight//4),(255,0,0),2)
            #roiGray = gray[areaY-areaHeight//2:areaY+areaHeight//2, areaX-areaWidth//2:areaX+areaWidth//2]

            # create a mask from the video feed with the size of the region
            # of interest (box created before)
            roiColor = frame[areaY-areaHeight//2:areaY+areaHeight//2, areaX-areaWidth//2:areaX+areaWidth//2]
            # print(areaX, areaY, areaWidth, areaHeight)
            # print(tatWidth, tatHeight)

            # save the center of the region of interest (ROI)
            x1 = areaX - (tatWidth//2)
            x2 = areaX + (tatWidth//2)
            y1 = areaY - (tatHeight//2)
            y2 = areaY + (tatHeight//2)

            # protect from weird center coordinates (outside of the frame)
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 > areaWidth:
                x2 = areaWidth
            if y2 > areaHeight:
                y2 = areaHeight
            print(x1, x2, y1, y2)

            # resize the tattoo to match the ROI size
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth
            tatWidth = x2 - x1

            # protect from weird (negative) tattoo sizes
            if tatHeight <= 0:
                tatHeight = 1
            if tatWidth <= 0:
                tatWidth = 2
            print(tatHeight)
            print(tatWidth)

            # resize all the masks to the same size in order to merge them
            tatoo = cv2.resize(imgTatoo, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2 = cv2.resize(tatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2inv = cv2.resize(invTatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            print(mask2inv.shape)

            # attempt to save the ROI coordinates
            fy1 = y1
            fy2 = y1 + tatHeight
            fx1 = x1
            fx2 = x1 + tatWidth

            # create a ROI mask
            roi = frame[fy1:fy2, fx1:fx2]
            print(roi.shape)

            # merge the roi mask with the tattoo and the inverted tattoo masks
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask2inv)
            roi_fg = cv2.bitwise_and(tatoo, tatoo, mask=mask2)
            print(roi_bg.shape, roi_fg.shape)

            # merge the background and foreground ROI masks
            dst = cv2.add(roi_bg, roi_fg)
            print("dst: ", dst.shape)
            print("roi: ", roiColor.shape)
            print(fy1, fy2, fy2 - fy1)
            print(fx1, fx2, fx2 - fx1)

            # add the merged mask to the video feed
            roiColor[fy1:fy2, fx1:fx2] = dst

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
OK, so thanks to Dan Masek, I solved the problem by bypassing roiColor, which was useless, with the following line:
frame[fy1:fy2,fx1:fx2]=dst
This ensures that the area I want to paste into is actually contained in the original image.
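For completeness, a hedged sketch of the bounds check implied by that fix (the clamping itself is my assumption, not part of the original fix; variable names follow the question's code): clamp the destination slice to the frame and resize dst to the clamped window, so the broadcast shapes always match:

# clamp the destination window to the frame before pasting
fh, fw = frame.shape[:2]
fy1, fy2 = max(0, fy1), min(fh, fy2)
fx1, fx2 = max(0, fx1), min(fw, fx2)

# resize dst to exactly the clamped window so the shapes broadcast
if fy2 > fy1 and fx2 > fx1:
    frame[fy1:fy2, fx1:fx2] = cv2.resize(dst, (fx2 - fx1, fy2 - fy1))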
