Decode barcodes & QR codes using deep learning - Python

Objective:
I have a drone that has to detect and decode all barcodes and QR codes (small or big) from a distance of 3 meters. Even though I have been able to achieve this with a hand-held camera, the same does not work on a drone, as it is very hard to keep the drone steady.
Research Done:
I searched the internet and found a program by PyImageSearch that draws bounding boxes around a detected barcode. The program consists of two files:
1. simple_barcode_detection.py: processes the received frame and returns the bounding box location of the barcode.
2. detect_barcode.py: sends the input frames to simple_barcode_detection.py and displays the returned frames. (Note: this program successfully drew bounding boxes around the barcode.)
I am attaching the current state of the code which I have built from this.
simple_barcode_detection.py:
# import the necessary packages
import numpy as np
import cv2
import imutils
from pyzbar import pyzbar

global text, box

def detect(image):
    # convert the image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction using OpenCV 2.4
    ddepth = cv2.cv.CV_32F if imutils.is_cv2() else cv2.CV_32F
    gradX = cv2.Sobel(gray, ddepth=ddepth, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=ddepth, dx=0, dy=1, ksize=-1)

    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    # find the contours in the thresholded image
    cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # if no contours were found, return None
    if len(cnts) == 0:
        return None

    # otherwise, sort the contours by area and compute the rotated
    # bounding box of the largest contour
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    rect = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(rect) if imutils.is_cv2() else cv2.boxPoints(rect)
    box = np.int0(box)
    return box

# crop the frame to the detected barcode region, decode it with pyzbar
# and return the decoded text
def process(image, box):
    min_y = int(np.min(box[:, -1]))
    max_y = int(np.max(box[:, -1]))
    min_x = int(np.min(box[:, 0]))
    max_x = int(np.max(box[:, 0]))
    image = image[min_y:max_y, min_x:max_x]

    # decode the cropped region with pyzbar
    barcodes = pyzbar.decode(image)

    # processing the barcode
    for barcode in barcodes:
        (x, y, w, h) = barcode.rect
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
        barcodeData = barcode.data.decode("utf-8")
        barcodeType = barcode.type
        text = "{} ({})".format(barcodeData, barcodeType)
        # return the text of the first decoded barcode
        return text
detect_barcode.py:
# import the necessary packages
import simple_barcode_detection
from imutils.video import VideoStream
import argparse
import time
import cv2
from pyzbar import pyzbar
import imutils

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
args = vars(ap.parse_args())

# if the video path was not supplied, grab the reference to the camera
if not args.get("video", False):
    vs = VideoStream(src=0).start()
    time.sleep(2.0)

# otherwise, load the video
else:
    vs = cv2.VideoCapture(args["video"])

# keep looping over the frames
while True:
    frame = vs.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        break

    # detect the barcode in the frame
    box = simple_barcode_detection.detect(frame)
    if box is not None:
        # `text`, `x` and `y` are not defined anywhere in this file,
        # which causes the NameError shown below
        cv2.putText(frame, text, (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    cv2.imshow("Frame", frame)
    # cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# if we are not using a video file, stop the camera video stream
if not args.get("video", False):
    vs.stop()

# otherwise, release the video file pointer
else:
    vs.release()

# close all windows
cv2.destroyAllWindows()
Error:
line 30, in <module>
    cv2.putText(frame, text, (x, y - 10),
NameError: name 'text' is not defined
Adding to the same, I also found a research paper where YOLO and pyzbar are used together to identify and decode barcodes.
Questions:
Is there any way to take the returned bounding box locations and pass that region to pyzbar so that it scans only that region for barcodes?
How does pyzbar work, and can we create a similar library ourselves?
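To make the question concrete, this is roughly the glue I am trying to write in the main loop of detect_barcode.py (untested; it assumes the detect() and process() functions shown above, and the label placement is my own guess):

# inside the while-loop, after reading `frame`
box = simple_barcode_detection.detect(frame)
if box is not None:
    # process() crops the frame to the box and runs pyzbar on the crop,
    # returning the decoded text (or None if nothing could be decoded)
    text = simple_barcode_detection.process(frame, box)
    # draw the rotated bounding box itself
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    if text is not None:
        # anchor the label at the top-left corner of the box
        x, y = box.min(axis=0)
        cv2.putText(frame, text, (int(x), int(y) - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)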
Reference images:
Drone not able to scan barcode
Drone not able to scan barcodes of this size
Links:
https://www.pyimagesearch.com/2014/12/15/real-time-barcode-detection-video-python-opencv/
https://d1wqtxts1xzle7.cloudfront.net/63796937/rahman-2019-ijais-451835_IJAIS20200701-113597-137d8us-with-cover-page-v2.pdf?Expires=1637915838&Signature=geCHJHKsaMnDCJMzNAz-OHHjxXSdX~rLtTf-MO0gGuNutOnHl5x33q8-Xiab4haQGnhuE8ektKO4Ah2REj9nebwfwnO3GYxBGRqZgqMsK63375AUQV7YsTjJz9Qwp~OwUa9st2h4a6~ep3eSwvCjCX-Dl-g612osElU6ATgdI4DGqqaaat-QuLAmjQqywXTZrTWs0~nLvVeZBLEJRnNbcphtlJPv1yM35Ix2AhiwKhax4X4qCqLR7Wzt3XR5IaW33X3zSPNoo0QhDLEZrEFG0m-Hi156fsq488mC4n6Udyoi8KNhaUxqQ316b7Zru7XF1z1UaBbGU44O4nuI5AtflA__&Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA

Related

OpenCV won't stream/update my video. How do I update the imshow windows? [duplicate]

This question already has answers here:
What does OpenCV's cvWaitKey() function do? (9 answers). Closed 2 years ago.
I am having difficulty getting my video feed to work. I am trying to do simple object detection, roughly following this tutorial, but have come across an issue: for some reason the imshow windows aren't updating, they just keep showing the first frame. Any idea why? I am using cv2.VideoCapture and updating the frames every loop.
From what I can tell, the frames are successfully updating: if I hold my hand up close to the camera, I can see the output values for the frames drop to roughly [0, 0, 0], and when I take it away they shoot back up as color comes back in.
Here is my code:
# Imports
from imutils.video import VideoStream
import numpy as np
import cv2
import imutils
import time

# CONSTANTS
MIN_AREA = 500

vs = cv2.VideoCapture(0)
#vs = VideoStream(src=0).start()
time.sleep(2)

firstFrame = None
secondFrame = None

while True:
    frame = vs.read()
    if frame[0] is False:  # If read returned False, there was no frame to grab.
        print("Error getting frame")
        exit()
    else:  # Gets the image
        frame = frame[1]

    # Resize to make the image less intensive to process
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Convert to gray to make the image easier to run through Gaussian blur.
    gray = cv2.GaussianBlur(gray, (21, 21), 0)  # Smooths out the pixels to get rid of any high variation between pixel intensities in a given region (x, x)

    # Makes sure I am always comparing the last 2 frames
    if firstFrame is None:
        print("Entered 1st")
        firstFrame = gray
        continue
    elif secondFrame is None:
        print("Entered 2nd")
        secondFrame = gray
    else:
        print("Entered else")
        firstFrame = secondFrame
        secondFrame = gray

    # Compute the absolute difference between the current frame and the first frame.
    frameDelta = cv2.absdiff(firstFrame, secondFrame)  # Simple subtraction of pixel intensities
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]  # Only keep changes greater than the threshold given by the 2nd argument.
    thresh = cv2.dilate(thresh, None, iterations=2)
    contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)

    # Loop over the contours.
    for c in contours:
        # If the current contour is too small, ignore it
        if cv2.contourArea(c) < MIN_AREA:
            continue
        # Else a bounding box is drawn around it
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Showing frames
    cv2.imshow("Normal", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)

vs.release()
cv2.destroyAllWindows()
Found the answer!
Apparently, I needed to add this code inside the while loop, right after the imshow calls:

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

Although I don't know why. (As the linked duplicate explains, cv2.waitKey is what gives OpenCV's HighGUI event loop a chance to process window events, so without it the imshow windows never actually repaint.)
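For completeness, a minimal sketch of the end of the loop with the fix in place (the display calls are unchanged from the question):

    # Showing frames
    cv2.imshow("Normal", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)

    # waitKey both polls the keyboard and lets HighGUI repaint the windows
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break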

How to get an image from a camera using OpenCV without making a new .jpg file

I have this code to detect laser points using the OpenCV library, and I had it working when I would feed it a .jpg or .png file as an argument, but now I want to get an image from a camera ("video 0"). I am using Ubuntu 16.04. Here is my code; I marked the problem with ******
Any help would be greatly appreciated:
# import the necessary packages
from imutils import contours
from skimage import measure
import numpy as np
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False,
    help="path to the image file")
args = vars(ap.parse_args())

camera = cv2.VideoCapture(0)

# problem is here ********************************************
ret, image = camera.read()

gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (11, 11), 0)

# threshold the image to reveal light regions in the blurred image
thresh = cv2.threshold(blurred, 200, 255, cv2.THRESH_BINARY)[1]

# perform a series of erosions and dilations to remove
# any small blobs of noise from the thresholded image
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=4)

# perform a connected component analysis on the thresholded
# image, then initialize a mask to store only the "large" components
labels = measure.label(thresh, neighbors=8, background=0)
mask = np.zeros(thresh.shape, dtype="uint8")

# loop over the unique components
for label in np.unique(labels):
    # if this is the background label, ignore it
    if label == 0:
        continue

    # otherwise, construct the label mask and count the number of pixels
    labelMask = np.zeros(thresh.shape, dtype="uint8")
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)

    # if the number of pixels in the component is sufficiently
    # large, then add it to our mask of "large blobs"
    if numPixels > 300:
        mask = cv2.add(mask, labelMask)

# find the contours in the mask, then sort them from left to right
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = contours.sort_contours(cnts)[0]

# loop over the contours
for (i, c) in enumerate(cnts):
    # draw the bright spot on the image
    (x, y, w, h) = cv2.boundingRect(c)
    ((cX, cY), radius) = cv2.minEnclosingCircle(c)
    # x and y center are cX and cY
    cv2.circle(image, (int(cX), int(cY)), int(radius),
        (0, 0, 255), 3)
    cv2.putText(image, "#{}".format(i + 1), (x, y - 15),
        cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
Wrapping your camera capture in a while loop with a break condition might help:
import cv2

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)

    # ADD LOGIC HERE
    print(frame.shape)
    # END

    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Yours is working fine and detects from the video feed. But you can do it another way (here, detecting a face with Haar cascades):
'''
:: Face Detection using Haar Cascades ::
'''
import numpy as np
import cv2, argparse

# set classifiers
face_cascade = cv2.CascadeClassifier(
    '/opt/opencv/main/data/haarcascades/haarcascade_frontalface_default.xml'
)

cam = cv2.VideoCapture(0)
_, img = cam.read()

# load image & convert
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# find faces; if faces are found, it returns the positions
# of detected faces as Rect(x, y, w, h).
faces = face_cascade.detectMultiScale(gray, 1.2, 5)

print("[ INFO:1] Found", len(faces), "face(s) in this image.")

for (x, y, w, h) in faces:
    cv2.rectangle(
        img,
        (x, y),
        (x + w, y + h),
        (255, 100, 25),
        2
    )

cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

Self-variating array size in openCV tracking program

So, for a school project I have to create an app that makes a tattoo appear on your arm.
At the moment OpenCV uses the color of the skin to detect which part of the image is skin.
My problem is this: in the last step of the code, where the mask with the tattoo is merged into the video feed, the size of the array changes.
#attempt to save the ROI coordinates
fy1=y1
fy2=y1+tatHeight
fx1=x1
fx2=x1+tatWidth
#create a ROI mask
roi = frame[fy1:fy2,fx1:fx2]
#merge the roi mask with the tatoo and the inverted tatoo masks
roi_bg = cv2.bitwise_and(roi,roi,mask = mask2inv)
roi_fg = cv2.bitwise_and(tatoo,tatoo,mask = mask2)
#merge the background and foreground ROI masks
dst = cv2.add(roi_bg,roi_fg)
# add the merged mask to the video feed
roiColor[fy1:fy2,fx1:fx2]=dst #the problem is here
I get this error:
ValueError: could not broadcast input array from shape (33,2,3) into shape (0,0,3)
Could someone help me figure out why the values of fx and fy change?
You can find the repo with the full code here.
Thanks to anyone who can help.
EDIT: This is the website where I found some inspiration for my code.
EDIT 2: Here is the code:
# USAGE
# python ball_tracking.py --video ball_tracking_example.mp4
# python ball_tracking.py

# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

# load tatoo image
imgTatoo = cv2.imread('mustache.png', -1)
tatMask = imgTatoo[:, :, 3]
# create a mask from the image
invTatMask = cv2.bitwise_not(tatMask)
imgTatoo = imgTatoo[:, :, 0:3]

# define original sizes for the tatoo
tatOrigHeight, tatOrigWidth = imgTatoo.shape[:2]

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
greenLower = (0, 0, 73)
greenUpper = (35, 93, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)

# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    frame = cv2.bilateralFilter(frame, 11, 17, 17)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw contour of desired shape
            cv2.drawContours(frame, c, -1, (239, 0, 0), 6)
            # create the smallest box containing that contour
            rect = cv2.minAreaRect(c)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            # draw the box
            cv2.drawContours(frame, [box], 0, (0, 0, 255), 2)

            # save the box parameters (center, height, width and angle)
            areaCenter = rect[0]
            areaX, areaY = int(areaCenter[0]), int(areaCenter[1])
            areaSize = rect[1]
            areaHeight = int(areaSize[0])
            areaWidth = int(areaSize[1])
            areaAngle = rect[2]

            # define the tattoo size
            tatWidth = int(0.2 * areaWidth)
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth

            #face = cv2.rectangle(frame,(areaX-areaWidth//4,areaY-areaHeight//4),(areaX+areaWidth//4,areaY+areaHeight//4),(255,0,0),2)
            #roiGray=gray[areaY-areaHeight//2:areaY+areaHeight//2, areaX-areaWidth//2:areaX+areaWidth//2]

            # create a mask from the video feed with the size of the region of interest (box created before)
            roiColor = frame[areaY - areaHeight // 2:areaY + areaHeight // 2, areaX - areaWidth // 2:areaX + areaWidth // 2]
            # print(areaX,areaY,areaWidth,areaHeight)
            # print(tatWidth,tatHeight)

            # save the center of the region of interest (ROI)
            x1 = areaX - (tatWidth // 2)
            x2 = areaX + (tatWidth // 2)
            y1 = areaY - (tatHeight // 2)
            y2 = areaY + (tatHeight // 2)

            # protect from weird center coordinates (outside of the frame)
            if x1 < 0:
                x1 = 0
            if y1 < 0:
                y1 = 0
            if x2 > areaWidth:
                x2 = areaWidth
            if y2 > areaHeight:
                y2 = areaHeight
            print(x1, x2, y1, y2)

            # resize the tattoo to match the ROI size
            tatHeight = tatWidth * tatOrigHeight // tatOrigWidth
            tatWidth = x2 - x1

            # protect from weird (negative) tatoo sizes
            if tatHeight <= 0:
                tatHeight = 1
            if tatWidth <= 0:
                tatWidth = 2
            print(tatHeight)
            print(tatWidth)

            # resize all the masks to the same size in order to merge them
            tatoo = cv2.resize(imgTatoo, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2 = cv2.resize(tatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            mask2inv = cv2.resize(invTatMask, (tatWidth, tatHeight), interpolation=cv2.INTER_AREA)
            print(mask2inv.shape)

            # attempt to save the ROI coordinates
            fy1 = y1
            fy2 = y1 + tatHeight
            fx1 = x1
            fx2 = x1 + tatWidth

            # create a ROI mask
            roi = frame[fy1:fy2, fx1:fx2]
            print(roi.shape)

            # merge the roi mask with the tatoo and the inverted tatoo masks
            roi_bg = cv2.bitwise_and(roi, roi, mask=mask2inv)
            roi_fg = cv2.bitwise_and(tatoo, tatoo, mask=mask2)
            print(roi_bg.shape, roi_fg.shape)

            # merge the background and foreground ROI masks
            dst = cv2.add(roi_bg, roi_fg)
            print("dst: ", dst.shape)
            print("roi: ", roiColor.shape)
            print(fy1, fy2, fy2 - fy1)
            print(fx1, fx2, fx2 - fx1)

            # add the merged mask to the video feed
            roiColor[fy1:fy2, fx1:fx2] = dst

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
Okay, so thanks to Dan Masek, I solved the problem by bypassing roiColor (which was useless here) and writing directly into the frame with the following line:
frame[fy1:fy2,fx1:fx2]=dst
This makes sure that the area I want to paste is indexed in the original image. (The broadcast error happened because fy1, fy2, fx1 and fx2 are coordinates in the full frame, while roiColor is itself only a small crop of that frame, so slicing roiColor with them produced an empty (0, 0, 3) region.)
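A minimal sketch of that paste step with explicit clamping to the frame bounds (variable names follow the code above; the clamping itself is an addition, not something from the original repo):

# paste dst into the full frame, clamping to the frame bounds so the
# destination slice always matches the part of dst that actually fits
h, w = frame.shape[:2]
fy2c = min(fy1 + dst.shape[0], h)
fx2c = min(fx1 + dst.shape[1], w)
frame[fy1:fy2c, fx1:fx2c] = dst[:fy2c - fy1, :fx2c - fx1]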

Object tracking in Python using OpenCV

I'm trying to modify this code to allow tracking of multiple objects of the same color and draw a path where each object travels. Currently the code just tracks the largest object based on color, and the travel path disappears as the object moves around the video. Finally, I could use some guidance on how to create a new video file capturing the paths. This is my first post so I'm not sure if the code was posted correctly, lol. Go easy on me ;)
from collections import deque
import numpy as np
import argparse
import imutils
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
    help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
    help="max buffer size")
args = vars(ap.parse_args())

# define the lower and upper boundaries of the "green"
# object in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
    camera = cv2.VideoCapture(0)

# otherwise, grab a reference to the video file
else:
    camera = cv2.VideoCapture(args["video"])

# keep looping
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()

    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break

    # resize the frame, blur it, and convert it to the HSV
    # color space
    frame = imutils.resize(frame, width=600)
    # blurred = cv2.GaussianBlur(frame, (11, 11), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # construct a mask for the color "green", then perform
    # a series of dilations and erosions to remove any small
    # blobs left in the mask
    mask = cv2.inRange(hsv, greenLower, greenUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    # find contours in the mask and initialize the current
    # (x, y) center of the ball
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None

    # only proceed if at least one contour was found
    if len(cnts) > 0:
        # find the largest contour in the mask, then use
        # it to compute the minimum enclosing circle and
        # centroid
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

        # only proceed if the radius meets a minimum size
        if radius > 10:
            # draw the circle and centroid on the frame,
            # then update the list of tracked points
            cv2.circle(frame, (int(x), int(y)), int(radius),
                (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # update the points queue
    pts.appendleft(center)

    # loop over the set of tracked points
    for i in xrange(1, len(pts)):
        # if either of the tracked points are None, ignore them
        if pts[i - 1] is None or pts[i] is None:
            continue

        # otherwise, compute the thickness of the line and
        # draw the connecting lines
        thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
        cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

    # show the frame to our screen
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
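One rough, untested direction for the multi-object and video-output parts (a sketch, not from the original post): keep one trail per object, attach each new centroid to the nearest existing trail, and write the annotated frames out with cv2.VideoWriter. The matching threshold and minimum contour area below are assumptions.

import numpy as np
import cv2

# persistent state: one list of centroids per tracked object
trails = []

def update_trails(centers, max_dist=50):
    # greedily attach each new centroid to the nearest trail end,
    # otherwise start a new trail for it
    for c in centers:
        best, best_d = None, max_dist
        for t in trails:
            d = np.hypot(c[0] - t[-1][0], c[1] - t[-1][1])
            if d < best_d:
                best, best_d = t, d
        if best is None:
            trails.append([c])
        else:
            best.append(c)

def draw_trails(frame):
    # draw every stored point, so the paths never disappear
    for t in trails:
        for i in range(1, len(t)):
            cv2.line(frame, t[i - 1], t[i], (0, 0, 255), 2)

Inside the existing while loop you would collect the centroid of every contour whose area exceeds a minimum (instead of only max(cnts, key=cv2.contourArea)), call update_trails(centers) and draw_trails(frame), and pass each annotated frame to a cv2.VideoWriter opened with a fourcc such as "MJPG", the video's FPS, and the frame's actual (width, height); call writer.release() when the loop ends.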

Raspberry Pi OpenCV-2.4.X USB camera distance detection

Newb question here... I have been following this guide to detect the distance between an object and the camera.
Here is the code I am currently running:
# import the necessary packages
import numpy as np
import cv2

def find_marker(image):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)

    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = max(cnts, key=cv2.contourArea)

    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 24.0

# initialize the known object width, which in this case, the piece of
# paper is 11 inches wide
KNOWN_WIDTH = 11.0

# initialize the list of images that we'll be using
IMAGE_PATHS = ["images/2ft.png", "images/3ft.png", "images/4ft.png"]

# load the first image that contains an object that is KNOWN TO BE 2 feet
# from our camera, then find the paper marker in the image, and initialize
# the focal length
image = cv2.imread(IMAGE_PATHS[0])
marker = find_marker(image)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

# loop over the images
for imagePath in IMAGE_PATHS:
    # load the image, find the marker in the image, then compute the
    # distance to the marker from the camera
    image = cv2.imread(imagePath)
    marker = find_marker(image)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])

    # draw a bounding box around the image and display it
    box = np.int0(cv2.cv.BoxPoints(marker))
    cv2.drawContours(image, [box], -1, (0, 255, 0), 2)
    cv2.putText(image, "%.2fft" % (inches / 12),
        (image.shape[1] - 200, image.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
        2.0, (0, 255, 0), 3)
    cv2.imshow("image", image)
    cv2.waitKey(0)
It works. However, I am unsure how to use this code to detect the distance between an object and the camera in real time (from video) instead of from a still picture.
I am currently using a Tello drone with similar code. The difference is that I use a video feed, turn the detected contours into rectangles, and track a model rocket launch. I think what you are looking for is the OpenCV code for working with the video frames. This YouTube video uses the Tello video feed and OpenCV to calculate a contour box for a face: https://www.youtube.com/watch?v=LmEcyQnfpDA&t=7253s.
import cv2
from tracker import *
import math, time, numpy as np

# global variables
_w, _h = 0, 0
pid = [.01, .01, 0]
pError = 0

cap = cv2.VideoCapture("rocketVideo.mp4")
loop = True

rocketPositionList = []
rocketPositionListArea = []

# initialize the known distance from the camera to the object (here 480)
KNOWN_DISTANCE = 480

# initialize the known object width (here 5)
KNOWN_WIDTH = 5

inches = 0

# distance tracker from tracker.py
tracker = EuclideanDistTracker()

# object detector
object_detector = cv2.createBackgroundSubtractorMOG2(history=4000, varThreshold=330)

# calculate frame data to get rocket positions in frame
def getFrameCalculation(frame):
    if len(rocketPositionListArea) != 0:
        i = rocketPositionListArea.index(max(rocketPositionListArea))
        return frame, [rocketPositionList[i], rocketPositionListArea[i]]
    else:
        return frame, [[0, 0], 0]

def distance_to_camera(perWidth):
    # compute and return the distance from the marker to the camera
    return (KNOWN_WIDTH * focalLength) / perWidth

while loop:
    # start reading OpenCV video frames; stop when the video runs out
    ret, frame = cap.read()
    if not ret:
        break
    height, width, _ = frame.shape
    w = width
    h = height

    # extract region of interest
    roi = frame[0:1110, 0:720]

    # object detection
    mask = object_detector.apply(roi)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for cnt in contours:
        # calculate area and remove small elements
        area = cv2.contourArea(cnt)

        # get relative area of rocket contour
        if area > 100:
            x, y, w, h = cv2.boundingRect(cnt)
            if x > 210:
                detections.append([x, y, w, h])

    # update box ids from tracker->detections
    boxes_ids = tracker.update(detections)

    # create visual data for boxIds
    for boxes_id in boxes_ids:
        x, y, w, h, id = boxes_id
        cv2.putText(roi, str(id), (x, y - 15), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 3)

        # get circle center and area
        cx = x + w // 2
        cy = y + h // 2
        area = w * h
        cv2.circle(roi, (cx, cy), 4, (0, 0, 255), cv2.FILLED)
        rocketPositionList.append([cx, cy])
        rocketPositionListArea.append(area)

    # get first known focal length
    focalLength = (rocketPositionListArea[0] * KNOWN_DISTANCE) / KNOWN_WIDTH

    # calculate img frame in video
    frame, info = getFrameCalculation(frame)

    feetOut = 0
    if info[1] > 0:
        # get inches from distance to camera
        inches = distance_to_camera(info[1])
        # give data to Tello to operate movement action
        feetOut = inches / 12

    cv2.putText(roi, str(int(feetOut)) + "ft.", (50, 50), cv2.FONT_HERSHEY_PLAIN, 4, (255, 0, 0), 2)

    # display cv2 videos (the resized copies below are assumptions --
    # the original snippet referenced resizeR/resizeM/resizeF without defining them)
    resizeR = cv2.resize(roi, None, fx=0.5, fy=0.5)
    resizeM = cv2.resize(mask, None, fx=0.5, fy=0.5)
    resizeF = cv2.resize(frame, None, fx=0.5, fy=0.5)
    cv2.imshow("ROI", resizeR)
    cv2.imshow("Mask", resizeM)
    cv2.imshow("Frame", resizeF)

    # let the windows refresh and allow quitting with 'q'
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# clean up cv2 and exit
cap.release()
cv2.destroyAllWindows()
exit()
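For the original question, a simpler route is to keep find_marker() and distance_to_camera() from the question exactly as they are and call them on every frame of a capture loop. A rough, untested sketch (the calibration still comes from one reference photo; the image path and camera index are assumptions):

import numpy as np
import cv2

# calibrate the focal length once from a reference photo taken at a known distance
KNOWN_DISTANCE = 24.0   # inches
KNOWN_WIDTH = 11.0      # inches
ref = cv2.imread("images/2ft.png")        # reference image path (assumption)
marker = find_marker(ref)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

cap = cv2.VideoCapture(0)                 # USB camera index (assumption)
while True:
    ret, frame = cap.read()
    if not ret:
        break

    # find the marker in the live frame and convert its apparent width to a distance
    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])

    # draw the rotated box and the distance in feet
    box = np.int0(cv2.boxPoints(marker))  # cv2.cv.BoxPoints(marker) on OpenCV 2.4
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(frame, "%.2fft" % (inches / 12), (30, 30),
        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)

    cv2.imshow("distance", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()

Note that find_marker() will raise an error on frames where Canny finds no contours at all, so in practice you would want to guard that call.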
