Python 3: Tracking movement with OpenCV

So, I've downloaded this source code from http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/:
# import the necessary packages
import argparse
import datetime
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500,
    help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)

# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
firstFrame = None

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    (grabbed, frame) = camera.read()
    text = "Unoccupied"

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
But when I run it, it gives me this error:
Traceback (most recent call last):
File "/Users/luistripa/Downloads/basic-motion-detection/motion_detector.py", line 57, in <module>
cv2.CHAIN_APPROX_SIMPLE)
ValueError: too many values to unpack (expected 2)
Can anyone help me fix this?

The error happens because cv2.findContours returns three values (image, contours, hierarchy) in OpenCV 3.x, but only two (contours, hierarchy) in 2.4 and 4.x. Try adding [-2:] at the end, which keeps just the last two return values and works on any version:

(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)[-2:]
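For readability you can also wrap the version difference in a small helper; a minimal sketch (the helper name is mine, and imutils.grab_contours does the same job if you already have imutils installed):

import cv2

def find_contours_compat(image, mode, method):
    # OpenCV 2.4 and 4.x return (contours, hierarchy); 3.x returns
    # (image, contours, hierarchy). Slicing keeps the last two either way.
    contours, hierarchy = cv2.findContours(image, mode, method)[-2:]
    return contours, hierarchy

# usage in the loop above:
# cnts, _ = find_contours_compat(thresh.copy(), cv2.RETR_EXTERNAL,
#     cv2.CHAIN_APPROX_SIMPLE)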

Related

How to create an image of a Licence Plate?

I am using Tesseract 5.3.0. With the code below I am able to identify the licence plate and mask it out; however, when I resize the result, the licence plate portion does not get any larger. How do I grab just the licence plate portion and enlarge it? Any tips on reading the licence plate would also be appreciated.
import cv2
import numpy as np
import imutils
import pytesseract

pytesseract.pytesseract.tesseract_cmd = r'/usr/local/bin/tesseract'

# Load the image and convert it to grayscale
image = cv2.imread('/Users/PythonProg/IMG_4592.JPG')
if image is None:
    print("Error: Could not load the image")
    exit()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Apply a bilateral filter to reduce noise and smooth the image
gray = cv2.bilateralFilter(gray, 13, 15, 15)
edged = cv2.Canny(gray, 30, 200)

contours = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(contours)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
screenCnt = None

# Loop over the contours and find the one with the largest area
licence_plate = None
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.018 * peri, True)
    if len(approx) == 4:
        screenCnt = approx
        break

if screenCnt is None:
    detected = 0
    print("No contour detected")
else:
    detected = 1

if detected == 1:
    cv2.drawContours(image, [screenCnt], -1, (0, 0, 255), 3)
    cv2.imwrite('/Users/PythonProg/output.jpg', image)
    mask = np.zeros(gray.shape, np.uint8)
    licence_plate = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
    licence_plate = cv2.bitwise_and(image, image, mask=mask)
    cv2.imwrite('/Users/anthonywilson/PythonProg/mask.jpg', licence_plate)

# Resize the masked image to a specific size
resized = cv2.resize(licence_plate, (400, 200), interpolation=cv2.INTER_AREA)

# Save the resized image
cv2.imwrite('/Users/PythonProg/resized.jpg', resized)
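The resize does not enlarge the plate because it scales the whole masked image, in which the plate still occupies the same small fraction of the frame. A minimal sketch of cropping just the plate region before enlarging, assuming screenCnt holds the plate contour as in the code above (the output path and the --psm value are illustrative):

# crop the plate region from the original image using its bounding box
(x, y, w, h) = cv2.boundingRect(screenCnt)
plate = image[y:y + h, x:x + w]

# enlarging the crop now actually enlarges the plate itself
plate_big = cv2.resize(plate, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
cv2.imwrite('/Users/PythonProg/plate.jpg', plate_big)

# OCR often does better on a grayscale crop treated as a single text line
plate_gray = cv2.cvtColor(plate_big, cv2.COLOR_BGR2GRAY)
print(pytesseract.image_to_string(plate_gray, config='--psm 7'))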

Problem with opencv (cv2) detecting motion in my code

I have written some code to detect motion through a webcam. It runs, but it detects the entire screen as moving instead of just the objects moving in it. I noticed that if I close the camera, the detection seems to go away. Here is the code:
import cv2

first_frame = None
video = cv2.VideoCapture(0)

while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
The code should detect only the moving objects in the frame.
I had a similar issue a while back when I started with OpenCV. The problem is the first frame: the camera captured a dark frame while it was still starting up. The difference computed by cv2.absdiff between that first frame (first_frame) and the subsequent frames (gray) was then large enough that cv2.findContours flagged the whole screen. This is likely caused by camera start-up delay.
It can be solved by adding a slight delay, with time.sleep(), between when the camera loads and when the program records the first frame (first_frame). Try this:
import cv2, time

first_frame = None
video = cv2.VideoCapture(0)

# the camera has some lag time, hence the video.read() and sleep before the loop
video.read()
time.sleep(2)

while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
The time.sleep() takes care of the camera's start-up lag. Try the code; it worked here.
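As a side note, a single static first frame will also drift if the lighting changes after start-up. A minimal sketch of an adaptive alternative using OpenCV's built-in MOG2 background subtractor (the history and threshold values here are illustrative, not tuned):

import cv2

video = cv2.VideoCapture(0)
# MOG2 maintains a running background model instead of one fixed first frame
subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16)

while True:
    check, frame = video.read()
    if not check:
        break
    # the foreground mask keeps improving as the background model adapts
    mask = subtractor.apply(frame)
    # drop shadow pixels (marked 127) and keep confident foreground (255)
    _, mask = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)
    cv2.imshow("Foreground Mask", mask)
    if cv2.waitKey(5) == ord('q'):
        break

video.release()
cv2.destroyAllWindows()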

I need help fixing an error when calling tracker.update with GOTURN tracking

I am learning to use the GOTURN OpenCV API to track objects, following a guide from LearnOpenCV. After initializing the tracker and entering the loop, I get this error when updating the tracker:
ok, bbox = tracker.update(frame)
Traceback (most recent call last):
File "<ipython-input-64-e7c5a34c2f7a>", line 1, in <module>
ok, bbox = tracker.update(frame)
error: OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\dnn\src\layers\convolution_layer.cpp:282: error: (-2:Unspecified error) Number of input channels should be multiple of 3 but got 1 in function 'cv::dnn::ConvolutionLayerImpl::getMemoryShapes'
I am not sure what "channels" means here. I get the frame from a video file and its shape is (rows, width, 3), so I thought the number of channels was 3, but that does not work. I also tried converting the frame to grayscale, with shape (rows, width), but that does not work either.
Below is my code:
import cv2
import sys

background_path = 'images/images_G1_323/background.png'
background_img = cv2.imread(background_path, cv2.IMREAD_GRAYSCALE)
#cv2.imshow('background image', background_img)

tracker = cv2.TrackerGOTURN_create()

video_path = 'videos/G1_323.avi'
cap = cv2.VideoCapture(video_path)
#fgbg = cv2.createBackgroundSubtractorMOG2()
if cap.isOpened() == False:
    print('ERROR FILE NOT FOUND OR WRONG CODEC USED!')
    sys.exit()

# Read first frame
ok, frame = cap.read()
ok, frame = cap.read()
ok, frame = cap.read()
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if not ok:
    print('Cannot read video file')
    sys.exit()

#motion = fgbg.apply(frame)
motion = cv2.absdiff(background_img, frame_gray)
_, thresh1 = cv2.threshold(motion, 10, 255, cv2.THRESH_BINARY)
#gray = cv2.cvtColor(thresh1, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(thresh1, (5, 5), 0)
thresh = cv2.threshold(blurred, 30, 255, cv2.THRESH_BINARY)[1]
erosion_size = 10
dilate_size = 14
thresh = cv2.erode(thresh, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erosion_size, erosion_size)))
thresh = cv2.dilate(thresh, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (dilate_size, dilate_size)))
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
c = contours[0]
(x, y, w, h) = cv2.boundingRect(c)
bbox = (x, y, w, h)

# Initialize tracker with first frame and bounding box
ok = tracker.init(frame, bbox)

while cap.isOpened():
    # if ok is True then cap.read() succeeded
    ok, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if ok == True:
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame_gray, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame_gray, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display tracker type on frame
        cv2.putText(frame_gray, "GOTURN Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame_gray, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display result
        cv2.imshow("Tracking", frame_gray)
        # Exit if 'q' pressed
        if cv2.waitKey(100) & 0xFF == ord("q"):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()
I'm working on the same kind of thing, so I hope some of what I ran into can help you. I think you should not convert the frame to grayscale, as the network is asking for 3 channels (BGR instead of gray). And even if you leave the grayscale conversion out, thresh = cv2.threshold(blurred, 30, 255, cv2.THRESH_BINARY)[1] still produces a single-channel image, so make sure the image you actually pass to tracker.init() and tracker.update() is the original 3-channel frame, not one of those single-channel intermediates. Maybe it will then work?
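If part of your pipeline leaves you with a grayscale image, a minimal sketch of restoring 3 channels before the tracker sees it (assuming, as the error message suggests, that the GOTURN network expects 3-channel input):

# inside the loop, after reading or processing a frame:
if len(frame.shape) == 2:
    # stack the single gray plane back into a 3-channel BGR image
    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
ok, bbox = tracker.update(frame)  # the tracker always receives 3 channels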

Separation of bounding boxes

In this problem we are trying to detect people in a webcam video in real time. The code works fine for one person, but when more than one person enters the frame it fails miserably. Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
background = None

while True:
    ret, frame = cap.read()
    gray = frame.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)
    if background is None:
        background = gray
        continue
    delta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(delta, 5, 255,
        cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.dilate(thresh, kernel, iterations=2)
    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        continue
    #areas = [cv2.contourArea(c) for c in contours]
    #max_index = np.argmax(areas)
    #cnt = contours[max_index]
    #(x, y, w, h) = cv2.boundingRect(cnt)
    #if (1.0 * (w * h) / (640 * 480) < 0.75):
    #    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
    #    print("Area: ", w * h)
    for i in range(len(contours)):
        (x, y, w, h) = cv2.boundingRect(contours[i])
        if w * h <= 90000:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
    #cv2.imshow('thresh', thresh)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break

cap.release()
cv2.destroyAllWindows()
I think the problem is that the code is not able to separate the contours of the different people detected, but that may not be the only reason. Can someone help me?
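One likely contributor, though not confirmed from the question alone: the very low threshold of 5 combined with two dilation passes tends to merge nearby people into a single blob, and the w * h <= 90000 filter then drops the merged box entirely. A minimal sketch of extracting blobs separately with cv2.connectedComponentsWithStats instead of findContours (the area limits are illustrative):

import cv2
import numpy as np

def boxes_from_mask(thresh, min_area=2000, max_area=90000):
    # label each 8-connected white blob in the binary mask separately
    num, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh, connectivity=8)
    boxes = []
    for i in range(1, num):  # label 0 is the background
        x = stats[i, cv2.CC_STAT_LEFT]
        y = stats[i, cv2.CC_STAT_TOP]
        w = stats[i, cv2.CC_STAT_WIDTH]
        h = stats[i, cv2.CC_STAT_HEIGHT]
        area = stats[i, cv2.CC_STAT_AREA]
        if min_area <= area <= max_area:
            boxes.append((x, y, w, h))
    return boxes

Using fewer dilation iterations (or a smaller kernel) before calling this also helps keep two people from fusing into one component in the first place.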

Raspberry Pi OpenCV : src is not a numpy array, neither a scalar

I am trying to get my Raspberry Pi B+ to use a USB webcam to measure the distance between it and an object of fixed width (11.0 inches).
I am following this guide, but instead of using static images I am using the video feed from my webcam.
This is the code I am trying to run:
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np

def find_marker(frame):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = max(cnts, key=cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

#======================================================================
# main is here

# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 24.0
# initialize the known object width, which in this case, the piece of
# paper is 11 inches wide
KNOWN_WIDTH = 11.0
frame = cv2.VideoCapture(0)
marker = find_marker(frame)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])

# loop over the frames of the video
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break
    # resize the frame, find the marker, and compute the distance
    frame = imutils.resize(frame, width=500)
    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # draw a bounding box around the marker and display it
    box = np.int0(cv2.cv.BoxPoints(marker))
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(frame, "%.2fft" % (inches / 12),
        (frame.shape[1] - 200, frame.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
        2.0, (0, 255, 0), 3)
    cv2.imshow("Frame", frame)
    cv2.waitKey(0)
However, this is the output I get when I try to run it:
Traceback (most recent call last):
File "testcam.py", line 39, in <module>
marker = find_marker(frame)
File "testcam.py", line 10, in find_marker
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
TypeError: src is not a numpy array, neither a scalar
I am new to opencv, so I am unsure what this error means..
What you are doing is:

frame = cv2.VideoCapture(0)

cv2.VideoCapture(0) initializes the capture device (the camera). To fetch a frame from it you need to call read() on the capture object; instead, you passed the capture object itself to find_marker(), which is why cv2.cvtColor complains that src is not a numpy array. It should be:

capForFocal = cv2.VideoCapture(0)
_, frame = capForFocal.read()
capForFocal.release()
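A slight extension of the same fix: read() also returns a success flag, and checking it avoids passing None into find_marker() when the camera is not ready (the error message text here is illustrative):

capForFocal = cv2.VideoCapture(0)
ok, frame = capForFocal.read()
capForFocal.release()
if not ok or frame is None:
    raise RuntimeError("could not grab a calibration frame from the camera")
marker = find_marker(frame)  # now receives an actual image array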
