AttributeError: module 'cv2.cv2' has no attribute 'TrackerMOSSE_create' - python

Following Dan's suggestion, I edited this post.
The error occurs while setting up the MOSSE tracker. I don't know why it happens, because I installed opencv-contrib-python==4.5.1.48; however, even after installing it, the error is still there. The only tracker available is MIL, but I realized the two trackers serve different purposes. I also tried tracker = cv2.legacy.TrackerMOSSE_create() as Spyke suggested, but nothing changed.
This is my code:
import cv2

cap = cv2.VideoCapture(0)
tracker = cv2.TrackerMOSSE_create()
success, img = cap.read()
# select a bounding box ( ROI )
bbox = cv2.selectROI("Tracking", img, False)
tracker.init(img, bbox)

def drawBox(img, bbox):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 3, 1)
    cv2.putText(img, "Tracking", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

while True:
    timer = cv2.getTickCount()
    success, img = cap.read()
    success, bbox = tracker.update(img)
    if success:
        drawBox(img, bbox)
    else:
        cv2.putText(img, "Loss", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    cv2.putText(img, str(int(fps)), (75, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow("Tracking", img)
    if cv2.waitKey(1) & 0xff == ord('q'):
        break

Actually, the latest versions of OpenCV have TrackerMOSSE_create under the legacy class.
So, instead of this:
tracker = cv2.TrackerMOSSE_create()
use:
tracker = cv2.legacy.TrackerMOSSE_create()

TrackerMOSSE is indeed under the legacy class, as @Spyke stated. However, instead of:
tracker = cv2.legacy.TrackerMOSSE_create()
I was able to run the MOSSE tracker in OpenCV 4.5.1 with a different syntax:
tracker = cv2.legacy_TrackerMOSSE.create()
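Since the exposed name varies between builds, a small probing helper can smooth this over. This is only a sketch that tries the constructor spellings mentioned in the answers above, in order; it is not an official API:

import cv2

def create_mosse_tracker():
    # Try the spellings reported for different opencv-contrib-python builds;
    # which one exists depends on the installed OpenCV version.
    candidates = (
        lambda: cv2.legacy.TrackerMOSSE_create(),   # 4.5.1+ contrib builds
        lambda: cv2.legacy_TrackerMOSSE.create(),   # alternative spelling reported above
        lambda: cv2.TrackerMOSSE_create(),          # older contrib builds
    )
    for make in candidates:
        try:
            return make()
        except AttributeError:
            continue
    raise RuntimeError("MOSSE tracker not found; is opencv-contrib-python installed?")

tracker = create_mosse_tracker()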

Related

OpenCV code I tried cannot detect some ArUco markers

As I learned from tutorial videos, I tried this code:
import numpy as np
import cv2

ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
}
# In fact there are more dictionary keys than written above. I deleted them to shorten the question.

def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
            cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
            print("[Inference] ArUco marker ID: {}".format(markerID))
    return image

img = cv2.imread('markers.jpg', 1)
# the first parameter will change according to the name of the photo
aruco_type = ["DICT_4X4_50",
              "DICT_4X4_100",
              "DICT_4X4_250",
              ]
for i in aruco_type:
    arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[i])
    arucoParams = cv2.aruco.DetectorParameters()
    corners, ids, rejected = cv2.aruco.ArucoDetector(arucoDict, arucoParams).detectMarkers(img)
    detected_markers = aruco_display(corners, ids, rejected, img)
    cv2.imshow("Image", detected_markers)
    cv2.waitKey(0)

cv2.destroyAllWindows()
This code can detect most of the markers, but it still fails to detect some ArUco markers, like these:
[image: ArUco1]
[image: ArUco2]
How can I solve this issue?
If it can detect some of them, I don't understand why it cannot detect other ArUco markers in the same image.
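One avenue worth trying (a hedged suggestion, not a guaranteed fix) is tuning the detector parameters: the code above uses cv2.aruco.DetectorParameters() with all defaults, and the adaptive-thresholding window sweep plus corner refinement often decide whether small, blurred, or unevenly lit markers are found. A sketch with illustrative values; tune them for your own image:

import cv2

arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
arucoParams = cv2.aruco.DetectorParameters()
# Sweep a wider range of adaptive-threshold window sizes than the defaults;
# the exact numbers here are assumptions to experiment with, not known-good values.
arucoParams.adaptiveThreshWinSizeMin = 3
arucoParams.adaptiveThreshWinSizeMax = 53
arucoParams.adaptiveThreshWinSizeStep = 4
# Refine corners to subpixel accuracy for more stable detections
arucoParams.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

detector = cv2.aruco.ArucoDetector(arucoDict, arucoParams)
corners, ids, rejected = detector.detectMarkers(cv2.imread('markers.jpg'))
print(0 if ids is None else len(ids), "markers detected")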

How to run a command once when a face is detected in OpenCV

I am trying to make it so that when a face is detected, it runs a Python command once instead of spamming it, for example print():
import os
import time

try:
    import cv2

    cap = cv2.VideoCapture(0)
    pTime = 0
    cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    face_detected = True
    while True:
        success, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        face_count = 0
        cv2.putText(img, f'FPS:{int(fps)}', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            face_count = face_count + 1
            cv2.putText(img, 'Face num ' + str(face_count), (x-10, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, f'Faces Detected: {face_count}', (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        if face_count > 0:  # If one or more faces were detected
            print("a face was detected")
        elif face_count == 0:  # If no faces were detected
            print("the face magically disappeared")
        cv2.imshow("Face Recognition", img)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            cv2.destroyAllWindows()
            break
except KeyboardInterrupt:
    print("[KeyboardInterrupt] Exiting...")
    time.sleep(2)
    exit()
How can I make it print only once while a face is detected, instead of spamming (without using something like \r)?
You need to introduce state and react only when it changes.
Without copy-pasting your entire code, I'll just show you the idea.
# the state variable
seeing_faces = False

while True:
    frame = get_frame()
    faces = detect_faces(frame)
    there_are_faces_now = (len(faces) > 0)
    # compare state to current detection, different? state CHANGES
    if there_are_faces_now and not seeing_faces:
        print("NOW I see faces")
    elif seeing_faces and not there_are_faces_now:
        print("no longer seeing faces")
    else:
        pass  # no change
    # update state
    seeing_faces = there_are_faces_now
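To see this edge-detection behaviour in isolation, here is a tiny self-contained sketch that replays a hypothetical sequence of per-frame face counts (the numbers are made up for illustration) and prints only on state changes:

# hypothetical per-frame face counts, just to exercise the state machine
simulated_counts = [0, 0, 2, 2, 1, 0, 0, 3]

seeing_faces = False
for count in simulated_counts:
    there_are_faces_now = count > 0
    if there_are_faces_now and not seeing_faces:
        print("NOW I see faces")          # fires once per appearance
    elif seeing_faces and not there_are_faces_now:
        print("no longer seeing faces")   # fires once per disappearance
    seeing_faces = there_are_faces_now

Running it prints three lines in total, even though faces are visible in four of the eight frames.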
You can track the number of detected faces every frame and execute the command you want when the number of faces increases:
import os
import time

try:
    import cv2

    cap = cv2.VideoCapture(0)
    pTime = 0
    cascPath = os.path.dirname(cv2.__file__) + "/data/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    face_detected = True
    currentFaces = 0
    while True:
        success, img = cap.read()
        img = cv2.flip(img, 1)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        face_count = 0
        cv2.putText(img, f'FPS:{int(fps)}', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
            face_count = face_count + 1
            cv2.putText(img, 'Face num ' + str(face_count), (x-10, y-10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(img, f'Faces Detected: {face_count}', (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        print(f"Faces Detected: {face_count}", end="\r")
        #if face_count > 0:  # If one or more faces were detected
        #    print("a face was detected")
        #elif face_count == 0:  # If no faces were detected
        #    print("the face magically disappeared")
        if face_count > currentFaces:
            print("Command to execute, face detected")
        currentFaces = face_count
        cv2.imshow("Face Recognition", img)
        if cv2.waitKey(1) & 0xFF == ord(' '):
            cv2.destroyAllWindows()
            break
except KeyboardInterrupt:
    print("[KeyboardInterrupt] Exiting...")
    time.sleep(2)
    exit()
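One caveat with this approach: the command fires on every frame-to-frame increase, so a flickering detection can still trigger it several times. A hedged refinement (an addition, not part of the answer above) is to also enforce a minimum interval between triggers:

import time

last_trigger = 0.0
TRIGGER_COOLDOWN = 5.0  # seconds; an illustrative value, tune to taste

def maybe_trigger(face_count, current_faces):
    # Fire only when the count increases AND the cooldown has elapsed
    global last_trigger
    if face_count > current_faces and time.time() - last_trigger >= TRIGGER_COOLDOWN:
        print("Command to execute, face detected")
        last_trigger = time.time()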

How to multithread without causing the app to freeze or lag?

Hey, I started learning Python not long ago. Right now I'm creating (or more likely trying to create) a face and motion detection script based on the OpenCV library. Unfortunately I've been stuck for a few days because I can't solve a problem with what I guess is called multithreading.
Here is my code:
import time
import cv2
import datetime
from discord_webhook import DiscordWebhook
import threading

faceCascade = cv2.CascadeClassifier("face_recognition.xml")
# define a video capture object
video_capture = cv2.VideoCapture(0)
#writing video
frame_width = int(video_capture.get(3))
frame_height = int(video_capture.get(4))
# Define the codec and create VideoWriter object. The output is stored in 'outpy.avi' file.
out = cv2.VideoWriter(datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") + '.avi',
                      cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width, frame_height))

#screenshot when detectors get triggered
def screenshot():
    cv2.imwrite('screenshot.png', video_capture.read()[1])

#webhook notify
def alert():
    webhook = DiscordWebhook(url="", rate_limit_retry=True,
                             content='!ALERT!')
    webhook.execute()

while(True):
    # Capture the video frame by frame
    ret, frame = video_capture.read()
    text = "not detected"
    text1 = "not detected"
    timestamp = datetime.datetime.now()
    #face recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if int(format(len(faces))) > 0:
        #print("Found {0} faces!".format(len(faces)))
        text = "detected"
    else:
        text = "not detected"
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    #motion detection
    ret, frame1 = video_capture.read()
    difference = cv2.absdiff(frame, frame1)  # find the difference between the frames
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)  # create threshold
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) < 5000:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text1 = "detected"
    # Display the resulting frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Face status: {}".format(text), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Motion status: {}".format(text1), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 1)
    #check if motion is detected if not change status text
    if text1 == "not detected":
        text1 = "detected"
    else:
        text1 = "not detected"
    out.write(frame)
    cv2.imshow('Press Q to quit', frame)
    # the 'q' button is set as the quit button
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop release the cap object
video_capture.release()
out.release()
cv2.destroyAllWindows()
I'm trying to send a webhook and take a screenshot every 3 minutes if motion is detected, but the loops completely lag the app. I tried time.sleep and thread timers, but they only freeze or lag the app. If someone can explain how to solve this problem, I'd be very thankful. Have a great day or night.
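A sketch of one common pattern for this (hedged, with illustrative stand-ins for the question's screenshot() and alert() helpers): keep the capture/display loop free of blocking work, run the webhook and screenshot in a short-lived background thread, and gate it with a timestamp instead of time.sleep:

import threading
import time

ALERT_COOLDOWN = 180.0  # seconds; the question's "every 3 minutes"
_last_alert = 0.0

def _notify():
    # Stand-in for the question's screenshot() + alert() helpers; any
    # blocking work (disk write, webhook HTTP call) belongs here.
    print("taking screenshot and firing webhook...")
    time.sleep(2)  # simulate the slow network call
    print("done; the main loop never blocked")

def motion_alert():
    # Call this from the detection loop; spawns at most one worker per cooldown.
    global _last_alert
    now = time.time()
    if now - _last_alert >= ALERT_COOLDOWN:
        _last_alert = now
        # daemon thread so a hung webhook can't keep the process alive on exit
        threading.Thread(target=_notify, daemon=True).start()

# in the question's loop this would be, roughly: if text1 == "detected": motion_alert()

Because the worker thread runs independently, cv2.imshow and cv2.waitKey in the main loop keep updating while the webhook call is in flight.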

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is the code that I have used for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output and the code does not produce any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math

#global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000  #Adjust this value according to your usage
BinarizationThreshold = 70  #Adjust this value according to your usage
OffsetRefLines = 150  #Adjust this value according to your usage

#Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

#Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
    camera = cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
    camera.set(3, 640)
    camera.set(4, 480)
    (ret, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    #if cannot grab a frame, this program ends here.
    if not ret:
        break
This is the part of the code I have used to display the lines and frame for object detection and object counting:
    #gray-scale conversion and Gaussian blur filter applying
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue
    #Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
    #Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    QttyOfContours = 0
    #plot reference lines (entrance and exit lines)
    CoorYEntranceLine = (height / 2) - OffsetRefLines
    CoorYExitLine = (height / 2) + OffsetRefLines
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)
    #check all found contours
    for c in cnts:
        #if a contour has small area, it'll be ignored
        if cv2.contourArea(c) < MinCountourArea:
            continue
        QttyOfContours = QttyOfContours + 1
        #draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        #find object's centroid
        CoordXCentroid = (x + x + w) / 2
        CoordYCentroid = (y + y + h) / 2
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
        if (CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            EntranceCounter += 1
        if (CheckExitLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            ExitCounter += 1
    print("Total contours found: " + str(QttyOfContours))
    #Write entrance and exit counter values on frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow('Salida', Frame)
    cv2.waitKey(1)

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
The correct code (the key difference from the code above: the stream URL http://192.168.1.6:8080/video is opened once with cap.open(path) before the loop, instead of constructing a new cv2.VideoCapture for every frame):
import cv2
import numpy as np
import math

def nothing(x):
    pass

width = 0
height = 0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000

cap = cv2.VideoCapture(0)
path = "http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None

#Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

#Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0

#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    #frame = cv2.imread('smarties.png')
    if cap.isOpened():
        rval, frame = cap.read()
        while rval:
            rval, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
            if ReferenceFrame is None:
                ReferenceFrame = hsv
                continue
            #Background subtraction and image binarization
            FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
            FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            #Dilate image and find all the contours
            FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
            cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            QttyOfContours = 0
            #plot reference lines (entrance and exit lines)
            cv2.line(frame, (0, 170), (2000, 170), (255, 0, 0), 5)
            cv2.line(frame, (0, 470), (2000, 470), (0, 0, 255), 5)
            #check all found contours
            for c in cnts:
                #if a contour has small area, it'll be ignored
                if cv2.contourArea(c) < MinCountourArea:
                    continue
                QttyOfContours = QttyOfContours + 1
                #draw a rectangle "around" the object
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                #find object's centroid
                CoordXCentroid = int(x + x + w) / 2
                CoordYCentroid = int(y + y + h) / 2
                ObjectCentroid = (x, y)
                cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)
                if (CheckEntranceLineCrossing(CoordYCentroid, 170, 470)):
                    EntranceCounter += 1
                if (CheckExitLineCrossing(CoordYCentroid, 170, 470)):
                    ExitCounter += 1
            print("Total contours found: " + str(QttyOfContours))
            #Write entrance and exit counter values on frame and show it
            cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
            cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
            imS = cv2.resize(frame, (400, 400))  # Resize image
            #imSS = cv2.resize(mask, (200, 200))
            #imSSS = cv2.resize(frame, (200, 200))
            cv2.imshow("frame", imS)
            #cv2.imshow("mask", imSS)
            #cv2.imshow("res", imSSS)
            key = cv2.waitKey(1)
            if key == 27:
                break
    break  # leave the outer loop once the stream loop ends

cap.release()
cv2.destroyAllWindows()

Using Raspberry Pi 3 with Pi-Camera and OpenCV to do People Counting From the Top

I have been stuck for some time now. Does anyone know of any links that can help me with using:
(Hardware) Raspberry Pi 3 connected to a Pi Cam, NOT a webcam
Then, using both pieces of hardware mentioned above, I want to use any available software (I'm guessing OpenCV) to do people counting from the top.
Example video: https://www.youtube.com/watch?v=BszUJXLR2oA
Almost all available examples using the Raspberry Pi to do people counting from the top don't use the Pi Cam; webcams are big and bulky. So if there is any tutorial or similar available, please help. Thank you.
==========================================================================
What I tried:
So the problem I'm having is that I have sample code that uses OpenCV with a webcam. Instructions can be found here: https://www.hackster.io/deligence-technologies/person-counting-system-using-opencv-and-python-faf14f
That code uses a USB webcam on the line I commented with "#HERE i need to use the pi cam instead", which calls cv2.VideoCapture(0). I need to know how to use the Pi Cam instead. Any ideas?
import argparse
import datetime
import imutils
import math
import cv2
import numpy as np

width = 800
textIn = 0
textOut = 0

def testIntersectionIn(x, y):
    res = -450 * x + 400 * y + 157500
    if ((res >= -550) and (res < 550)):
        print(str(res))
        return True
    return False

def testIntersectionOut(x, y):
    res = -450 * x + 400 * y + 180000
    if ((res >= -550) and (res <= 550)):
        print(str(res))
        return True
    return False

camera = cv2.VideoCapture(0)  #HERE i need to use the pi cam instead
firstFrame = None

while True:
    (grabbed, frame) = camera.read()
    text = "Unoccupied"
    if not grabbed:
        break
    frame = imutils.resize(frame, width=width)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for c in cnts:
        if cv2.contourArea(c) < 12000:
            continue
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.line(frame, (width / 2, 0), (width, 450), (250, 0, 1), 2)  #blue line
        cv2.line(frame, (width / 2 - 50, 0), (width - 50, 450), (0, 0, 255), 2)  #red line
        rectagleCenterPont = ((x + x + w) / 2, (y + y + h) / 2)
        cv2.circle(frame, rectagleCenterPont, 1, (0, 0, 255), 5)
        if (testIntersectionIn((x + x + w) / 2, (y + y + h) / 2)):
            textIn += 1
        if (testIntersectionOut((x + x + w) / 2, (y + y + h) / 2)):
            textOut += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    cv2.putText(frame, "In: {}".format(str(textIn)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, "Out: {}".format(str(textOut)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
                (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
    cv2.imshow("Security Feed", frame)

camera.release()
cv2.destroyAllWindows()
You say you read the link I posted in my comment, but that is obviously not the case.
For clarity, this tutorial shows you how to do what you want, and you need to read the code in the article and bring it into your code base.
What you are trying to do in your code is open the first USB webcam attached to your Raspberry Pi. You do that on this line here:
camera = cv2.VideoCapture(0) #HERE i need to use the pi cam instead
As your comment does indeed state.
What you need to do is instead use the PiCamera library, like this:
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
rawCapture = PiRGBArray(camera)
# allow the camera to warmup
time.sleep(0.1)
# grab an image from the camera
camera.capture(rawCapture, format="bgr")
image = rawCapture.array
# display the image on screen and wait for a keypress
cv2.imshow("Image", image)
cv2.waitKey(0)
The above example and tutorial should get you up and running with the basics, you can then modify the Hackster tutorial that you are following to use the PiCamera instead.
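For a video loop rather than a single still, the picamera library also provides capture_continuous; a hedged sketch of wiring it into a frame-processing loop like the Hackster one (the resolution and framerate values here are illustrative assumptions):

# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

# initialize the camera with illustrative settings
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)  # allow the camera to warm up

# grab frames continuously; each iteration yields one BGR frame
for capture in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    frame = capture.array
    # ... run the person-counting logic from the tutorial on `frame` here ...
    cv2.imshow("Frame", frame)
    rawCapture.truncate(0)  # clear the stream for the next frame
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()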
