Problems making a gradient from yellow to white - Python

My task is this: starting from a white sheet, add text to it, embed a picture, and draw a gradient. I have figured out how to add the text and embed the image, but I do not understand how to make the gradient, for example from yellow to white or from black to white. I would also like to understand how to move the cloud. The final picture should look like the attached example.
My code:
import cv2
white_list = cv2.imread('python_snippets/external_data/probe.jpg')
cloud = cv2.imread('python_snippets/external_data/weather_img/cloud.jpg')
white_list[:cloud.shape[0], :cloud.shape[1]] = cloud
font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
thickness = 2
cv2.putText(white_list, '+5', org, font,
            fontScale, color, thickness, cv2.LINE_AA)
cv2.imshow('img', white_list)
cv2.waitKey(0)
cv2.destroyAllWindows()

def painting_background(self, color, white_list):
    r, g, b = color
    for x in range(white_list.shape[1]):
        if r < 255:
            r += 1
        if g < 255:
            g += 1
        if b < 255:
            b += 1
        self.white_list[:, x:x + 1] = (r, g, b)

def maker(self, color, degree):
    x = 0
    y = 400
    weather_icon = self.weather_icons[color]
    self.painting_background(color, self.white_list)
    self.white_list[x:x + weather_icon.shape[0], y:y + weather_icon.shape[1]] = weather_icon
    cv2.putText(self.white_list, degree, (360, 150), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
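One way to get a smooth gradient, rather than stepping each channel by 1 per column, is to interpolate between the two colors with NumPy. Here is a minimal sketch, assuming BGR channel order as OpenCV uses; the canvas size and colors are placeholders:

import numpy as np
import cv2

def gradient(start_bgr, end_bgr, height, width):
    # interpolation weight per column: 0.0 at the left edge, 1.0 at the right
    t = np.linspace(0, 1, width).reshape(1, width, 1)
    start = np.array(start_bgr, dtype=np.float64).reshape(1, 1, 3)
    end = np.array(end_bgr, dtype=np.float64).reshape(1, 1, 3)
    row = start * (1 - t) + end * t  # blended colors, shape (1, width, 3)
    return np.repeat(row, height, axis=0).astype(np.uint8)

# yellow is (0, 255, 255) in BGR; fade it to white from left to right
img = gradient((0, 255, 255), (255, 255, 255), 400, 600)
cv2.imshow('gradient', img)
cv2.waitKey(0)
cv2.destroyAllWindows()

As for moving the cloud: it is pasted wherever the destination slice points, so changing the offsets moves it, e.g. white_list[y0:y0 + cloud.shape[0], x0:x0 + cloud.shape[1]] = cloud for any (x0, y0) inside the sheet.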

Related

OpenCV Color Detection in video ROI

I am trying to figure out the color in a specific ROI of a traffic-light video. Although the code predicts the color correctly, it does not do it for the specific ROI I am looking at.
Initially, when the traffic video starts, the ROI region has no red/green/yellow lights, but the code still predicts and shows RED based on other areas. What am I doing wrong?
I have uploaded the test video here for testing: https://ufile.io/ha20buns
Python code below.
import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Videos\Sample.mp4')
while True:
    ret, frame = cap.read()
    if ret == False:
        break
    frame = cv2.resize(frame, (1920, 1080))
    # Extract required section from entire frame
    roiColor = cv2.rectangle(frame.copy(), (1022, 565), (1411, 709), (255, 255, 255), 2)  # For SampleTL.mp4
    blcolor = (255, 0, 0)
    cv2.rectangle(frame, (1022, 565), (1411, 709), blcolor)
    hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)
    # red
    lower_hsv_red = np.array([157, 177, 122])
    upper_hsv_red = np.array([179, 255, 255])
    mask_red = cv2.inRange(hsv, lowerb=lower_hsv_red, upperb=upper_hsv_red)
    red_blur = cv2.medianBlur(mask_red, 7)
    # green
    lower_hsv_green = np.array([49, 79, 137])
    upper_hsv_green = np.array([90, 255, 255])
    mask_green = cv2.inRange(hsv, lowerb=lower_hsv_green, upperb=upper_hsv_green)
    green_blur = cv2.medianBlur(mask_green, 7)
    # yellow
    lower_hsv_yellow = np.array([15, 150, 150])
    upper_hsv_yellow = np.array([35, 255, 255])
    mask_yellow = cv2.inRange(hsv, lowerb=lower_hsv_yellow, upperb=upper_hsv_yellow)
    yellow_blur = cv2.medianBlur(mask_yellow, 7)
    # The masks are binary images: if any pixel is white (255), np.max returns 255
    red_color = np.max(red_blur)
    green_color = np.max(green_blur)
    yellow_color = np.max(yellow_blur)
    if red_color == 255:
        print('red')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 0, 255), 2)  # Draw a rectangular frame by coordinates
        cv2.putText(frame, "red", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)  # red text information
    elif green_color == 255:
        print('green')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "green", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    elif yellow_color == 255:
        print('yellow')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "yellow", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0), 2)
    cv2.imshow('frame', frame)
    red_blur = cv2.resize(red_blur, (300, 200))
    green_blur = cv2.resize(green_blur, (300, 200))
    yellow_blur = cv2.resize(yellow_blur, (300, 200))
    # cv2.imshow('red_window', red_blur)
    # cv2.imshow('green_window', green_blur)
    # cv2.imshow('yellow_window', yellow_blur)
    c = cv2.waitKey(10)
    if c == 27:
        break
cap.release()
cv2.destroyAllWindows()  # destroy all opened windows
cv2.rectangle doesn't crop the image; it returns the whole image with a rectangle drawn on it. Try this instead:
roiColor = frame[565:709, 1022:1411]
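Putting it together, the masks should then be computed from the cropped region only. A minimal sketch of the adjusted loop body, reusing the bounds and HSV thresholds from the question:

# Crop the ROI first (rows 565:709, columns 1022:1411), then threshold only that region
roi = frame[565:709, 1022:1411]
hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask_red = cv2.inRange(hsv, lower_hsv_red, upper_hsv_red)
# np.max is now 255 only if red pixels exist inside the ROI
if np.max(cv2.medianBlur(mask_red, 7)) == 255:
    print('red')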

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is the code that I have used for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output, and the code does not raise any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000      # Adjust this value according to your usage
BinarizationThreshold = 70  # Adjust this value according to your usage
OffsetRefLines = 150        # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
    camera = cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
    camera.set(3, 640)
    camera.set(4, 480)
    (ret, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    # if we cannot grab a frame, the program ends here.
    if not ret:
        break
This is the part of the code I have used to display the lines and frame for object detection and counting:
    # gray-scale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue
    # Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]
    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    QttyOfContours = 0
    # plot reference lines (entrance and exit lines); integer division keeps pixel coordinates
    CoorYEntranceLine = (height // 2) - OffsetRefLines
    CoorYExitLine = (height // 2) + OffsetRefLines
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)
    # check all found contours
    for c in cnts:
        # if a contour has a small area, it'll be ignored
        if cv2.contourArea(c) < MinCountourArea:
            continue
        QttyOfContours = QttyOfContours + 1
        # draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # find the object's centroid
        CoordXCentroid = (x + x + w) // 2
        CoordYCentroid = (y + y + h) // 2
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
        if (CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            EntranceCounter += 1
        if (CheckExitLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine)):
            ExitCounter += 1
    print("Total contours found: " + str(QttyOfContours))
    # Write entrance and exit counter values on the frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow('Salida', Frame)
    cv2.waitKey(1)

# When everything is done, release the capture
camera.release()
cv2.destroyAllWindows()
The correct code. The key change is to open the stream once before the loop, instead of constructing a new VideoCapture around shot.jpg on every iteration:
import cv2
import numpy as np
import math

def nothing(x):
    pass

width = 0
height = 0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000

cap = cv2.VideoCapture(0)
path = "http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if ((AbsDistance <= 2) and (y < CoorYExitLine)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if ((AbsDistance <= 2) and (y > CoorYEntranceLine)):
        return 1
    else:
        return 0

#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    # frame = cv2.imread('smarties.png')
    if cap.isOpened():
        rval, frame = cap.read()
        while rval:
            rval, frame = cap.read()
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # (named hsv but actually grayscale)
            hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
            if ReferenceFrame is None:
                ReferenceFrame = hsv
                continue
            # Background subtraction and image binarization
            FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
            FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]
            # Dilate image and find all the contours
            FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
            cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            QttyOfContours = 0
            # plot reference lines (entrance and exit lines)
            cv2.line(frame, (0, 170), (2000, 170), (255, 0, 0), 5)
            cv2.line(frame, (0, 470), (2000, 470), (0, 0, 255), 5)
            # check all found contours
            for c in cnts:
                # if a contour has a small area, it'll be ignored
                if cv2.contourArea(c) < MinCountourArea:
                    continue
                QttyOfContours = QttyOfContours + 1
                # draw a rectangle "around" the object
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # find the object's centroid
                CoordXCentroid = (x + x + w) // 2
                CoordYCentroid = (y + y + h) // 2
                ObjectCentroid = (x, y)
                cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)
                if (CheckEntranceLineCrossing(CoordYCentroid, 170, 470)):
                    EntranceCounter += 1
                if (CheckExitLineCrossing(CoordYCentroid, 170, 470)):
                    ExitCounter += 1
            print("Total contours found: " + str(QttyOfContours))
            # Write entrance and exit counter values on the frame and show it
            cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
            cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
                        cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
            imS = cv2.resize(frame, (400, 400))  # Resize image
            # imSS = cv2.resize(mask, (200, 200))
            # imSSS = cv2.resize(frame, (200, 200))
            cv2.imshow("frame", imS)
            # cv2.imshow("mask", imSS)
            # cv2.imshow("res", imSSS)
            key = cv2.waitKey(1)
            if key == 27:
                break
cap.release()
cv2.destroyAllWindows()
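The decisive difference from the question's version is that the stream is opened once and frames are only read inside the loop, rather than reconnecting to shot.jpg for every frame, which is slow and can fail silently. A minimal sketch of that open-once pattern, using the same placeholder URL:

import cv2

cap = cv2.VideoCapture()
cap.open("http://192.168.1.6:8080/video")  # open the stream once
while cap.isOpened():
    ret, frame = cap.read()  # then only read frames inside the loop
    if not ret:
        break
    cv2.imshow("stream", frame)
    if cv2.waitKey(1) == 27:  # ESC quits
        break
cap.release()
cv2.destroyAllWindows()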

How to change Color of the Rectangle and Color of the Font in Bounding Box of a Detected Object

I want to ask how to change the color of the bounding box and the font of a detected object once it has passed a line. I am currently working on a project on human walking-speed estimation using a Haar cascade. The program works as follows: the detected person passes two imaginary lines, and when they pass the second line the program shows the speed. If the detected person's speed is below 3 km/h, the bounding box and the font should be shown in RED, but if it is more than 3 km/h they should be shown in GREEN. I also want the speed text to be shown for 5 seconds.
Hope you can help me solve this. Here's the program that I've worked on.
import time
import cv2

cascade_src = 'haarcascade_fullbody.xml'
video_src = 'video-1.mp4'

# line a
ax1 = 15
ay = 225
ax2 = 600
# line b
bx1 = 15
by = 275
bx2 = 600
# person number
i = 1
start_time = time.time()

# video ....
cap = cv2.VideoCapture(video_src)
human_cascade = cv2.CascadeClassifier(cascade_src)
videoWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
videoHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('wisuda-14.mp4', fourcc, 25.0, (videoWidth, videoHeight))

def Speed_Cal(time):
    try:
        Speed = (9.144 * 3600) / (time * 1000)
        return Speed
    except ZeroDivisionError:
        print(5)

while True:
    ret, img = cap.read()
    if (type(img) == type(None)):
        break
    # blurring for a more exact detection
    blurred = cv2.blur(img, ksize=(3, 3))
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    human = human_cascade.detectMultiScale(gray, scaleFactor=1.04865, minNeighbors=6)
    # line a
    cv2.line(img, (ax1, ay), (ax2, ay), (255, 0, 0), 2)
    # line b
    cv2.line(img, (bx1, by), (bx2, by), (255, 0, 0), 2)
    for (x, y, w, h) in human:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        roi_blurred = blurred[x: x + h, y: y + w]
        roi_gray = gray[x: x + h, y: y + w]
        roi_img = img[x: x + h, y: y + w]
        cv2.circle(img, (int((x + x + w) / 2), int((y + y + h) / 2)), 2, (0, 255, 0), -1)
        # cv2.putText(img, "ID : " + str(i), (x, y - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
        while int(ay) == int((y + y + h) / 2):
            start_time = time.time()
            break
        while int(ay) <= int((y + y + h) / 2):
            if int(by) <= int((y + y + h) / 2) and int(by + 10) >= int((y + y + h) / 2):
                cv2.line(img, (bx1, by), (bx2, by), (0, 255, 0), 2)
                Speed = Speed_Cal(time.time() - start_time)
                print("ID Number " + str(i) + " Speed: " + str(int(Speed)))
                i = i + 1
                cv2.putText(img, "Speed: " + str(int(Speed)) + "km/jam", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                break
            else:
                break
    out.write(img)
    cv2.imshow('video', img)
    cv2.imshow('Gray', gray)
    cv2.imshow('Blurr', blurred)
    if cv2.waitKey(33) == 27:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
I really do hope you can help me.
In cv2.rectangle(img, (x,y), (x + w, y + h), (0, 0, 255), 2), the 3-element tuple (0, 0, 255) is the color of the bounding rectangle in BGR order (OpenCV uses BGR, not RGB); changing the values changes the color. For more information on bounding rectangles, check out the OpenCV drawing functions doc: https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html
As for the text color, in cv2.putText(img, "Speed: "+str(int(Speed))+"km/jam", (x,y), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2), changing the tuple (255,0,0) changes the text color.
For showing the text for 5 seconds, record a start time and keep drawing while fewer than 5 seconds have elapsed:
import time

start = time.time()
sec = 5
while True:
    if condition:  # placeholder: e.g. the person just crossed line b
        start = time.time()
    if time.time() - start < sec:
        # do whatever, e.g. keep drawing the speed text
        pass
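Combining the two, here is a hypothetical sketch (Speed, img, x, y, w, h are the question's variables; the 3 km/h threshold comes from the question) that picks the color from the speed:

# choose a BGR color by speed threshold: red below 3 km/h, green otherwise
box_color = (0, 0, 255) if Speed < 3 else (0, 255, 0)
cv2.rectangle(img, (x, y), (x + w, y + h), box_color, 2)
cv2.putText(img, "Speed: " + str(int(Speed)) + " km/h", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, box_color, 2)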

Text within image in OpenCV

I was trying to put some text on an image using OpenCV, but the text is too long for one line and runs off the image instead of wrapping to the next line. I could hardcode the line breaks, but I was looking for a more dynamic solution. How do I work around this issue?
import cv2
import numpy as np

def get_text(img, text):
    sh = img.shape
    txt = np.ones(shape=sh)
    fontface = cv2.FONT_HERSHEY_SIMPLEX
    fontscale = 1
    thickness = 2
    color = (0, 0, 0)
    orig = (10, 100)
    linetype = cv2.LINE_AA
    txt = cv2.putText(txt, text, orig, fontface, fontscale, color, thickness, linetype)
    txt = txt.astype("uint8")
    return txt
import textwrap
import cv2
import numpy as np

def get_text(img, text):
    sh = img.shape
    txt = np.ones(shape=sh)
    fontface = cv2.FONT_HERSHEY_SIMPLEX
    color = (0, 0, 0)
    linetype = cv2.LINE_AA
    font_size = 1
    font_thickness = 2
    # break the text into lines of at most 35 characters
    wrapped_text = textwrap.wrap(text, width=35)
    for i, line in enumerate(wrapped_text):
        # measure the rendered size of this line to compute its position
        textsize = cv2.getTextSize(line, fontface, font_size, font_thickness)[0]
        gap = textsize[1] + 10
        y = int((img.shape[0] + textsize[1]) / 2) + i * gap
        x = int((img.shape[1] - textsize[0]) / 2)  # center the line horizontally
        txt = cv2.putText(txt, line, (x, y), fontface,
                          font_size,
                          color,
                          font_thickness,
                          lineType=linetype)
    txt = txt.astype("uint8")
    return txt
Try this; it may require some adjustment, but the idea is to use textwrap.wrap(text, width=35).
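For reference, textwrap.wrap greedily packs whole words into lines of at most width characters and returns them as a list of strings:

import textwrap
print(textwrap.wrap("The quick brown fox jumps over the lazy dog", width=16))
# ['The quick brown', 'fox jumps over', 'the lazy dog']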
In this code I simply split the string into parts according to the image width.
# Python program to explain the cv2.putText() method
import cv2
import math
import textwrap

path = r'YOUR PATH OF IMAGE'
image = cv2.imread(path)
window_name = 'Image'
font = cv2.FONT_HERSHEY_SIMPLEX
zero = 5
one = 50
org = (zero, one)
fontScale = 1
color = (255, 0, 0)
thickness = 2
imageHeight = image.shape[0]
imageWidth = image.shape[1]
print("width:", imageWidth)
# approximate pixel size of one character at this font scale
sizeofnumpix = min(imageWidth, imageHeight) / (25 / fontScale)
stringputt = 'YOUR STRING'
i = len(stringputt)
print(i)
if i * sizeofnumpix > imageWidth * 2:
    # split the string into n parts that each fit within the image width
    n = math.ceil(i * sizeofnumpix / (imageWidth * 2))
    part_size = math.ceil(i / n)
    txt = textwrap.wrap(stringputt, part_size)
    for l in txt:
        image = cv2.putText(image, l, org, font, fontScale, color, thickness, cv2.LINE_AA)
        zero = 5
        one = one + math.ceil(sizeofnumpix)  # move the origin down one line
        org = (zero, one)
else:
    image = cv2.putText(image, stringputt, org, font, fontScale, color, thickness, cv2.LINE_AA)
# Save the image with the text
cv2.imwrite('IMGWITHTXT.png', image)

How to track 1 object instead of multiple?

I wish to alter this code to track a single large object in motion, i.e. a person. When I run the code as is, the display tracks one person as multiple objects rather than as a single object.
Ignoring the Firebase part, I want to draw the rectangle around the entire object rather than around parts of it.
I also wish to change the orientation of the reference lines on the display from horizontal to vertical (see the sketch after the code).
import datetime
import math
import cv2
import numpy as np
from firebase import firebase

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
min_area = 3000       # Adjust this value according to your usage
_threshold = 70       # Adjust this value according to your usage
OffsetRefLines = 150  # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def check_entrance_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_entrance)
    if ((abs_distance <= 2) and (y < coor_y_exit)):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def check_exit_line_crossing(y, coor_y_entrance, coor_y_exit):
    abs_distance = abs(y - coor_y_exit)
    if ((abs_distance <= 2) and (y > coor_y_entrance)):
        return 1
    else:
        return 0

camera = cv2.VideoCapture(0)
# force 640x480 webcam resolution
camera.set(3, 640)
camera.set(4, 480)
ReferenceFrame = None

# Frames may be discarded while the camera adjusts to the light
for i in range(0, 20):
    (grabbed, Frame) = camera.read()

while True:
    (grabbed, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    # if we cannot grab a frame, the program ends here.
    if not grabbed:
        break
    # gray-scale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue
    # Background subtraction and image manipulation
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, _threshold, 255, cv2.THRESH_BINARY)[1]
    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    _, cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    qtty_of_count = 0
    # plot reference lines (entrance and exit lines)
    coor_y_entrance = (height // 2) - OffsetRefLines
    coor_y_exit = (height // 2) + OffsetRefLines
    cv2.line(Frame, (0, coor_y_entrance), (width, coor_y_entrance), (255, 0, 0), 2)
    cv2.line(Frame, (0, coor_y_exit), (width, coor_y_exit), (0, 0, 255), 2)
    # check all found contours
    for c in cnts:
        # if a contour has a small area, it'll be ignored
        if cv2.contourArea(c) < min_area:
            continue
        qtty_of_count = qtty_of_count + 1
        app = firebase.FirebaseApplication('https://finalyearproj-caa49.firebaseio.com/', None)
        ## result = app.post('/people', {'count': qtty_of_count})##
        update = app.put('/people', "count", qtty_of_count)
        print("Updated value in FB" + str(update))
        # draw a rectangle "around" the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # find the object's centroid
        coor_x_centroid = (x + x + w) // 2
        coor_y_centroid = (y + y + h) // 2
        ObjectCentroid = (coor_x_centroid, coor_y_centroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)
        if (check_entrance_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            EntranceCounter += 1
        if (check_exit_line_crossing(coor_y_centroid, coor_y_entrance, coor_y_exit)):
            ExitCounter += 1
    print("Total contours found: " + str(qtty_of_count))
    # Write entrance and exit counter values on the frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow("Original Frame", Frame)
    cv2.waitKey(1)

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
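Two sketches in the direction of an answer, not a tested solution. First, to get one box instead of many, you can merge every sufficiently large contour into a single bounding box (the variables are those of the code above):

# hypothetical sketch: union of all large contours -> one bounding box
boxes = [cv2.boundingRect(c) for c in cnts if cv2.contourArea(c) >= min_area]
if boxes:
    x1 = min(x for x, y, w, h in boxes)
    y1 = min(y for x, y, w, h in boxes)
    x2 = max(x + w for x, y, w, h in boxes)
    y2 = max(y + h for x, y, w, h in boxes)
    cv2.rectangle(Frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

Second, for vertical reference lines, offset along x instead of y and compare the centroid's x coordinate in the crossing checks:

# hypothetical sketch: vertical entrance/exit lines
coor_x_entrance = (width // 2) - OffsetRefLines
coor_x_exit = (width // 2) + OffsetRefLines
cv2.line(Frame, (coor_x_entrance, 0), (coor_x_entrance, height), (255, 0, 0), 2)
cv2.line(Frame, (coor_x_exit, 0), (coor_x_exit, height), (0, 0, 255), 2)
# then call the crossing checks with coor_x_centroid instead of coor_y_centroid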
