Real-Time Measurement Tools with OpenCV and Python

I am trying to build a real-time measurement algorithm. My problem is shown in the picture below. [Image: the warped sheet with two edges and one diagonal measured]
I need both diagonal measurements, but so far I have only found the two edges and one diagonal. When I try to compute the other diagonal, I get an error: the calculation always starts from one corner (corner 0), and whenever I try to start from another corner, an IndexError is raised.
I can't understand why I can't start from another corner.
Measurement code
import cv2
import utlis

###################################
webcam = True
path = '1.jpg'
cap = cv2.VideoCapture(0)
cap.set(10, 160)   # brightness
cap.set(3, 1920)   # frame width
cap.set(4, 1080)   # frame height
scale = 3
wP = 210 * scale   # A4 paper width (mm) times scale
hP = 297 * scale   # A4 paper height (mm) times scale
###################################

while True:
    if webcam:
        success, img = cap.read()
    else:
        img = cv2.imread(path)

    imgContours, conts = utlis.getContours(img, minArea=50000, filter=4)
    if len(conts) != 0:
        biggest = conts[0][2]
        print(biggest)
        imgWarp = utlis.warpImg(img, biggest, wP, hP)
        imgContours2, conts2 = utlis.getContours(imgWarp,
                                                 minArea=2000, filter=4,
                                                 cThr=[50, 50], draw=False)
        if len(conts2) != 0:   # was len(conts): check the inner contours instead
            for obj in conts2:
                cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utlis.reorder(obj[2])
                # nPoints has shape (4, 1, 2): corner i is nPoints[i][0].
                # Index 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.
                nW = round((utlis.findDis(nPoints[0][0] // scale, nPoints[1][0] // scale) / 10), 1)
                nH = round((utlis.findDis(nPoints[0][0] // scale, nPoints[2][0] // scale) / 10), 1)
                nQ = round((utlis.findDis(nPoints[0][0] // scale, nPoints[3][0] // scale) / 10), 1)
                # Second diagonal: top-right to bottom-left. The original indices
                # nPoints[2][2] / nPoints[3][3] were out of bounds, which is what
                # raised the IndexError when starting from another corner.
                nQ2 = round((utlis.findDis(nPoints[1][0] // scale, nPoints[2][0] // scale) / 10), 1)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[3][0][0], nPoints[3][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                # Second diagonal, starting from the top-right corner:
                cv2.arrowedLine(imgContours2, (nPoints[1][0][0], nPoints[1][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgContours2, '{}cm'.format(nW), (x + 30, y - 10),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nH), (x - 70, y + h // 2),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nQ), (x + 200, y + 200),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
        cv2.imshow('A4', imgContours2)

    img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
    cv2.imshow('Original', img)
    cv2.waitKey(1)
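The fix above hinges on the shape of reorder()'s output. Since every corner lives at nPoints[i][0], a small hypothetical helper (not part of the original code) makes it possible to measure between any two corners without touching the raw indices:

def corner(nPoints, i):
    # Each row of reorder()'s output has shape (1, 2); the point itself is at index 0.
    return nPoints[i][0]

def side_cm(nPoints, i, j, scale):
    # Distance between corners i and j of the warped contour, in cm.
    return round(utlis.findDis(corner(nPoints, i) // scale,
                               corner(nPoints, j) // scale) / 10, 1)

# For example, both diagonals:
# d1 = side_cm(nPoints, 0, 3, scale)   # top-left  -> bottom-right
# d2 = side_cm(nPoints, 1, 2, scale)   # top-right -> bottom-left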
utlis.py
import cv2
import numpy as np

def getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):
    # Edge detection, then dilate/erode to close gaps in the edges
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    if showCanny:
        cv2.imshow('Canny', imgThre)
    contours, hierarchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    finalContours = []
    for i in contours:
        area = cv2.contourArea(i)
        if area > minArea:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            bbox = cv2.boundingRect(approx)
            if filter > 0:
                # keep only contours with exactly `filter` corners (4 = rectangles)
                if len(approx) == filter:
                    finalContours.append([len(approx), area, approx, bbox, i])
            else:
                finalContours.append([len(approx), area, approx, bbox, i])
    finalContours = sorted(finalContours, key=lambda x: x[1], reverse=True)
    if draw:
        for con in finalContours:
            cv2.drawContours(img, con[4], -1, (0, 0, 255), 3)
    return img, finalContours

def reorder(myPoints):
    # Sorts the 4 corners into a fixed order:
    # index 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right
    myPointsNew = np.zeros_like(myPoints)
    myPoints = myPoints.reshape((4, 2))
    add = myPoints.sum(1)
    myPointsNew[0] = myPoints[np.argmin(add)]   # smallest x+y -> top-left
    myPointsNew[3] = myPoints[np.argmax(add)]   # largest x+y  -> bottom-right
    diff = np.diff(myPoints, axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)]  # smallest y-x -> top-right
    myPointsNew[2] = myPoints[np.argmax(diff)]  # largest y-x  -> bottom-left
    return myPointsNew

def warpImg(img, points, w, h, pad=20):
    points = reorder(points)
    pts1 = np.float32(points)
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]
    return imgWarp

def findDis(pts1, pts2):
    # Euclidean distance between two (x, y) points
    return ((pts2[0] - pts1[0]) ** 2 + (pts2[1] - pts1[1]) ** 2) ** 0.5
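To see why the calculation always starts from corner 0, here is reorder applied to a hypothetical set of unordered corners (values chosen only for illustration):

import numpy as np

# Hypothetical unordered corners, as approxPolyDP might return them
pts = np.array([[[400, 50]], [[50, 50]], [[400, 300]], [[50, 300]]])
print(reorder(pts).reshape(4, 2))
# [[ 50  50]    index 0: top-left     (smallest x+y)
#  [400  50]    index 1: top-right    (smallest y-x)
#  [ 50 300]    index 2: bottom-left  (largest y-x)
#  [400 300]]   index 3: bottom-right (largest x+y)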

Related

Saving the IDs for cars with the help of centroids; the IDs don't get removed or updated

I am using OpenCV and a trained model, and I am trying to assign IDs to the cars with the help of centroids. There are the following scenarios:
at the beginning, all recognized cars get a centroid and an ID
cars that leave the frame should have their IDs removed
new cars that enter the frame (later) should get new IDs
I have the ID code from a YouTube video, but it doesn't work as it should. The remove method removes everything. If I leave the update method on, the cars get new IDs every frame; that should not happen.
I am new to programming and I would appreciate it if someone could help me out.
import cv2
import numpy as np
import pandas as pd
import math

video_name = "videos.mp4"
cap = cv2.VideoCapture(video_name)
net = cv2.dnn.readNetFromONNX("best.onnx")
classes = ['car', 'free_space']

count = 0
center_points_prev_frame = []
tracking_objects = {}
track_id = 0

while True:
    ret, img = cap.read()
    # ret, frame1 = cap.read()
    count += 1
    if img is None:
        break
    img = cv2.resize(img, (1500, 1000))
    # frame1 = cv2.resize(frame1, (1500, 1000))

    blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255,
                                 size=(640, 640),
                                 mean=[0, 0, 0, 0],
                                 swapRB=True,
                                 crop=False)
    net.setInput(blob)
    detections = net.forward()[0]
    # print(detections.shape)
    # Row layout: cx, cy, w, h, confidence, class_score
    classes_ids = []
    confidences = []
    boxes = []
    rows = detections.shape[0]
    img_width, img_height = img.shape[1], img.shape[0]
    x_scale = img_width / 640
    y_scale = img_height / 640

    for i in range(rows):
        row = detections[i]
        confidence = row[4]
        if confidence > 0.3:
            classes_score = row[5:]
            ind = np.argmax(classes_score)
            if classes_score[ind] > 0.3:
                classes_ids.append(ind)
                confidences.append(confidence)
                cx, cy, w, h = row[:4]
                x1 = int((cx - w / 2) * x_scale)
                y1 = int((cy - h / 2) * y_scale)
                # print("X1:", x1, "Y1", y1)
                width = int(w * x_scale)
                height = int(h * y_scale)
                box = np.array([x1, y1, width, height])
                boxes.append(box)

    # apply Non-Maximum Suppression
    indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.3, 0.3)

    # Center points of the current frame
    center_points_cur_frame = []
    for i in indices:
        x1, y1, w, h = boxes[i]
        label = classes[classes_ids[i]]
        conf = confidences[i]
        text = label + "{:.2f}".format(conf)
        if label == 'car':
            car_coordinates = [(x1, y1), (x1 + w, y1 + h)]
            # cv2.rectangle(img, (x1, y1), (x1 + w, y1 + h), (51, 51, 255), 2)
            # center points
            cx = int((x1 + x1 + w) / 2)
            cy = int((y1 + y1 + h) / 2)
            cv2.circle(img, (cx, cy), 3, (255, 0, 255), -1)
            cv2.putText(img, str(track_id), (cx, cy), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)
            center_points_cur_frame.append((cx, cy))

    # Only at the beginning do we compare previous and current frame
    if count <= 2:
        for pt in center_points_cur_frame:
            for pt2 in center_points_prev_frame:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                if distance < 20:
                    tracking_objects[track_id] = pt
                    track_id += 1
    else:
        tracking_objects_copy = tracking_objects.copy()
        center_points_cur_frame_copy = center_points_cur_frame.copy()
        for object_id, pt2 in tracking_objects_copy.items():
            object_exists = False
            for pt in center_points_cur_frame_copy:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                # Update IDs position
                if distance < 20:
                    tracking_objects[object_id] = pt
                    object_exists = True
                    if pt in center_points_cur_frame:
                        center_points_cur_frame.remove(pt)
                    continue

            ############################### Problem ##########################################
            # Remove IDs lost
            if not object_exists:
                tracking_objects.pop(object_id)

        # Add new IDs found
        for pt in center_points_cur_frame:
            tracking_objects[track_id] = pt
            track_id += 1
        ############################### Problem ##########################################

    for object_id, pt in tracking_objects.items():
        cv2.circle(img, pt, 3, (255, 0, 255), -1)
        cv2.putText(img, str(object_id), (pt[0], pt[1] - 2), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)

    print("Tracking objects")
    print(tracking_objects)
    print("CUR FRAME LEFT PTS")
    print(center_points_cur_frame)

    # Make a copy of the points
    center_points_prev_frame = center_points_cur_frame.copy()

    cv2.imshow("Video", img)
    cv2.waitKey(1)

# After the loop release the cap object
cap.release()
# Destroy all the windows
cv2.destroyAllWindows()
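For reference, one common restructuring for exactly these symptoms (IDs vanishing or being recreated every frame) is to match each current centroid to at most one track and only then remove unmatched tracks. This is a hedged sketch of the matching step using the same names as above, not a guaranteed fix; the 20-pixel threshold may also simply be too tight for a 1500x1000 frame:

import math

def match_objects(tracking_objects, center_points_cur_frame, next_id, max_dist=20):
    # tracking_objects maps id -> (x, y); returns the next free id.
    unmatched = center_points_cur_frame.copy()
    for object_id, prev_pt in list(tracking_objects.items()):
        # Nearest current centroid for this track, if any is close enough
        best = min(unmatched,
                   key=lambda p: math.hypot(p[0] - prev_pt[0], p[1] - prev_pt[1]),
                   default=None)
        if best is not None and math.hypot(best[0] - prev_pt[0], best[1] - prev_pt[1]) < max_dist:
            tracking_objects[object_id] = best
            unmatched.remove(best)           # each point can update only one ID
        else:
            tracking_objects.pop(object_id)  # track lost: remove its ID
    for pt in unmatched:                     # leftover points are new cars
        tracking_objects[next_id] = pt
        next_id += 1
    return next_id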

i = i[0] leads to IndexError: invalid index to scalar variable [duplicate]

This question already has answers here:
How to fix IndexError: invalid index to scalar variable
(6 answers)
Closed 2 months ago.
I'm getting an error every time I run this script in my environment:
Traceback (most recent call last):
File "FaceMark.py", line 55, in <module>
i = i[0]
IndexError: invalid index to scalar variable.
The camera does turn on, but as soon as it detects my hand or face, the camera window shuts down and it throws the error posted above.
Here's the entire script:
import cv2
import mediapipe as mp
import time
import numpy as np

thres = 0.45  # Threshold to detect object
nms_threshold = 0.2
cap = cv2.VideoCapture()
cap.set(3, 1280)
cap.set(4, 720)
cap.set(10, 150)

classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')

configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

cap = cv2.VideoCapture(0)
pTime = 0
cTime = 0
mpDraw = mp.solutions.drawing_utils
mpFaceMesh = mp.solutions.face_mesh
faceMesh = mpFaceMesh.FaceMesh(max_num_faces=2)
drawSpec = mpDraw.DrawingSpec(thickness=1, circle_radius=2)
mpHands = mp.solutions.hands
hands = mpHands.Hands()
mpDrawHand = mp.solutions.drawing_utils

while True:
    success, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    classIds, confs, bbox = net.detect(img, confThreshold=thres)
    bbox = list(bbox)
    confs = list(np.array(confs).reshape(1, -1)[0])
    confs = list(map(float, confs))
    indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
    results = faceMesh.process(imgRGB)
    resultsHand = hands.process(imgRGB)

    for i in indices:
        i = i[0]
        box = bbox[i]
        # colors = np.random.uniform(0, 255, size=(len(box), 3))
        x, y, w, h = box[0], box[1], box[2], box[3]
        cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
        cv2.putText(img, classNames[classIds[i][0] - 1].upper(), (box[0] + 10, box[1] + 30),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
        print("Objects Ids: ", classIds)

    if resultsHand.multi_hand_landmarks:
        for handLms in resultsHand.multi_hand_landmarks:
            for id, lm in enumerate(handLms.landmark):
                print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
            mpDrawHand.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)

    if results.multi_face_landmarks:
        for faceLms in results.multi_face_landmarks:
            mpDraw.draw_landmarks(img, faceLms, mpFaceMesh.FACE_CONNECTIONS,
                                  drawSpec, drawSpec)
            for id, lm in enumerate(faceLms.landmark):
                # print(lm)
                ih, iw, ic = img.shape
                x, y = int(lm.x * iw), int(lm.y * ih)
                print("Face id: ", id, x, y)

    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN,
                3, (255, 0, 0), 3)
    cv2.imshow('image', img)
    key = cv2.waitKey(1)
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
How can I solve the problem around the indices loop? I have tried other solutions here in similar questions, but I haven't had any luck.
The variable i is already an integer, not a list or tuple: in recent OpenCV versions, cv2.dnn.NMSBoxes returns a flat array of indices rather than an (N, 1) array, so i[0] tries to index a scalar. If you change the for loop as follows, it will likely work:
for i in indices:
    box = bbox[i]
    # colors = np.random.uniform(0, 255, size=(len(box), 3))
    x, y, w, h = box[0], box[1], box[2], box[3]
    cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
    cv2.putText(img, classNames[classIds[i] - 1].upper(), (box[0] + 10, box[1] + 30),
                cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0), 2)
    print("Objects Ids: ", classIds)

How to detect the number plate with the most black pixels (if there is another method, please comment)

I'm trying to detect the ROI with the most black pixels for the license plate.
Below is the code for the number plate. It is based on the question How to recognize vehicle license / number plate (ANPR) from an image?, which I modified a bit.
import cv2
import numpy as np
import imutils
import sys
import glob
import math
import time
import os

def validate_contour(contour, img, aspect_ratio_range, area_range):
    rect = cv2.minAreaRect(contour)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2]
    width = rect[1][0]
    height = rect[1][1]
    angle = (angle + 180) if width < height else (angle + 90)

    output = False
    if (width > 0 and height > 0) and ((width < img_width / 2.0) and (height < img_width / 2.0)):
        aspect_ratio = float(width) / height if width > height else float(height) / width
        if aspect_ratio_range[0] <= aspect_ratio <= aspect_ratio_range[1]:
            if (height * width > area_range[0]) and (height * width < area_range[1]):
                box_copy = list(box)
                point = box_copy[0]
                del box_copy[0]
                dists = [((p[0] - point[0]) ** 2 + (p[1] - point[1]) ** 2) for p in box_copy]
                sorted_dists = sorted(dists)
                opposite_point = box_copy[dists.index(sorted_dists[1])]
                tmp_angle = 90
                if abs(point[0] - opposite_point[0]) > 0:
                    tmp_angle = abs(float(point[1] - opposite_point[1])) / abs(point[0] - opposite_point[0])
                    tmp_angle = rad_to_deg(math.atan(tmp_angle))
                if tmp_angle <= 45:
                    output = True
    return output

def deg_to_rad(angle):
    return angle * np.pi / 180.0

def rad_to_deg(angle):
    return angle * 180 / np.pi

def enhance(img):
    kernel = np.array([[-1, 0, 1], [-2, 0, 2], [1, 0, 1]])
    return cv2.filter2D(img, -1, kernel)

img = cv2.imread('13.jpg')
input_image = imutils.resize(img, width=500)
raw_image = np.copy(input_image)
img_original = input_image.copy()
img_mask = input_image.copy()
lic_plate = input_image.copy()
contoured = input_image.copy()

gray = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)
gray = enhance(gray)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
gray = cv2.Sobel(gray, -1, 1, 0)
h, sobel = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
se = cv2.getStructuringElement(cv2.MORPH_RECT, (16, 4))
binary = cv2.morphologyEx(sobel, cv2.MORPH_CLOSE, se)
ed_img = np.copy(binary)

contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for contour in contours:
    aspect_ratio_range = (2.2, 12)  # minimum 2.2, maximum 12
    area_range = (500, 18000)
    rectangles = cv2.minAreaRect(contour)  # rect = ((center_x, center_y), (width, height), angle)
    boxes = cv2.boxPoints(rectangles)      # the four vertices of the rotated rectangle
    boxes = np.int0(boxes)                 # round the values to integers
    all_area = cv2.drawContours(contoured, [boxes], 0, (127, 0, 255), 2)
    if validate_contour(contour, binary, aspect_ratio_range, area_range):
        rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        x2 = max(Xs)
        y1 = min(Ys)
        y2 = max(Ys)

        angle = rect[2]
        if angle < -45:
            angle += 90

        W = rect[1][0]
        H = rect[1][1]
        aspect_ratio = float(W) / H if W > H else float(H) / W
        center = ((x1 + x2) / 2, (y1 + y2) / 2)
        size = (x2 - x1, y2 - y1)
        M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)
        tmp = cv2.getRectSubPix(ed_img, size, center)
        tmp = cv2.warpAffine(tmp, M, size)
        TmpW = H if H > W else W
        TmpH = H if H < W else W
        tmp = cv2.getRectSubPix(tmp, (int(TmpW), int(TmpH)), (size[0] / 2, size[1] / 2))
        __, tmp = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        sortedplate = cv2.drawContours(img_mask, [box], 0, (127, 0, 255), 2)

        # count white pixels to estimate edge density inside the candidate box
        white_pixels = 0
        for x in range(tmp.shape[0]):
            for y in range(tmp.shape[1]):
                if tmp[x][y] == 255:
                    white_pixels += 1
        edge_density = float(white_pixels) / (tmp.shape[0] * tmp.shape[1])

        # getRectSubPix retrieves a pixel rectangle from an image with sub-pixel accuracy
        tmp = cv2.getRectSubPix(raw_image, size, center)
        tmp = cv2.warpAffine(tmp, M, size)
        TmpW = H if H > W else W
        TmpH = H if H < W else W
        tmp = cv2.getRectSubPix(tmp, (int(TmpW), int(TmpH)), (size[0] / 2, size[1] / 2))

        if edge_density > 0.5:
            cv2.drawContours(input_image, [box], 0, (127, 0, 255), 2)

cv2.imshow('original', img_original)
cv2.imshow('sobel', sobel)
cv2.imshow('binary', binary)
cv2.imshow("all contours", all_area)
cv2.imshow("sorted", sortedplate)
cv2.imshow("detected", lic_plate)
cv2.waitKey(0)
[Images: the number plate to be detected, and example inputs lp1, lp2, lp3, lp4]
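As for the ranking step in the title ("which candidate ROI has the most black pixels"), the per-pixel counting loop above can be replaced by cv2.countNonZero. A hedged sketch, assuming each candidate is a binarized single-channel crop like tmp above, paired with its box:

import cv2

def black_pixel_ratio(binary_roi):
    # binary_roi: single-channel image already thresholded to {0, 255}
    total = binary_roi.shape[0] * binary_roi.shape[1]
    white = cv2.countNonZero(binary_roi)
    return (total - white) / float(total)

def darkest_candidate(candidates):
    # candidates: hypothetical list of (binary_crop, box) tuples collected in the loop above
    return max(candidates, key=lambda c: black_pixel_ratio(c[0]))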

How to remove the shadow of a moving object from an image using OpenCV (Python)?

I am trying to do background subtraction using MOG2. It was working fine, but when a moving object casts a deep shadow, the shadow is treated as a foreground object, and I don't want the shadow as a foreground object (I'm running MOG2 over 13 images). How can I remove these shadows so they do not end up in the foreground?
Here is a sample image: [Images: the original frame, and the frame after applying MOG2]
Here is my sample code:
import os
import numpy as np
import cv2
import glob
import imutils

i = 0
bg_flag = 0
image_list = []
bgs_list = []

# bgs final function
def detection(image_list):
    global i
    global bg_flag
    bgs3_img = None
    # NOTE: the positional arguments are (history, varThreshold, detectShadows).
    # cv2.THRESH_BINARY (== 0) is being passed as varThreshold here, which is
    # probably unintended; the final 1 enables shadow detection, which marks
    # shadow pixels with the gray value 127 in the mask.
    backsub = cv2.createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
    print("start2")
    for k in range(len(image_list)):
        frame = image_list[k]
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imwrite('./gray/' + str(k) + '.jpg', frame)
        #blur = cv2.medianBlur(frame, 21)
        blur = frame
        bgs_list.append(blur)
    for bg in range(len(bgs_list)):
        rects = []
        #start_time = time.time()
        frame_blur = bgs_list[bg]
        img = image_list[bg].copy()
        s_frame = image_list[bg]
        new_frame = s_frame.copy()
        fgmask = backsub.apply(frame_blur)
        cv2.imwrite("./bgs/" + str(i) + ".jpg", fgmask)
        fgmask[fgmask == 127] = 0   # zero out pixels flagged as shadow
        cv2.imwrite("./dilate/" + str(i) + ".jpg", fgmask)
        thresh = cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=1)
        thresh = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            #M = cv2.moments(c)
            A = cv2.contourArea(c)
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(new_frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            cv2.putText(new_frame, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            cv2.imwrite("./area/" + str(i) + ".jpg", new_frame)
            cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 1)
            cv2.putText(thresh, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imwrite("./area_bgs/" + str(i) + ".jpg", thresh)
        i += 1
    print("Done!")

# this folder contains 13 continuous images
images = glob.glob('./inci4/*.jpg')
for j in range(len(images)):
    img = cv2.imread(images[j])
    img = cv2.resize(img, (360, 640))
    image_list.append(img)
detection(image_list)
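With detectShadows enabled, MOG2 labels shadow pixels with the gray value 127 in the returned mask, so another standard way to drop them (besides zeroing 127 as above) is to threshold the mask above that value. A minimal sketch; history=128 matches the code above, while varThreshold=16 is the library default and may need tuning:

import cv2

backsub = cv2.createBackgroundSubtractorMOG2(history=128, varThreshold=16,
                                             detectShadows=True)

def foreground_without_shadows(frame):
    fgmask = backsub.apply(frame)
    # Shadows are labeled 127; keep only definite foreground (255)
    _, fgmask = cv2.threshold(fgmask, 254, 255, cv2.THRESH_BINARY)
    return fgmask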

Object Detection Using Raspberry Pi and Android IP Camera with Python and OpenCV

Here is the code that I have used for object detection using a Raspberry Pi and an Android IP camera. I'm not getting any output, and the code does not raise any errors. Can someone figure out what the error is?
import urllib.request
import cv2
import numpy as np
import datetime
import math

# global variables
width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
MinCountourArea = 3000        # Adjust this value according to your usage
BinarizationThreshold = 70    # Adjust this value according to your usage
OffsetRefLines = 150          # Adjust this value according to your usage

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if (AbsDistance <= 2) and (y < CoorYExitLine):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if (AbsDistance <= 2) and (y > CoorYEntranceLine):
        return 1
    else:
        return 0
This is the code I have used to obtain the video stream from my IP camera:
ReferenceFrame = None
while True:
    # Re-opening the capture on every iteration fetches a single JPEG snapshot
    camera = cv2.VideoCapture("http://192.168.1.6:8080/shot.jpg")
    camera.set(3, 640)
    camera.set(4, 480)
    (ret, Frame) = camera.read()
    height = np.size(Frame, 0)
    width = np.size(Frame, 1)
    # if a frame cannot be grabbed, the program ends here
    if not ret:
        break
This is the part of the code I have used to draw the reference lines and the frame for object detection and counting:
    # grayscale conversion and Gaussian blur filter
    GrayFrame = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    GrayFrame = cv2.GaussianBlur(GrayFrame, (21, 21), 0)
    if ReferenceFrame is None:
        ReferenceFrame = GrayFrame
        continue

    # Background subtraction and image binarization
    FrameDelta = cv2.absdiff(ReferenceFrame, GrayFrame)
    FrameThresh = cv2.threshold(FrameDelta, BinarizationThreshold, 255, cv2.THRESH_BINARY)[1]

    # Dilate image and find all the contours
    FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
    # NOTE: the 3-value unpacking below is the OpenCV 3 API; OpenCV 4 returns (contours, hierarchy)
    _, cnts, _ = cv2.findContours(FrameThresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    QttyOfContours = 0

    # plot reference lines (entrance and exit lines)
    # NOTE: these y coordinates are floats; newer OpenCV versions require integers here
    CoorYEntranceLine = (height / 2) - OffsetRefLines
    CoorYExitLine = (height / 2) + OffsetRefLines
    cv2.line(Frame, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (255, 0, 0), 2)
    cv2.line(Frame, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)

    # check all found contours
    for c in cnts:
        # contours with a small area are ignored
        if cv2.contourArea(c) < MinCountourArea:
            continue
        QttyOfContours = QttyOfContours + 1

        # draw a rectangle around the object
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(Frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # find the object's centroid
        CoordXCentroid = (x + x + w) / 2
        CoordYCentroid = (y + y + h) / 2
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        cv2.circle(Frame, ObjectCentroid, 1, (0, 0, 0), 5)

        if CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine):
            EntranceCounter += 1
        if CheckExitLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine):
            ExitCounter += 1

    print("Total contours found: " + str(QttyOfContours))

    # Write entrance and exit counter values on the frame and show it
    cv2.putText(Frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.putText(Frame, "Exits: {}".format(str(ExitCounter)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.imshow('Salida', Frame)
    cv2.waitKey(1)

# When everything is done, release the capture
# NOTE: the capture object above is named 'camera'; 'cap' is never defined
cap.release()
cv2.destroyAllWindows()
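As an aside, the script imports urllib.request but never uses it; for the IP camera's /shot.jpg endpoint, the usual pattern is to fetch the JPEG bytes and decode them, instead of constructing a new VideoCapture on every loop iteration. A hedged sketch (the URL is the one from the question):

import urllib.request
import numpy as np
import cv2

url = "http://192.168.1.6:8080/shot.jpg"
while True:
    # Fetch one JPEG snapshot and decode it into a BGR image
    img_bytes = urllib.request.urlopen(url).read()
    img_array = np.frombuffer(img_bytes, dtype=np.uint8)
    frame = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    if frame is None:
        break
    cv2.imshow('IP camera', frame)
    if cv2.waitKey(1) == 27:  # Esc to quit
        break
cv2.destroyAllWindows()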
The correct code
import cv2   # missing from the original listing; required by everything below
import numpy as np
import math

def nothing(x):
    pass

width = 0
height = 0
EntranceCounter = 0
OffsetRefLines = 150
ExitCounter = 0
BinarizationThreshold = 70
MinCountourArea = 3000

cap = cv2.VideoCapture(0)
path = "http://192.168.1.6:8080/video"
cap.open(path)
ReferenceFrame = None

# Check if an object is entering the monitored zone
def CheckEntranceLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYEntranceLine)
    if (AbsDistance <= 2) and (y < CoorYExitLine):
        return 1
    else:
        return 0

# Check if an object is exiting the monitored zone
def CheckExitLineCrossing(y, CoorYEntranceLine, CoorYExitLine):
    AbsDistance = abs(y - CoorYExitLine)
    if (AbsDistance <= 2) and (y > CoorYEntranceLine):
        return 1
    else:
        return 0

#cv2.namedWindow("Tracking")
cv2.createTrackbar("LH", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LS", "Tracking", 0, 255, nothing)
cv2.createTrackbar("LV", "Tracking", 0, 255, nothing)
cv2.createTrackbar("UH", "Tracking", 255, 255, nothing)
cv2.createTrackbar("US", "Tracking", 255, 255, nothing)
cv2.createTrackbar("UV", "Tracking", 255, 255, nothing)

while True:
    #frame = cv2.imread('smarties.png')
    if cap.isOpened():
        rval, frame = cap.read()
    while rval:
        rval, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        hsv = cv2.GaussianBlur(hsv, (21, 21), 0)
        if ReferenceFrame is None:
            ReferenceFrame = hsv
            continue

        # Background subtraction and image binarization
        FrameDelta = cv2.absdiff(ReferenceFrame, hsv)
        FrameThresh = cv2.threshold(FrameDelta, 25, 255, cv2.THRESH_BINARY)[1]

        # Dilate image and find all the contours
        FrameThresh = cv2.dilate(FrameThresh, None, iterations=2)
        cnts, _ = cv2.findContours(FrameThresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        QttyOfContours = 0

        # plot reference lines (entrance and exit lines)
        cv2.line(frame, (0, 170), (2000, 170), (255, 0, 0), 5)
        cv2.line(frame, (0, 470), (2000, 470), (0, 0, 255), 5)

        # check all found contours
        for c in cnts:
            # contours with a small area are ignored
            if cv2.contourArea(c) < MinCountourArea:
                continue
            QttyOfContours = QttyOfContours + 1

            # draw a rectangle around the object
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # find the object's centroid
            CoordXCentroid = int(x + x + w) / 2
            CoordYCentroid = int(y + y + h) / 2
            ObjectCentroid = (x, y)
            cv2.circle(frame, ObjectCentroid, 2, (0, 255, 0), 5)

            if CheckEntranceLineCrossing(CoordYCentroid, 170, 470):
                EntranceCounter += 1
            if CheckExitLineCrossing(CoordYCentroid, 170, 470):
                ExitCounter += 1

        print("Total contours found: " + str(QttyOfContours))

        # Write entrance and exit counter values on the frame and show it
        cv2.putText(frame, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (250, 0, 1), 2)
        cv2.putText(frame, "Exits: {}".format(str(ExitCounter)), (10, 110),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
        imS = cv2.resize(frame, (400, 400))  # Resize image
        #imSS = cv2.resize(mask, (200, 200))
        #imSSS = cv2.resize(frame, (200, 200))
        cv2.imshow("frame", imS)
        #cv2.imshow("mask", imSS)
        #cv2.imshow("res", imSSS)
        key = cv2.waitKey(1)
        if key == 27:
            break
cap.release()
cv2.destroyAllWindows()
