I need to use the YOLO algorithm for schoolwork.
I need to know the center of every object YOLO detects, which is why I'm not using one of the ready-made YOLO GitHub repos: I don't know how to extract that data from them.
I just followed a YouTube video (https://www.youtube.com/watch?v=xKK2mkJ-pHU&t=161s&ab_channel=Pysource) and copied the code into Spyder (my Python IDE).
Here is the code (I'm French, so please excuse my English!):
import cv2
import numpy as np
import time
#--------------------- Load YOLOv3 ---------------------
# Load the YOLOv3 model
net = cv2.dnn.readNet('yolov3.weights','yolov3.cfg')
# Load the class names from the COCO dataset
class_names = []
with open("coco.names", "r") as f:
class_names = [line.strip() for line in f.readlines()]
# Load the layers (??)
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(class_names), 3))
#--------------------- Load the image ---------------------
# Take the code from camera.py
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_PLAIN
starting_time = time.time()
frame_id = 0
while True:
_, frame = cap.read()
frame_id += 1
height, width, channels = frame.shape
#------------------- Detect the objects -------------------
# Split the frame into its 3 RGB channels
blob = cv2.dnn.blobFromImage(frame, 0.004, (320, 320), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
#--------------- Information about the objects ----------------
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
# The object is now detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
# cv2.circle(img, (center_x, center_y), 10, (0, 255, 0), 2)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.3)
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(class_names[class_ids[i]])
confidence = confidences[i]
color = colors[class_ids[i]]
cv2.rectangle(frame, (x,y), (x+ w, y + h), color, 2)
cv2.putText(frame, label + " " + str(round(confidence, 2)), (x, y + 30), font, 3, color, 3)
time_past = time.time() - starting_time
fps = frame_id / time_past
cv2.putText(frame, "FPS: " + str(round(fps, 1)), (150, 50), font, 3, (0, 255, 0), 4)
cv2.imshow("Image", frame)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
When I run this script, the 'cv2.imshow(...)' window shows my phone's webcam feed (thanks to the iVCam app, over a cable link), but the problem is the following.
It just doesn't detect any object, I mean YOLO doesn't detect anything. :/
I don't think it's an installation issue, because I think I installed everything correctly, and I can print the cv2 version in Python:
>>> print(cv2.__version__)
4.6.0
I don't know whether going through the iVCam app can cause trouble or not.
If someone can fix the problem, I would be really happy!
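Since the whole point is to get the center of every detected object, here is a minimal sketch (an illustration only, reusing the boxes, indexes, class_ids, class_names and frame variables from the script above) of how those centers could be collected inside the while loop, right after the NMSBoxes call:
# Sketch: collect the (x, y) center of every detection kept by NMS.
centers = []
for i in range(len(boxes)):
    if i in indexes:
        x, y, w, h = boxes[i]
        center_x = x + w // 2
        center_y = y + h // 2
        centers.append((center_x, center_y))
        # Draw the center so it shows up in the imshow window
        cv2.circle(frame, (center_x, center_y), 5, (0, 255, 0), 2)
        print(class_names[class_ids[i]], "center:", (center_x, center_y))
As a side note, the other scripts in this thread pass 0.00392 (i.e. 1/255) and a 416x416 size to blobFromImage, whereas this one uses 0.004 and 320x320; matching the tutorial's values might be worth trying, although I can't promise that this is why nothing is detected.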
I am using OpenCV and a trained model, and I'm trying to assign IDs to cars with the help of their centroids. There are the following scenarios:
at the beginning, all recognized cars get a centroid and an ID
cars that leave the frame should have their IDs removed
new cars that enter the frame (later) should get new IDs
I took the ID code from a YouTube video, but it doesn't work as it should: the remove step removes everything.
If I leave the update step on, the cars get new IDs every frame, which should not happen.
I am new to programming and I would appreciate it if someone could help me out here.
import cv2
import numpy as np
import pandas as pd
import math
video_name = "videos.mp4"
cap = cv2.VideoCapture(video_name)
net = cv2.dnn.readNetFromONNX("best.onnx")
classes = ['car', 'free_space']
count = 0
center_points_prev_frame = []
tracking_objects = {}
track_id = 0
while True:
ret, img = cap.read()
# ret, frame1 = cap.read()
count += 1
if img is None:
break
img = cv2.resize(img, (1500, 1000))
# frame1 = cv2.resize(frame1, (1500, 1000))
blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255,
size=(640, 640),
mean=[0, 0, 0, 0],
swapRB=True,
crop=False)
net.setInput(blob)
detections = net.forward()[0]
# print(detections.shape)
# Layout: cx, cy, w, h, confidence, class_score
classes_ids = []
confidences = []
boxes = []
rows = detections.shape[0]
img_width, img_height = img.shape[1], img.shape[0]
x_scale = img_width / 640
y_scale = img_height / 640
# apply Non-Maximum Suppression
for i in range(rows):
row = detections[i]
confidence = row[4]
if confidence > 0.3:
classes_score = row[5:]
ind = np.argmax(classes_score)
if classes_score[ind] > 0.3:
classes_ids.append(ind)
confidences.append(confidence)
cx, cy, w, h = row[:4]
x1 = int((cx - w / 2) * x_scale)
y1 = int((cy - h / 2) * y_scale)
# print("X1:",x1 ,"Y1",y1)
width = int(w * x_scale)
height = int(h * y_scale)
box = np.array([x1, y1, width, height])
boxes.append(box)
indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.3, 0.3)
# Point current frame
center_points_cur_frame = []
for i in indices:
x1, y1, w, h = boxes[i]
label = classes[classes_ids[i]]
conf = confidences[i]
text = label + "{:.2f}".format(conf)
if label == 'car':
car_coordinates = [(x1, y1), (x1 + w, y1 + h)]
#cv2.rectangle(img, (x1, y1), (x1 + w, y1 + h), (51, 51, 255), 2)
# center points
cx = int((x1 + x1 + w) / 2)
cy = int((y1 + y1 + h) / 2)
cv2.circle(img, (cx,cy), 3, (255, 0, 255), -1)
cv2.putText(img, str(track_id), (cx,cy), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)
center_points_cur_frame.append((cx, cy))
# Only at the beginning we compare previous and current frame
if count <= 2:
for pt in center_points_cur_frame:
for pt2 in center_points_prev_frame:
distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
if distance < 20:
tracking_objects[track_id] = pt
track_id += 1
else:
tracking_objects_copy = tracking_objects.copy()
center_points_cur_frame_copy = center_points_cur_frame.copy()
for object_id, pt2 in tracking_objects_copy.items():
object_exists = False
for pt in center_points_cur_frame_copy:
distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
# Update IDs position
if distance < 20:
tracking_objects[object_id] = pt
object_exists = True
if pt in center_points_cur_frame:
center_points_cur_frame.remove(pt)
continue
############################### Problem ##########################################
# Remove IDs lost
if not object_exists:
tracking_objects.pop(object_id)
# Add new IDs found
for pt in center_points_cur_frame:
tracking_objects[track_id] = pt
track_id += 1
############################### Problem ##########################################
for object_id, pt in tracking_objects.items():
cv2.circle(img, pt, 3, (255, 0, 255), -1)
cv2.putText(img, str(object_id), (pt[0], pt[1] - 2), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)
print("Tracking objects")
print(tracking_objects)
print("CUR FRAME LEFT PTS")
print(center_points_cur_frame)
# Make a copy of the points
center_points_prev_frame = center_points_cur_frame.copy()
cv2.imshow("Video", img)
cv2.waitKey(1)
# After the loop release the cap object
cap.release()
# Destroy all the windows
cv2.destroyAllWindows()
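For reference, here is a minimal sketch of how the matching step marked with the "Problem" comments is usually structured (an assumption based on the common centroid-tracking pattern, not a guaranteed fix): an ID is only removed after the inner loop has compared it against every current point, and new IDs are only created for points that matched nothing.
# Sketch of the per-frame matching step, assuming the same variables
# (tracking_objects, center_points_cur_frame, track_id) as above.
tracking_objects_copy = tracking_objects.copy()
center_points_cur_frame_copy = center_points_cur_frame.copy()
for object_id, pt2 in tracking_objects_copy.items():
    object_exists = False
    for pt in center_points_cur_frame_copy:
        distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
        if distance < 20:
            # Same car as in the previous frame: update its position, keep its ID.
            tracking_objects[object_id] = pt
            object_exists = True
            if pt in center_points_cur_frame:
                center_points_cur_frame.remove(pt)
            break
    # Only drop the ID once every current point has been checked and none matched.
    if not object_exists:
        tracking_objects.pop(object_id)
# Whatever is left in center_points_cur_frame matched no existing ID,
# so these are new cars entering the frame.
for pt in center_points_cur_frame:
    tracking_objects[track_id] = pt
    track_id += 1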
I'm working on a project doing object detection and text detection using both YOLO and EasyOCR. Since I'm a beginner and really new to computer vision, I would be glad if someone could help me.
Here's the code:
import cv2
import numpy as np
import easyocr
# Load Yolo
net = cv2.dnn.readNet('yolov4-tiny-custom_3000.weights', 'yolov4-tiny-custom.cfg')
classes = []
with open("obj.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
cap = cv2.VideoCapture('car1.mp4')
# Declare Ocr
cascade_src = 'haarcascade_russian_plate_number.xml'
cascade = cv2.CascadeClassifier(cascade_src)
reader = easyocr.Reader(['en'], gpu = False)
# Declare Ocr
while True:
_, frame = cap.read()
height, width, channels = frame.shape
#frame = cv2.resize(frame, (800, 600))
# Yolo Detection
# Detecting objects
blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
# Showing information on the screen
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.5:
# Object detected
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
color = colors[class_ids[i]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
cv2.putText(frame, label, (x, y + 30), cv2.FONT_HERSHEY_PLAIN, 3, color, 3)
print("Jenis Mobil: " +label)
# Text Reader Using Ocr
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
plate = cascade.detectMultiScale(gray, 1.1, 5)
for x,y,w,h in plate:
wT,hT,cT = frame.shape
a,b = (int(0.02*wT),int(0.02*hT))
plate2 = frame[y+a:y+h-a,x+b:x+w-b,:]
cv2.rectangle(frame,(x,y),(x+w,y+h),(60,60,255),2)
cv2.rectangle(frame,(x-1,y-40),(x+w+1,y),(60,60,255),-1)
result = reader.readtext(plate2)
for detek in result:
top_left = (int(detek[0][0][0]), int(detek[0][0][1]))
bottom_right = (int(detek[0][2][0]), int(detek[0][2][1]))
text = detek[1]
cv2.putText(frame,text,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,0.9,(255,255,255),2)
print("Nomor Kendaran: " + text)
# Text Reader Using Ocr
cv2.imshow("Detection", frame)
key = cv2.waitKey(1)
if key == 27:
break
cap.release()
cv2.destroyAllWindows()
I am trying to use an ROI (region of interest) so that detection only runs on part of the frame, but I am not able to get it working.
Any advice, please?
Crop the frame before it is fed to the model:
while True:
_, frame = cap.read()
im_crop = frame[y1:y2, x1:x2] # set x1, x2, y1, y2 based on your ROI
blob = cv2.dnn.blobFromImage(im_crop, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
This will also speed up inference, since there is less data for the model to process.
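One detail worth keeping in mind with this approach (a hedged addition, not part of the original answer): the boxes computed from im_crop are in crop coordinates, so if you want to draw them on the full frame you have to shift them by the crop origin. A small sketch, assuming x1 and y1 are the crop origin and x, y, w, h describe a detection in the crop:
# Shift a box from crop coordinates back to full-frame coordinates
# before drawing it on the original frame.
x_full = x + x1
y_full = y + y1
cv2.rectangle(frame, (x_full, y_full), (x_full + w, y_full + h), (0, 255, 0), 2)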
I have the following code which I am trying to use to detect objects. I have made some changes and now I am trying to make it work with a camera so that it detects objects live. At the moment it just takes a picture in a loop instead of showing a continuous video.
Here is my code.
import numpy as np
import cv2
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layers_names = net.getLayerNames()
outputlayers = [layers_names[i - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
class_ids = []
confidences = []
boxes = []
cap = cv2.VideoCapture(0)
while True:
_, image = cap.read()
height, width, channels = image.shape
# Reset the per-frame lists here, otherwise detections accumulate
# across frames and old boxes keep being redrawn.
class_ids = []
confidences = []
boxes = []
blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True,
crop=False)
net.setInput(blob)
outs = net.forward(outputlayers)
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence >= 0.5:
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
objects_detected = len(boxes)
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, width, height = boxes[i]
label = str(classes[class_ids[i]])
color = colors[i]
print(label)
cv2.rectangle(image, (x, y), (x + width, y + height), color, 2)
cv2.putText(image, label, (x, y + 30), font, 4, color, 3)
cv2.imshow("Image", image)
cv2.waitKey(5)
cv2.destroyAllWindows()
cap.release()
My OpenCV code is lagging on my Raspberry Pi, but on my PC it runs smoothly. Can anyone help me turn my single-threaded code into a version that uses threading?
from cv2 import cv2
import numpy as np
from pyzbar.pyzbar import decode
import pickle,time
import os
import imutils
import screeninfo
from screeninfo import get_monitors
curr_path = os.getcwd()
#########models##################################################
print("Loading face detection model")
proto_path = os.path.join(curr_path, 'model', 'deploy.prototxt')
model_path = os.path.join(curr_path, 'model', 'res10_300x300_ssd_iter_140000.caffemodel')
face_detector = cv2.dnn.readNetFromCaffe(prototxt=proto_path, caffeModel=model_path)
print("Loading face recognition model")
recognition_model = os.path.join(curr_path, 'model', 'openface_nn4.small2.v1.t7')
face_recognizer = cv2.dnn.readNetFromTorch(model=recognition_model)
################pickles#########################################
recognizer = pickle.loads(open('recognizer.pickle', "rb").read())
le = pickle.loads(open('le.pickle', "rb").read())
print("Starting test video file")
#adjacents########################################################################
no_of_adjacent_prediction=0
no_face_detected=0 #to track the number of times the face is detected
prev_predicted_name='' #to keep track of the previously predicted face(w.r.t frame)
count_frames = total_no_face_detected = 0
#camera#########################################################################
font=cv2.FONT_HERSHEY_SIMPLEX
clr=(255,255,255)
cap = cv2.VideoCapture(0)
time.sleep(2)
profile = None
####TRY_COUNTS###########
MAX_TRY=3
tries=0 #
######flags############
flag = True
flag_face_recognised=False #to keep track if the user face is recognized
flag_face_not_recognised=False
#############FULLSCREEN###############
WINDOW_NAME = "Face-Rcognition and QRCODEQQQQQ"
screenid = 0
while True:
cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
ret, frame = cap.read()
screen = screeninfo.get_monitors()[screenid]
screen_width, screen_height = screen.width,screen.height
frame = cv2.flip(frame, 1)
frame_height, frame_width, _ = frame.shape
scaleWidth = float(screen_width) / float(frame_width)
scaleHeight = float(screen_height) / float(frame_height)
if (flag):
access = open("AccessCodes.txt")
for i in decode(frame):
decoded_data = i.data.decode("utf-8") # converts bytes to string value
print(decoded_data)
# Drawing polygon on frame (tilts w.r.t orientation)
pts = np.array([i.polygon], np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(frame, [pts], True, (0, 255, 0), 1)
# print(pts)
# Display text
rect_pts = i.rect # using rect point as origin for text as we don't want the text to tilt with the qrcode
fontScale = 0.8
thickness = 1
# cv2.putText(frame,decoded_data,(rect_pts[0],rect_pts[1]),cv2.FONT_HERSHEY_SIMPLEX,fontScale,(255,0,0),thickness)
# print(rect_pts)
if decoded_data.lower() in access.read(): # Check private key
flag = False
tries = 0
print("QRCODE is Valid.Proceed to FaceRecog")
time_out_no_of_frames_after_qrcode = 0
else:
# print("INVALID QR CODE")
print("Invalid QRCODE")
if scaleHeight > scaleWidth:
imgScale = scaleWidth
else:
imgScale = scaleHeight
newX, newY = frame.shape[1] * imgScale, frame.shape[0] * imgScale
frame = cv2.resize(frame, (int(newX), int(newY)))
cv2.imshow(WINDOW_NAME, frame)
else:
frame = cv2.resize(frame, (int(newX), int(newY)))
(h, w) = frame.shape[:2]
image_blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)
face_detector.setInput(image_blob)
face_detections = face_detector.forward()
for i in range(0, face_detections.shape[2]):
confidence = face_detections[0, 0, i, 2]
if confidence > 0.90:
box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
face_blob = cv2.dnn.blobFromImage(face, 1.0/255, (96, 96), (0, 0, 0), True, False)
face_recognizer.setInput(face_blob)
vec = face_recognizer.forward()
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
text = "{}: {:.2f}".format(name, proba )
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
cv2.putText(frame, "Welcome home " + name.replace('_', ' ').title(), (160, 460), font, 0.8, clr,
thickness+3, cv2.LINE_AA)
cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 255, 255), 1)
if name == decoded_data.lower():
print("Face is Recognised: "+str(no_of_adjacent_prediction))
no_of_adjacent_prediction += 1
else:
print("Face not Recognised.")
cv2.putText(frame, "Face Not Recognised", (160, 460), font, 0.8, clr, thickness, cv2.LINE_AA)
flag_face_not_recognised = True
no_of_adjacent_prediction = 0
if (no_of_adjacent_prediction > 10): # no_of_adjacent_prediction is only updated when the confidence of classification is >80
flag_face_recognised = True
no_of_adjacent_prediction = 0
no_face_detected = 0
cv2.imshow(WINDOW_NAME, frame)
if (flag_face_recognised): # if face is recognized then open the door
# arduino.write(bytes('o', 'utf-8')) #Output the given byte string over the serial port.
print("DOOR is OPEN")
time.sleep(5)
# speak("Closing door")
# arduino.write(bytes('c', 'utf-8')) #Output the given byte string over the serial port.
print("DOOR is CLOSED")
flag_face_recognised = False
flag = True # to start from qrcode
if (flag_face_not_recognised):
# speak("Face not recognised. The door will remain closed")
time.sleep(2)
flag_face_not_recognised = False
tries += 1
if (tries >= MAX_TRY):
flag = True # to start from qrcode
tries = 0
if (time_out_no_of_frames_after_qrcode >= 400):
# speak("User authentication failed due to time out")
flag = True # to start from qrcode
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
cv2.destroyAllWindows()
FPS on PC: 20 fps
FPS on Raspberry Pi: 9 fps
I have tried various OpenCV tweaks and the result is still the same. I read that threading increases the FPS of an OpenCV capture loop, but I don't know how to apply it to my code, because I'm still a beginner with Python. Any help would be nice: I want the FPS on my Raspberry Pi in the 15-20 range instead of 9 fps.
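For what it's worth, here is a minimal sketch of the usual threaded-capture pattern (an illustration only, not taken from the code above; the ThreadedCapture class name and the "Preview" window are made up for the example). A background thread keeps calling cap.read() so the main loop never blocks waiting for the camera, which is usually where a Raspberry Pi loses frames; the QR-code and face-recognition work would stay in the main loop as in the original script.
import threading
import cv2

class ThreadedCapture:
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.ret, self.frame = self.cap.read()
        self.stopped = False
        self.lock = threading.Lock()
        threading.Thread(target=self._update, daemon=True).start()

    def _update(self):
        # Keep only the newest frame; older frames are simply dropped.
        while not self.stopped:
            ret, frame = self.cap.read()
            with self.lock:
                self.ret, self.frame = ret, frame
        self.cap.release()

    def read(self):
        with self.lock:
            return self.ret, self.frame

    def release(self):
        self.stopped = True

stream = ThreadedCapture(0)
while True:
    ret, frame = stream.read()
    if not ret:
        break
    # ... run the QR-code / face-recognition processing on frame here ...
    cv2.imshow("Preview", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
stream.release()
cv2.destroyAllWindows()
This mainly helps when the per-frame processing is slower than the camera's frame rate, because the loop always works on the most recent frame instead of a growing backlog.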
I'm trying to use PIL's ImageGrab to get frames of my screen, so that I can later run object detection on every frame.
import numpy as np
import cv2
from PIL import ImageGrab as ig
import time
# Load Yolo
net = cv2.dnn.readNet("yolov3_training_last.weights", "yolov3_testing.cfg")
# Name custom object
classes = ["amongus"]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
last_time = time.time()
while(True):
img_ = ig.grab(bbox=None)
open_cv_image = np.array(img_)
open_cv_image = open_cv_image[:, :, ::-1].copy()
img = cv2.resize(open_cv_image, None, fx=0.4, fy=0.4)
height, width, channels = img.shape
# Detecting objects
blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
net.setInput(blob)
outs = net.forward(output_layers)
# Showing information on the screen
class_ids = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
if confidence > 0.3:
# Object detected
print(class_id)
center_x = int(detection[0] * width)
center_y = int(detection[1] * height)
w = int(detection[2] * width)
h = int(detection[3] * height)
# Rectangle coordinates
x = int(center_x - w / 2)
y = int(center_y - h / 2)
boxes.append([x, y, w, h])
confidences.append(float(confidence))
class_ids.append(class_id)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
print(indexes)
font = cv2.FONT_HERSHEY_PLAIN
for i in range(len(boxes)):
if i in indexes:
x, y, w, h = boxes[i]
label = str(classes[class_ids[i]])
color = colors[class_ids[i]]
cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
#cv2.putText(img, label, (x, y + 30), font, 3, color, 2)
cv2.imshow("Image", np.array(img_))
#key = cv2.waitKey(0)
cv2.destroyAllWindows()
If I uncomment the #key = cv2.waitKey(0) line it works, because I'm only taking one frame each time I press a key, but once I comment that part out, I get this screen.
I'm assuming (not sure at all whether this is the reason) that this happens because a lot of frames are coming in, but if I sleep(n) inside the while loop the FPS will be very low (I guess?).
What's happening, and how do I fix it?
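A likely explanation (a guess, since the screenshot isn't visible here): cv2.imshow only refreshes its window while cv2.waitKey is processing GUI events, so with the waitKey call commented out the window never gets a chance to redraw and appears frozen. cv2.waitKey(1) waits only about a millisecond, so it does not throttle the loop the way cv2.waitKey(0) does. A minimal sketch of the end of the loop, reusing the img variable that already has the rectangles drawn on it:
    # Show the annotated frame and give HighGUI ~1 ms to refresh the window.
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == 27:  # press Esc to quit
        break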