How to find turn-by-turn direction using a camera in OpenCV Python?

The code below finds edges and yellow lines. Basically, I would like to print "left turn" when the yellow line turns left.
import cv2
import numpy as np

video = cv2.VideoCapture(0)

while True:
    ret, orig_frame = video.read()
    if not ret:
        video = cv2.VideoCapture(0)
        continue
    frame = cv2.GaussianBlur(orig_frame, (5, 5), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    low_yellow = np.array([18, 94, 140])
    up_yellow = np.array([48, 255, 255])
    mask = cv2.inRange(hsv, low_yellow, up_yellow)
    edges = cv2.Canny(mask, 75, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 50, maxLineGap=50)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 5)
    cv2.imshow("frame", frame)
    cv2.imshow("edges", edges)
    key = cv2.waitKey(25)
    if key == 27:
        break

video.release()
cv2.destroyAllWindows()
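To get the turn-by-turn printout the question asks for, one option is to average the lean of the detected segments and threshold it. The sketch below is a minimal illustration, not a tested solution: the direction() helper, the 0.3 threshold, and the sign convention (image y grows downward, so the mapping flips if the camera is mounted differently) are all assumptions to tune for your setup.

def direction(lines):
    # Average dx/dy over all Hough segments: how far the line drifts
    # sideways per pixel of height. Sign and threshold are illustrative.
    if lines is None:
        return "no line"
    leans = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        if y1 != y2:                      # skip horizontal segments
            leans.append((x2 - x1) / (y2 - y1))
    if not leans:
        return "straight"
    mean_lean = sum(leans) / len(leans)
    if mean_lean > 0.3:
        return "left turn"
    if mean_lean < -0.3:
        return "right turn"
    return "straight"

Calling print(direction(lines)) right after cv2.HoughLinesP in the loop above would print a direction for each frame.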

Related

OpenCV how to detect in specific position

Can I detect fires that are already inside the square, at specific positions? I already have code that detects fire and shows a specific position in the view, but can I restrict it to a smaller region?
Also, can I run a function named "main" every 5 minutes, at different times? Here is my current code:
import cv2
import numpy as np
import math
import time
import asyncio
from asyncio import sleep

yukseklik = int(input("Yukseklik giriniz "))  # "Enter height"
hiz = input("Hizi giriniz ")                  # "Enter speed"

global kez
kez = 0

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([0, 80, 20])
    high_red = np.array([35, 255, 255])
    kernal = np.ones((5, 5), "uint8")
    low_red1 = np.array([160, 100, 20])
    high_red1 = np.array([190, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.rectangle(frame, (213, 160), (426, 320), (255, 255, 255), 4)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imframe = cv2.rectangle(frame, (x - 20, y + 20), (x + w, y + h), (255, 255, 255), 2)
            if 213 < x < 426 and 160 < y < 320:
                if kez == 0:
                    def main():
                        g = 9.80
                        y = 2 * (yukseklik - 15)
                        u = float(y) / 9.80
                        x_ = math.sqrt(u)
                        x_ = x_ * float(hiz)
                        xi = float(yukseklik) * 1.73205080756887729352744463415059  # sqrt(3)
                        print("x= ", str(xi))
                        print("x'= ", str(x_))
                        t = (float(xi) - float(x_)) / float(hiz)
                        print("t= ", t)
                        global kez
                        kez = kez + 1
                        asyncio.sleep(5)  # note: has no effect without await
                        kez = 0
                    main()

    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)

    key = cv2.waitKey(1)
    if key == 27:
        break
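For the "run main every 5 minutes" part, a hedged sketch (not the asker's code) is to gate the call with a timestamp check inside the capture loop instead of asyncio; asyncio.sleep() has no effect unless it is awaited inside a coroutine:

import time

INTERVAL = 5 * 60      # seconds between allowed runs
last_run = 0.0

def maybe_run(func):
    # Call func() at most once every INTERVAL seconds.
    global last_run
    now = time.time()
    if now - last_run >= INTERVAL:
        last_run = now
        func()

Inside the while loop, replacing the kez bookkeeping with maybe_run(main) would trigger main() at most once per 5-minute window.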

opencv traffic light detection

import cv2
import numpy as np
import warnings

warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([20, 0, 0])
    upper_yellow = np.array([40, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.medianBlur(res, 5)
    ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
    cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=20, maxRadius=30)
    if circles is not None:
        print("circle is found")
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
    cv2.imshow('detected circles', cimg)
    cv2.imshow('res', res)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
I am trying to detect a traffic light using OpenCV. Initially I want to detect the yellow color using HSV space, then apply median filtering and find the circle, but it raises errors: "async ReadSample() call is failed with error status: -1072873821" and "OnReadSample() is called with error status: -1072873821". The errors are probably caused by the if statement that checks whether a circle is found. The full error output is a long list, but these two lines are repeated.
The capture device is failing to read a frame. The OnReadSample() call is failing on cap.read() and you should implement logic to handle a frame not being read. I've demonstrated this below:
import cv2
import numpy as np
import warnings

warnings.filterwarnings("ignore")

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if ret == True:
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lower_yellow = np.array([20, 0, 0])
        upper_yellow = np.array([40, 255, 255])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        res = cv2.bitwise_and(frame, frame, mask=mask)
        img = cv2.medianBlur(res, 5)
        ccimg = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cimg = cv2.cvtColor(ccimg, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(cimg, cv2.HOUGH_GRADIENT, 1, 20, param1=50, param2=30, minRadius=20, maxRadius=30)
        if circles is not None:
            print("circle is found")
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
                cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)
        cv2.imshow('detected circles', cimg)
        cv2.imshow('res', res)
    else:
        print("Read Failed")
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
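If reads keep failing, it can also help to reopen the device instead of only skipping the frame. A minimal sketch of that idea, with an arbitrary retry limit of 10:

import cv2

cap = cv2.VideoCapture(0)
fails = 0

while True:
    ret, frame = cap.read()
    if not ret:
        fails += 1
        if fails > 10:                     # too many consecutive failures
            cap.release()
            cap = cv2.VideoCapture(0)      # try to reopen the camera
            fails = 0
        continue
    fails = 0
    cv2.imshow('frame', frame)             # processing from the answer goes here
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()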

Augmented Reality line that moves with an image in OpenCV

I want to do the following in OpenCV. The problem involves a bottle that needs a line drawn on the image, and the line needs to rotate with the movement of the bottle.
The first image needs red lines as the borders and a green line initiated.
The second image needs the green line in the middle once the bottle has rotated; that is, the green line has to follow the rotation of the bottle.
Finally, as in the third image, the application needs to quit or save the picture when the green line becomes aligned with the red line.
I tried doing this in OpenCV using template matching: I kept a template image and tracked it with the template-matching algorithm, but it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)

line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)
    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx + tcols, MPy + trows), (0, 0, 255), 2)
    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_" + str(i) + ".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use other algorithms, or am I approaching this the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Could I use other algorithms such as MeanShift or frame differencing to achieve this?
If I were you, I would solve this problem with a line-detection algorithm. Of course, you can choose any other robust algorithm; my idea is simply to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue), and the green line.
When the green line passes the left border, quit.
Tracking the green line
First, you need to find the edges of the frame so the green line can be tracked efficiently.
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Second, find the approximate length of the green line. There is no direct way to find it, so settle on a value by trial and error. Once you are sure, initialize the line detector (note that cv2.ximgproc requires the opencv-contrib-python package):

lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the coordinates and check whether the green line has crossed the border.
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if ret:
        rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
        lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
        frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        frm_cny = cv2.Canny(frm_gry, 50, 200)
        lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
        if lns is not None:
            for ln in lns:
                x1 = int(ln[0][0])
                y1 = int(ln[0][1])
                x2 = int(ln[0][2])
                y2 = int(ln[0][3])
                cv2.line(frm, pt1=(x1, y1), pt2=(x2, y2), color=(0, 255, 0), thickness=3)
                print("({}, {})-({}, {})".format(x1, y1, x2, y2))
                if x1 <= 232:
                    break
        cv2.imshow("frm", frm)
        cv2.waitKey(1)
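One caveat with the sketch above: Canny responds to every strong edge, so the detector will also report the bottle's other contours. If only the green line should be tracked, a hedged variant (the HSV bounds below are illustrative, not tuned) is to mask green pixels first and run Canny on the mask:

import cv2
import numpy as np

def green_line_edges(frm):
    # Keep only green pixels, then take the edges of the green regions.
    hsv = cv2.cvtColor(frm, cv2.COLOR_BGR2HSV)
    low_green = np.array([40, 80, 80])     # illustrative bounds
    high_green = np.array([85, 255, 255])
    mask = cv2.inRange(hsv, low_green, high_green)
    return cv2.Canny(mask, 50, 200)

Feeding this edge map to the same FastLineDetector leaves the rest of the loop unchanged.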

Save both input video and output video in OpenCV Python

I wrote code based on the OpenCV documentation about motion detection. I want to save both my input and my output as videos, but I am having problems: I can only save the output video, and sometimes the input video is the same as the output video. My input is from my computer's camera and looks like a normal video; the output is that same video with squares drawn around moving objects. I need your help.
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)

ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
        Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                                 cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cv.destroyAllWindows()
cap.release()
I tried the following, but it doesn't work:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
(grabbed, frame) = cap.read()
fshape = frame.shape
fheight = fshape[0]
fwidth = fshape[1]
print(fwidth, fheight)

fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (fwidth, fheight))
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (fwidth, fheight))

ret, frame1 = cap.read()
ret, frame2 = cap.read()

while cap.isOpened():
    diff = cv.absdiff(frame1, frame2)
    gray = cv.cvtColor(diff, cv.COLOR_BGR2GRAY)
    blur = cv.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv.threshold(blur, 20, 255, cv.THRESH_BINARY)
    dilated = cv.dilate(thresh, None, iterations=3)
    contours, _ = cv.findContours(dilated, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        (x, y, w, h) = cv.boundingRect(contour)
        if cv.contourArea(contour) < 100:
            continue
        cv.rectangle(frame1, (x, y), (x + w, y + h), (0, 0, 255), 3)
        Final_Movie = cv.putText(frame1, "Status: {}".format('Movement'), (10, 20),
                                 cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    #cv.drawContours(frame1, contours, -1, (0, 0, 255), 3)
    cv.imshow("feed", frame1)
    cv.imshow("feed1", blur)
    cv.imshow("feed2", gray)
    cv.imshow("feed3", diff)
    cv.imshow("feed4", thresh)
    cv.imshow("feed5", dilated)
    #cv.imshow("feed6", contours)
    if ret == True:
        out.write(Final_Movie)
        out.write(frame2)   # note: this also writes to 'out'; out1 is never used
    frame1 = frame2
    ret, frame2 = cap.read()
    if cv.waitKey(40) == 27:
        break

cv.destroyAllWindows()
cap.release()
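A likely cause, offered as a hedged reading of the code above: both write() calls go to the same writer (out), so output.avi receives annotated and raw frames interleaved while out1 (input.avi) is never written, and neither writer is released. A minimal sketch of the corrected writer usage (the motion-detection drawing is elided; putText stands in for the annotations):

import cv2 as cv

cap = cv.VideoCapture(0)
grabbed, frame = cap.read()
h, w = frame.shape[:2]

fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('output.avi', fourcc, 20.0, (w, h))   # annotated video
out1 = cv.VideoWriter('input.avi', fourcc, 20.0, (w, h))   # raw camera video

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    out1.write(frame)                  # save the raw frame before drawing
    annotated = frame.copy()           # draw on a copy, not the original
    cv.putText(annotated, "Status: Movement", (10, 20),
               cv.FONT_HERSHEY_DUPLEX, 1, (0, 0, 255), 3)
    out.write(annotated)
    cv.imshow("feed", annotated)
    if cv.waitKey(40) == 27:
        break

out.release()                          # finalize both files
out1.release()
cap.release()
cv.destroyAllWindows()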

How to put bounding box around the detected human outline

Here is the Python code I have written:
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
How do I put a bounding box around the detected human outline, and how can I improve the efficiency of this Python code for background subtraction on a live webcam feed? Can someone help?
Drawing Contour Using Background Subtraction
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    # fgmask is already single-channel, so it can be thresholded directly
    ret, th1 = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
    _, contours, hierarchy = cv2.findContours(th1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000 and area < 40000:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
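Background-subtraction masks tend to be speckled, which multiplies the contours the loop has to reject. As a hedged refinement (not part of the original answer), opening and then dilating the mask before findContours usually cuts the noise:

import cv2
import numpy as np

def clean_mask(fgmask):
    # Kernel size and iteration count are illustrative; tune per scene.
    kernel = np.ones((3, 3), np.uint8)
    opened = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # drop speckles
    return cv2.dilate(opened, kernel, iterations=2)            # rejoin fragments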
Drawing Contour using HSV Masking and Convex Hull
Set values for the HSV mask.
import cv2
import argparse
import numpy as np
from matplotlib import pyplot as plt

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 103, 40])
    upper = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    fg = cv2.bitwise_and(frame, frame, mask=255 - mask)
    fg = cv2.cvtColor(fg.copy(), cv2.COLOR_HSV2BGR)
    fg = cv2.cvtColor(fg, cv2.COLOR_BGR2GRAY)
    fg = cv2.threshold(fg, 120, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    #plt.imshow(fg)
    #plt.show()
    se = np.ones((3, 3), np.uint8)   # structuring element (defined before use)
    fgclosing = cv2.morphologyEx(fg.copy(), cv2.MORPH_CLOSE, se)
    #fgdilated = cv2.morphologyEx(fgclosing, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (4,4)))
    fgdilated = cv2.dilate(fgclosing, kernel=se, iterations=8)
    img = frame.copy()
    ret, threshed_img = cv2.threshold(fgdilated, 127, 255, cv2.THRESH_BINARY)
    # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hier)
    image, contours, hier = cv2.findContours(threshed_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        #print(cv2.contourArea(cnt))
        if cv2.contourArea(cnt) > 44000:
            # get convex hull
            hull = cv2.convexHull(cnt)
            #cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            #print(hull)
            (x, y, w, h) = cv2.boundingRect(cnt)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            contours = hull
            #c1 = max(contours, key=cv2.contourArea)
            hull = cv2.convexHull(cnt)
            c = hull
            #print(c)
            cv2.drawContours(img, [hull], -1, (0, 0, 255), 1)
            # determine the most extreme points along the contour
            extLeft = tuple(c[c[:, :, 0].argmin()][0])
            extRight = tuple(c[c[:, :, 0].argmax()][0])
            extTop = tuple(c[c[:, :, 1].argmin()][0])
            extBot = tuple(c[c[:, :, 1].argmax()][0])
            cv2.drawContours(img, [c], -1, (0, 255, 255), 2)
            cv2.circle(img, extLeft, 8, (0, 0, 255), -1)
            cv2.circle(img, extRight, 8, (0, 255, 0), -1)
            cv2.circle(img, extTop, 8, (255, 0, 0), -1)
            cv2.circle(img, extBot, 8, (255, 255, 0), -1)
            lx = extLeft[1]
            ly = extLeft[0]
            rx = extRight[1]
            ry = extRight[0]
            tx = extTop[1]
            ty = extTop[0]
            bx = extBot[1]
            by = extBot[0]
            x, y = lx, by
            w, h = abs(rx - lx), abs(ty - by)
            #cv2.rectangle(img, (x,y), (x+w,y+h), (255, 0, 0), 2)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(extLeft[0]) + ',' + str(extLeft[1]), extLeft, font, 2, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(img, str(extRight[0]) + ',' + str(extRight[1]), extRight, font, 2, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extTop[0]) + ',' + str(extTop[1]), extTop, font, 2, (255, 0, 0), 2, cv2.LINE_AA)
            cv2.putText(img, str(extBot[0]) + ',' + str(extBot[1]), extBot, font, 2, (255, 255, 0), 2, cv2.LINE_AA)
            im = frame[tx:bx, ly:ry, :]
            cx = im.shape[1] // 2
            cy = im.shape[0] // 2
            cv2.circle(im, (cx, cy), 15, (0, 255, 0))
    plt.imshow(img)
    plt.show()
You can use findContours.
import cv2
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())

if not args.get("video", False):
    cap = cv2.VideoCapture(0)
else:
    cap = cv2.VideoCapture(args["video"])

fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    mask = 255 - fgmask
    # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    fgmask = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)
    for contour in contours:
        area = cv2.contourArea(contour)
        # only show contours that match the area criteria
        if area > 500 and area < 20000:
            rect = cv2.boundingRect(contour)
            x, y, w, h = rect
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), (0, 255, 0), 3)
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I tested this with the video https://github.com/opencv/opencv/blob/master/samples/data/vtest.avi
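One portability note on the answers above: they use the OpenCV 3.x findContours signature, which returns three values, while OpenCV 4.x returns two. A small helper, offered as a hedged compatibility sketch, absorbs the difference:

import cv2

def find_contours(mask, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE):
    # OpenCV 3.x: (image, contours, hierarchy); OpenCV 4.x: (contours, hierarchy)
    result = cv2.findContours(mask, mode, method)
    return result[0] if len(result) == 2 else result[1]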
