OpenCV: how to detect in a specific position - Python

Can I detect fires that are already inside the square at specific positions? I already have code that detects fire within a fixed region of the view, but can I change it to a smaller region?
Also, can I run a function (named "main") every 5 minutes, but at different times? Here is my current code:
import cv2
import numpy as np
import math
import time
import asyncio
from asyncio import sleep

yukseklik = int(input("Enter the height: "))  # prompt was Turkish: "Yukseklik giriniz"
hiz = input("Enter the speed: ")              # prompt was Turkish: "Hizi giriniz"
kez = 0

cap = cv2.VideoCapture(0)

while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([0, 80, 20])
    high_red = np.array([35, 255, 255])
    kernal = np.ones((5, 5), "uint8")
    low_red1 = np.array([160, 100, 20])
    high_red1 = np.array([190, 255, 255])  # OpenCV hue maxes out at 179, so this acts like 179
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)

    contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.rectangle(frame, (213, 160), (426, 320), (255, 255, 255), 4)  # the detection zone

    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imframe = cv2.rectangle(frame, (x - 20, y + 20), (x + w, y + h), (255, 255, 255), 2)
            if 213 < x < 426 and 160 < y < 320:  # contour origin inside the zone
                if kez == 0:
                    def main():
                        global kez
                        g = 9.80
                        y = 2 * (yukseklik - 15)
                        u = float(y) / g
                        x_ = math.sqrt(u)
                        x_ = x_ * float(hiz)
                        xi = float(yukseklik) * 1.73205080756887729352744463415059  # sqrt(3)
                        print("x= ", str(xi))
                        print("x'= ", str(x_))
                        t = (float(xi) - float(x_)) / float(hiz)
                        print("t= ", t)
                        kez = kez + 1
                        asyncio.sleep(5)  # NOTE: creates a coroutine but never awaits it, so this does not pause at all
                        kez = 0
                    main()

    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)
    key = cv2.waitKey(1)
    if key == 27:
        break
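For the second part of the question, running main every 5 minutes without freezing the capture loop: one approach is to compare timestamps inside the loop instead of sleeping. A minimal sketch, assuming a 300-second interval and a placeholder main() (wire in the real one from the code above):

import time

INTERVAL = 300.0           # 5 minutes, as asked in the question
last_run = float("-inf")   # forces an immediate first run

def main():
    # placeholder; substitute the main() from the code above
    print("main ran at", time.strftime("%H:%M:%S"))

while True:
    # ... grab a frame and run the detection here ...
    if time.monotonic() - last_run >= INTERVAL:
        main()
        last_run = time.monotonic()
    time.sleep(0.01)       # stands in for the cv2.waitKey(1) call of the real loop

threading.Timer(300, main) would be an alternative, but a timestamp check keeps everything on the main thread, which is safer because OpenCV's GUI functions generally should not be called from multiple threads.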

Related

NameError: name 'video' is not defined - Python 3.8

I have a problem with the code below; it is a fire-detection program for video files. When I run it, I get this error:
video.release()
NameError: name 'video' is not defined
The video variable is defined in the code, and it's not clear to me why the error occurs.
import cv2
import numpy as np
import sys
import math

title_window = 'Linear Blend'
cv2.namedWindow(title_window)

def on_trackbar(val):
    pass

cv2.createTrackbar("Hmax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Hmin", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Smax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Smin", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Vmax", title_window, 0, 255, on_trackbar)
cv2.createTrackbar("Vmin", title_window, 0, 255, on_trackbar)

def detectFire(src):  # src is the input image
    # contours is the list of white regions (three return values: OpenCV 3.x API)
    _, contours, _ = cv2.findContours(src, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]  # area of each contour
    if len(contour_sizes) > 0:
        # the contour with the largest area; biggest_contour holds the white-pixel coordinates
        biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
        mask = np.zeros(src.shape, np.uint8)
        cv2.drawContours(mask, [biggest_contour], -1, 255, -1)  # draw biggest_contour onto the mask image
        rect = cv2.boundingRect(biggest_contour)
        return mask, rect
    else:
        return src, [0, 0, 0, 0]

if len(sys.argv) == 2:
    # load video file from first command line argument
    video = cv2.VideoCapture("Fire_smoke.mp4")  # open the video from its path
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))    # video width
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))  # video height
    fps = video.get(cv2.CAP_PROP_FPS)  # frames per second
    frame_time = round(1000 / fps)
    last_white = 0
    ret, frame = video.read()
    pause = False
    sum_diff = 0
    loop = 0
    while True:
        if pause is False:
            ret, frame = video.read()
            if not ret:
                print("... end of video file reached")
                break
        cv2.imshow("origin", frame)
        Hmax = cv2.getTrackbarPos('Hmax', title_window)
        Hmin = cv2.getTrackbarPos('Hmin', title_window)
        Smax = cv2.getTrackbarPos('Smax', title_window)
        Smin = cv2.getTrackbarPos('Smin', title_window)
        Vmax = cv2.getTrackbarPos('Vmax', title_window)
        Vmin = cv2.getTrackbarPos('Vmin', title_window)
        # lower = [Hmin, Smin, Vmin]
        # upper = [Hmax, Smax, Vmax]
        lower = [6, 152, 138]  # lower bound of flame color in HSV
        upper = [48, 248, 255]
        blur = cv2.GaussianBlur(frame, (21, 21), 0)  # denoise
        hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)  # convert color space
        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")
        mask = cv2.inRange(hsv, lower, upper)
        cv2.imshow("resdfd", mask)  # mask is a binary image of the flame-colored regions
        mask, rect = detectFire(mask)
        detected_edges = cv2.Canny(mask, 100, 200, 3)  # extract edges
        n_white = np.sum(detected_edges == 255)
        diff_white = n_white - last_white
        last_white = n_white
        if diff_white > 0 and diff_white < 100:
            sum_diff += diff_white
        loop += 1
        if loop > 50:
            loop = 0
            if sum_diff > 150:
                print("fire")
            else:
                print("clear")
            print("fluctuation:", sum_diff)
            sum_diff = 0
        cv2.imshow("cen", detected_edges)
        # image display and key handling
        output = cv2.bitwise_and(frame, frame, mask=mask)
        x, y, w, h = rect
        cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.line(output, (width - 10, height - 10), (width - 10, height - 10 - int(n_white * height / 5000)), (0, 0, 255), 5)
        cv2.imshow(title_window, output)
        if cv2.waitKey(10) == ord('x'):
            break
        elif cv2.waitKey(10) == ord('p'):
            pause = True
        elif cv2.waitKey(10) == ord('r'):
            pause = False

cv2.destroyAllWindows()
video.release()  # NOTE: reached even when the `if` block above was skipped
I tried changing the variables, but I don't understand the cause of the error; I'm fairly new to programming.
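A likely cause, given the layout above: when the script is launched without exactly one command-line argument, the if len(sys.argv) == 2: block never runs, so video is never assigned, yet video.release() at the bottom executes anyway. A minimal sketch of a guard (a suggested fix, not code from the question; it also actually uses the argument instead of the hard-coded filename):

import sys
import cv2

video = None
if len(sys.argv) == 2:
    video = cv2.VideoCapture(sys.argv[1])  # open the file named on the command line
    # ... the processing loop from the question goes here ...

cv2.destroyAllWindows()
if video is not None:  # release only if the capture was actually created
    video.release()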

Can't see point marker in RViz (ROS)

My aim is to follow a green ball with the camera and show the ball's movements in the view. To do this, I mark the middle of the ball with a marker, but I can't see it in RViz. Neither the code nor RViz reports any errors, and I am able to visualize the /webcam topic, but the markers don't show up. What could be the problem? I am using ROS Noetic. Thank you.
#!/usr/bin/env python3
import rospy
from sensor_msgs.msg import Image
import cv2
import numpy as np
from cv_bridge import CvBridge, CvBridgeError
import sys
from collections import deque
from imutils.video import VideoStream
from visualization_msgs.msg import Marker, MarkerArray
from geometry_msgs.msg import Point
import gc
import argparse
import time
import imutils

def camdet():
    bridge = CvBridge()
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", help="Path to the (optional) video file")
    ap.add_argument("-b", "--buffer", default=64, type=int, help="max buffer size")
    args = vars(ap.parse_args())
    greenLower = (29, 86, 6)
    greenUpper = (64, 255, 255)
    pts = deque(maxlen=args["buffer"])
    if not args.get("video", False):
        vs = VideoStream(src=0).start()
    else:
        vs = cv2.VideoCapture(args["video"])
    time.sleep(2.0)
    rospy.init_node("object_detection", anonymous=True)
    while True:
        frame = vs.read()
        frame = frame[1] if args.get("video", False) else frame
        if frame is None:
            break
        frame = imutils.resize(frame, width=600)
        blurred = cv2.GaussianBlur(frame, (11, 11), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, greenLower, greenUpper)
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        center = None
        v = 0
        if len(cnts) > 0:
            c = max(cnts, key=cv2.contourArea)
            ((x, y), radius) = cv2.minEnclosingCircle(c)
            M = cv2.moments(c)
            center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
            if radius > 10:
                cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)
                cv2.circle(frame, center, 5, (0, 0, 255), -1)
                v += 1
        pts.append(center)
        marker = Marker()
        marker.header.frame_id = "base_link"
        marker.ns = "marked"
        marker.id = 0
        marker.type = marker.POINTS
        marker.action = marker.ADD
        marker.scale.x = 1
        marker.scale.y = 1
        marker.scale.z = 0.1
        marker.pose.orientation.x = 0
        marker.pose.orientation.y = 0
        marker.pose.orientation.z = 0
        marker.pose.orientation.w = 1
        points = [0, 0]
        for i in range(v):
            p = Point()
            p.x = int(M['m10'] / M['m00'])
            p.y = int(M['m01'] / M['m00'])
            points.clear()
            points.append(p)
            marker.points = points
            print(marker.points)
            marker.pose.position.x = int(M['m10'] / M['m00'])
            marker.pose.position.y = int(M['m01'] / M['m00'])
        marker.color.a = 0.4
        marker.color.r = 0.9
        marker.color.g = 0.1
        marker.color.b = 0.2
        marker.lifetime = rospy.Duration()
        img_pub = rospy.Publisher('webcam/image_raw', Image, queue_size=10)
        rate = rospy.Rate(10)
        msg = bridge.cv2_to_imgmsg(frame, encoding="bgr8")
        img_pub.publish(msg)
        marker_pub = rospy.Publisher("marker", Marker, queue_size=100)
        marker_pub.publish(marker)
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    if not args.get("video", False):
        vs.stop()
    else:
        vs.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    try:
        camdet()
    except rospy.ROSInterruptException:
        pass
The ROS wiki says: "Don't forget to set color.a = 1 or your marker will be invisible!"
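Beyond the alpha value, two other things in the code above commonly keep a marker from rendering: the publishers are created inside the while loop (a fresh rospy.Publisher needs time to connect, so a message published immediately after construction is often dropped), and raw pixel coordinates are published as meters in the base_link frame, which places the points far outside the default RViz view. A minimal sketch of a marker that should render, assuming RViz's Fixed Frame is set to base_link (the node name, rate, and placeholder position are assumptions):

#!/usr/bin/env python3
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point

rospy.init_node("marker_demo")
marker_pub = rospy.Publisher("marker", Marker, queue_size=10)  # create once, outside the loop

rate = rospy.Rate(10)
while not rospy.is_shutdown():
    marker = Marker()
    marker.header.frame_id = "base_link"  # must match the Fixed Frame chosen in RViz
    marker.header.stamp = rospy.Time.now()
    marker.type = Marker.POINTS
    marker.action = Marker.ADD
    marker.scale.x = 0.1  # for POINTS, scale.x/scale.y are the point width/height in meters
    marker.scale.y = 0.1
    marker.color.a = 1.0  # fully opaque, per the ROS wiki note
    marker.color.g = 1.0
    p = Point()
    p.x, p.y, p.z = 1.0, 0.0, 0.0  # placeholder; scale the pixel coordinates down to meters here
    marker.points = [p]
    marker_pub.publish(marker)
    rate.sleep()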

How to remove the shadow of a moving object from an image using OpenCV (Python)?

I am doing background subtraction using MOG2. It was working fine, but when a moving object casts a deep shadow, the shadow is detected as a foreground object, and I don't want the shadow in the foreground (I'm running MOG2 over 13 images). How can I remove these shadows so that they don't end up in the foreground?
Here are sample images:
[original image]
[image after applying MOG2]
Here is my sample code:
import os
import numpy as np
import cv2
import glob
import imutils

i = 0
bg_flag = 0
image_list = []
bgs_list = []

# background-subtraction function
def detection(image_list):
    global i
    global bg_flag
    bgs3_img = None
    # NOTE: the positional args are (history, varThreshold, detectShadows);
    # cv2.THRESH_BINARY (value 0) is being passed as varThreshold here
    backsub = cv2.createBackgroundSubtractorMOG2(128, cv2.THRESH_BINARY, 1)
    print("start2")
    for k in range(len(image_list)):
        frame = image_list[k]
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imwrite('./gray/' + str(k) + '.jpg', frame)
        #blur = cv2.medianBlur(frame, 21)
        blur = frame
        bgs_list.append(blur)
    for bg in range(len(bgs_list)):
        rects = []
        #start_time = time.time()
        frame_blur = bgs_list[bg]
        img = image_list[bg].copy()
        s_frame = image_list[bg]
        new_frame = s_frame.copy()
        fgmask = backsub.apply(frame_blur)
        cv2.imwrite("./bgs/" + str(i) + ".jpg", fgmask)
        fgmask[fgmask == 127] = 0  # zero out pixels MOG2 labels as shadow
        cv2.imwrite("./dilate/" + str(i) + ".jpg", fgmask)
        thresh = cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=1)
        thresh = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        for c in cnts:
            #M = cv2.moments(c)
            A = cv2.contourArea(c)
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(new_frame, (x, y), (x + w, y + h), (0, 0, 255), 1)
            cv2.putText(new_frame, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            cv2.imwrite("./area/" + str(i) + ".jpg", new_frame)
            cv2.rectangle(thresh, (x, y), (x + w, y + h), (255, 255, 255), 1)
            cv2.putText(thresh, str(A), (x - 10, y - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
            cv2.imwrite("./area_bgs/" + str(i) + ".jpg", thresh)
        i += 1
    print("Done!")

# this folder contains 13 continuous images
images = glob.glob('./inci4/*.jpg')
for j in range(len(images)):
    img = cv2.imread(images[j])
    img = cv2.resize(img, (360, 640))
    image_list.append(img)
detection(image_list)
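MOG2 can label shadows itself: with detectShadows=True, shadow pixels come out gray (127 by default) in the mask, so a threshold above that value keeps only definite foreground. A minimal sketch (the history and varThreshold values are illustrative, not tuned for these 13 images):

import cv2

# shadows enabled: MOG2 marks them 127 instead of 255
backsub = cv2.createBackgroundSubtractorMOG2(history=128, varThreshold=16, detectShadows=True)

def foreground_without_shadows(frame):
    fgmask = backsub.apply(frame)
    # keep only definite foreground (255); shadow pixels (127) fall below the threshold
    _, fg = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    return fg

Note that the original call passes cv2.THRESH_BINARY (which is 0) as varThreshold, so nearly every small fluctuation is treated as foreground; that alone can make shadows much more prominent.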

Shape and centroid of desired color for the following code

I'm a beginner in OpenCV with Python. I am trying to detect the shape, as well as the centroid, of the colored object (the object detected within the color range) in this code. Please help; thanks in advance.
CODE:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2, math
import numpy as np

class ColourTracker:
    def __init__(self):
        cv2.namedWindow("ColourTrackerWindow", cv2.CV_WINDOW_AUTOSIZE)
        self.capture = cv2.VideoCapture(0)
        self.scale_down = 4

    def run(self):
        while True:
            f, orig_img = self.capture.read()
            #orig_img = cv2.flip(orig_img, 1)
            #img = cv2.GaussianBlur(orig_img, (5,5), 0)
            #laplacian = cv2.Laplacian(orig_img,cv2.CV_64F)
            #sobelx = cv2.Sobel(orig_img,cv2.CV_64F,1,0,ksize=5)
            #sobely = cv2.Sobel(orig_img,cv2.CV_64F,0,1,ksize=5)
            img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
            img = cv2.resize(img, (len(orig_img[0]) / self.scale_down, len(orig_img) / self.scale_down))
            boundaries = [([0, 150, 0], [5, 255, 255])]  #,([50, 140, 10], [255, 255, 255]),([10, 150, 180], [255, 255, 255])]
            for (lower, upper) in boundaries:
                lower = np.array(lower, np.uint8)
                upper = np.array(upper, np.uint8)
                binary = cv2.inRange(img, lower, upper)
                dilation = np.ones((15, 15), "uint8")
                binary = cv2.dilate(binary, dilation)
                #edge = cv2.Canny(red_binary,200,300,apertureSize = 3)
                contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                max_area = 0
                largest_contour = None
                for idx, contour in enumerate(contours):
                    area = cv2.contourArea(contour)
                    if area > max_area:
                        max_area = area
                        largest_contour = contour
                for cnt in largest_contour:
                    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
                    print len(approx)
                    if len(approx) == 14:
                        print "circle"
                        #cv2.drawContours(orig_img,[cnt], 0, (0, 0, 255), 2)
                if not largest_contour == None:
                    moment = cv2.moments(largest_contour)
                    if moment["m00"] > 1000 / self.scale_down:
                        rect = cv2.minAreaRect(largest_contour)
                        rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
                        #box = cv2.cv.BoxPoints(rect)
                        #box = np.int0(box)
                        #cv2.drawContours(img,[cnt],0,255,-1)
                        cv2.drawContours(orig_img, [cnt], 0, (0, 0, 255), 2)
            cv2.imshow("ColourTrackerWindow", orig_img)
            if cv2.waitKey(20) == 27:
                cv2.destroyWindow("ColourTrackerWindow")
                self.capture.release()
                break

if __name__ == "__main__":
    colour_tracker = ColourTracker()
    colour_tracker.run()
A second version of the code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2, math
import numpy as np

class ColourTracker:
    def __init__(self):
        cv2.namedWindow("ColourTrackerWindow", cv2.CV_WINDOW_AUTOSIZE)
        self.capture = cv2.VideoCapture(1)
        self.scale_down = 4

    def run(self):
        while True:
            f, orig_img = self.capture.read()
            #orig_img = cv2.flip(orig_img, 1)
            img = cv2.GaussianBlur(orig_img, (5, 5), 0)
            img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)
            img = cv2.resize(img, (len(orig_img[0]) / self.scale_down, len(orig_img) / self.scale_down))
            boundaries = [([0, 150, 150], [5, 255, 255]),
                          ([40, 80, 10], [255, 255, 255]),
                          ([190, 150, 100], [255, 255, 255])]
            for (lower, upper) in boundaries:
                lower = np.array(lower, np.uint8)
                upper = np.array(upper, np.uint8)
                binary = cv2.inRange(img, lower, upper)
                dilation = np.ones((15, 15), "uint8")
                binary = cv2.dilate(binary, dilation)
                canny = cv2.Canny(binary, 100, 200)
                contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                max_area = 0
                largest_contour = None
                for idx, contour in enumerate(contours):
                    area = cv2.contourArea(contour)
                    if area > max_area:
                        max_area = area
                        largest_contour = contour
                if not largest_contour == None:
                    moment = cv2.moments(largest_contour)
                    if moment["m00"] > 1000 / self.scale_down:
                        rect = cv2.minAreaRect(largest_contour)
                        rect = ((rect[0][0] * self.scale_down, rect[0][1] * self.scale_down), (rect[1][0] * self.scale_down, rect[1][1] * self.scale_down), rect[2])
                        box = cv2.cv.BoxPoints(rect)
                        box = np.int0(box)
                        cv2.drawContours(orig_img, [box], 0, (0, 0, 255), 2)
            cv2.imshow("ColourTrackerWindow", orig_img)
            cv2.imshow("SHAPE", canny)
            if cv2.waitKey(20) == 27:
                cv2.destroyWindow("ColourTrackerWindow")
                self.capture.release()
                break

if __name__ == "__main__":
    colour_tracker = ColourTracker()
    colour_tracker.run()
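For the centroid, the image moments of the largest contour give it directly; for a rough shape label, the vertex count of approxPolyDP is the usual heuristic. A minimal sketch in Python 3 with current OpenCV (the epsilon factor and the shape-naming rule are illustrative):

import cv2

def describe_contour(contour):
    """Return (centroid, shape_name) for a contour; a heuristic sketch."""
    M = cv2.moments(contour)
    if M["m00"] == 0:
        return None, "unknown"
    centroid = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    approx = cv2.approxPolyDP(contour, 0.01 * cv2.arcLength(contour, True), True)
    if len(approx) == 3:
        shape = "triangle"
    elif len(approx) == 4:
        shape = "rectangle"
    elif len(approx) > 8:
        shape = "circle"  # many vertices: roughly round
    else:
        shape = "polygon"
    return centroid, shape

Note that approxPolyDP must be applied to the whole contour; the loop for cnt in largest_contour: in the first version iterates over individual points, which is likely why the vertex counts look odd.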

CamShift + face detection in OpenCV

I'm currently combining two OpenCV examples, one that detects your face and one that tracks an object. The purpose is to first detect the face and then track it.
My current code:
import numpy as np
import cv2
import cv2.cv as cv
import video
import math

cascade = 0
counter = 0

class App(object):
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('camshift')
        self.selection = None
        self.drag_start = None
        self.tracking_state = 0
        self.show_backproj = False

    def show_hist(self):
        bin_count = self.hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
        for i in xrange(bin_count):
            h = int(self.hist[i])
            cv2.rectangle(img, (i * bin_w + 2, 255), ((i + 1) * bin_w - 2, 255 - h), (int(180.0 * i / bin_count), 255, 255), -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist', img)

    '''
    #param: img the image for the face detection
    #param: cascade the cascade of the Viola-Jones face detection
    #return: rects, an array of the corners of the detected face: [x1 y1 x2 y2]
    '''
    def detect(self, img, cascade):
        # Detect the faces
        rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(150, 150), flags=cv.CV_HAAR_SCALE_IMAGE)
        # Check if any faces are detected
        if len(rects) == 0:
            # return empty array
            return []
        else:
            # Get the correct x and y values
            rects[:, 2:] += rects[:, :2]
            # loop over the rects and shrink the width by 40%
            for rec in rects:
                rec[0] = rec[0] + int(math.floor(((rec[2] - rec[0]) * 0.4) / 2))
                rec[2] = rec[2] - int(math.floor(((rec[2] - rec[0]) * 0.4) / 2))
            return rects

    def draw_rects(self, img, rects, color):
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

    def getFace(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = self.detect(gray, cascade)
        self.rects = rects
        img = self.draw_rects(img, rects, (0, 255, 0))
        if len(rects) != 0:
            self.selection = rects[0][1], rects[0][0], rects[0][3], rects[0][2]
        return rects

    def run(self):
        counter = 0
        rects = None
        while True:
            counter += 1
            ret, self.frame = self.cam.read()
            vis = self.frame.copy()
            if counter % 150 == 0:
                rects = self.getFace(vis)
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
            if rects is not None:
                self.draw_rects(vis, rects, (0, 255, 0))
            if self.selection:
                print 'test0'
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1 - x0, y1 - y0)
                hsv_roi = hsv[x0:x1, y0:y1]
                mask_roi = mask[x0:x1, y0:y1]
                hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.show_hist()
                vis_roi = vis[x0:x1, y0:y1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0
                self.tracking_state = 1
                self.selection = None
            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
                if self.show_backproj:
                    vis[:] = prob[..., np.newaxis]
                try:
                    cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except:
                    print track_box
            cv2.imshow('camshift', vis)
            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()

if __name__ == '__main__':
    import sys, getopt
    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    App(video_src).run()
Currently I show where the face was initially detected (in a green rectangle) and what is being tracked at the moment (in a red oval). I am able to detect the face, but the tracker keeps locking onto everything except my face (always one or two shoulders). I suspected it had something to do with the coordinates, but I've checked them (mask_roi, hsv_roi, vis_roi) and they seem fine. An example:
[example image]
Can anybody point out my mistake?
I was unable to run your code (no module named video). I'm using OpenCV 2.4.4, and my solution to your problem is as follows:
Make sure your face is properly lit (no shadows, bright natural skin color; a dark background helps a lot).
Play with the HSV values in mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))). I'm using these: np.array((0., 51., 89.)), np.array((17., 140., 255.)).
Tip: you can make a window just for your mask so you can see how well it works.
After cv2.namedWindow('camshift'), put cv2.namedWindow('mask'),
and after mask = cv2.inRange(...), put cv2.imshow('mask', mask) (or show mask_roi).
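A minimal sketch of that tip spliced into the question's code (the HSV bounds are the values quoted above; the comments mark where each line goes):

# inside App.__init__, next to the existing window:
cv2.namedWindow('camshift')
cv2.namedWindow('mask')   # extra window for inspecting the skin mask

# inside App.run(), right after the mask is computed:
hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, np.array((0., 51., 89.)), np.array((17., 140., 255.)))
cv2.imshow('mask', mask)  # a mostly-white face region means the bounds suit your lighting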
