When calculating optical flow, why does frame have no value? - Python

I'm trying to implement this code for optical flow, but when I call cv2.imshow('frame', img) I get an error because img, frame, and mask are all None.
Can anybody help me understand where the problem is?
Here's my code:
import cv2
import numpy as np

feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
lk_params = dict( winSize = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0,255,(100,3))

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, old_frame = cap.read()
    old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    mask = np.zeros_like(old_frame)
    while (1):
        ret, frame = cap.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        good_new = p1[st==1]
        good_old = p0[st==1]
        for i,(new,old) in enumerate(zip(good_new,good_old)):
            a,b = new.ravel()
            c,d = old.ravel()
            mask = cv2.line(mask, (a,b), (c,d), color[i].tolist(), 2)
            frame = cv2.circle(frame, (a,b), 5, color[i].tolist(), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('frame', img)
        k = cv2.waitKey(30) & 0xff
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1,1,2)

cv2.destroyAllWindows()
cap.release()

You have

mask = cv2.line(...)
frame = cv2.circle(...)

but in your OpenCV build, cv2.line and cv2.circle draw on the image in place and return None. Assigning their return value overwrites mask and frame with None on the first loop iteration, which is why img, frame, and mask are all None by the time cv2.imshow runs.
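A minimal fix (a sketch, assuming a build where these drawing functions do return None): call them for their in-place side effect and don't reassign the result:

for i, (new, old) in enumerate(zip(good_new, good_old)):
    a, b = new.ravel()
    c, d = old.ravel()
    # draw in place; don't capture the return value, it is None here
    cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
    cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
img = cv2.add(frame, mask)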

Related

Is there any alternative to pixel-by-pixel for loops in OpenCV?

I am trying to perform a pixel-by-pixel operation to threshold the frames from a live camera, but the window shows "Not Responding". I think it's because of the for loops used to compare each pixel. It would really help if there were an alternative for thresholding images based on specific conditions. Below is the code for better understanding:
import cv2
import numpy as np
import imutils
import time
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
args = vars(ap.parse_args())

if args.get("video", None) is None:
    cap = cv2.VideoCapture(0)
    time.sleep(2.0)
else:
    cap = cv2.VideoCapture(args["video"])

firstFrame = None
while True:
    _, frame = cap.read()
    if np.shape(frame) == ():
        cap = cv2.VideoCapture(args["video"])
        continue
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21,21), 0)
    if firstFrame is None:
        firstFrame = gray
        continue
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations = 2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    if len(cnts) != 0:
        c = max(cnts, key = cv2.contourArea)
        (x, y, w, h) = cv2.boundingRect(c)
        ROI = frame[x:x+w, y:y+h]
        (a, b, c) = np.shape(ROI)
        if a == 0 or b == 0 or c == 0:
            continue
        img = ROI
        Rule1 = img.copy()
        Rule2 = img.copy()
        Rule3 = img.copy()
        Rule4 = img.copy()
        Rule5 = img.copy()
        YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
        Y = YCrCb[:,:,0]
        Cr = YCrCb[:,:,1]
        Cb = YCrCb[:,:,2]
        Ym = int(np.mean(Y))
        Crm = int(np.mean(Cr))
        Cbm = int(np.mean(Cb))
        h = img.shape[0]
        w = img.shape[1]
        for x in range(0, h):
            for y in range(0, w):
                Rule1[x,y] = 255 if Y[x,y] > Cb[x,y] else 0
                Rule2[x,y] = 255 if Cr[x,y] > Cb[x,y] else 0
                Rule3[x,y] = 255 if abs(int(Cb[x,y]) - int(Cr[x,y])) >= 70 else 0
                Rule4[x,y] = 255 if Y[x,y] > Ym or Cb[x,y] > Cbm or Cr[x,y] > Crm else 0
                Rule5[x,y] = 255 if Cb[x,y] <= 120 and Cr[x,y] >= 150 else 0
        FireRegion = cv2.bitwise_and(Rule1, Rule2)
        FireRegion = cv2.bitwise_and(FireRegion, Rule3)
        FireRegion = cv2.bitwise_and(FireRegion, Rule4)
        FireRegion = cv2.bitwise_and(FireRegion, Rule5)
        cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 2)
        cv2.imshow("frame", frame)
        cv2.imshow("FireRegion", FireRegion)
        time.sleep(0.2)
    else:
        cv2.imshow("frame", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
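One common alternative (an editorial sketch, not from the thread): compute the five rules with NumPy boolean masks instead of a Python-level loop over pixels. Note these masks come out single-channel, unlike the 3-channel img.copy() buffers above, but cv2.bitwise_and combines them the same way:

# Vectorized rules, assuming Y, Cr, Cb, Ym, Crm, Cbm are defined as above
Rule1 = np.where(Y > Cb, 255, 0).astype(np.uint8)
Rule2 = np.where(Cr > Cb, 255, 0).astype(np.uint8)
# cast to a signed type first so the subtraction can't wrap around
Rule3 = np.where(np.abs(Cb.astype(np.int16) - Cr.astype(np.int16)) >= 70, 255, 0).astype(np.uint8)
Rule4 = np.where((Y > Ym) | (Cb > Cbm) | (Cr > Crm), 255, 0).astype(np.uint8)
Rule5 = np.where((Cb <= 120) & (Cr >= 150), 255, 0).astype(np.uint8)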

Stuck with the issue: src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4 in function 'getPerspectiveTransform'

I know that this issue has other posts, but none of them helped me:
import cv2 as cv
import numpy as np

widthImg = 640
heightImg = 480
frameWidth = 640
frameHeight = 480

cap = cv.VideoCapture(2)
cap.set(3, widthImg)
cap.set(4, heightImg)
cap.set(10, 150)

def preProcessing(img):
    imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    imgBlur = cv.GaussianBlur(imgGray, (5,5), 1)
    imgCanny = cv.Canny(imgBlur, 200, 200)
    kernel = np.ones((5,5))
    imgDial = cv.dilate(imgCanny, kernel, iterations=2)
    imgThres = cv.erode(imgDial, kernel, iterations=1)
    return imgThres

def getContours(img):
    biggest = np.array([])
    maxArea = 0
    contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv.contourArea(cnt)
        if area > 5000:
            #cv.drawContours(imgContour, cnt, -1, (255,0,0), 3)
            peri = cv.arcLength(cnt, True)
            approx = cv.approxPolyDP(cnt, 0.02*peri, True)
            if area > maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
    cv.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)
    return biggest

def getWarp(img, biggest):
    pts1 = np.array(biggest, np.float32)
    pts2 = np.array([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]], np.float32)
    matrix = cv.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv.warpPerspective(img, matrix, (widthImg, heightImg))
    return imgOutput

while True:
    success, img = cap.read()
    img = cv.resize(img, (widthImg, heightImg))
    imgContour = img.copy()
    imgThres = preProcessing(img)
    biggest = getContours(imgThres)
    print(biggest)
    imgWarped = getWarp(img, biggest)
    cv.imshow("Result", imgWarped)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break
So this is my code (I'm using PyCharm). I know the problem comes from using biggest at line 44 (pts1 = np.array(biggest, np.float32)): if I write four two-dimensional points instead of biggest, it works. But in the video I'm following, the programmer uses biggest without any problem, and getContours is supposed to return the four points in biggest. I don't know why it isn't working in my case when it works for others.
And this is the error message that I get:
Traceback (most recent call last):
  File "/home/nvidia/PycharmProjects/OpencvPython/Resources/Project2.py", line 61, in <module>
    imgWarped = getWarp(img,biggest)
  File "/home/nvidia/PycharmProjects/OpencvPython/Resources/Project2.py", line 46, in getWarp
    matrix = cv.getPerspectiveTransform(pts1, pts2)
cv2.error: OpenCV(4.5.1) /tmp/pip-req-build-q3gzfcr4/opencv/modules/imgproc/src/imgwarp.cpp:3392: error: (-215:Assertion failed) src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4 in function 'getPerspectiveTransform'
Sorry if this is a stupid issue, but I'm new to Python programming and I've spent a couple of hours trying to solve it.
Use this; it's how I solved it. The assertion fails on the frames where no four-point contour is found, so biggest stays empty and an empty array reaches cv2.getPerspectiveTransform. Guarding the warp with if biggest.size != 0 avoids that:
import cv2
import numpy as np

cap = cv2.VideoCapture(1)
# width id: 3, height id: 4, brightness id: 10
cap.set(3, 640)
cap.set(4, 480)
width, height = 640, 480

def preprocessing(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5,5), 1)
    imgCanny = cv2.Canny(imgBlur, 150, 150)
    kernel = np.ones((5,5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=2)
    imgErode = cv2.erode(imgDial, kernel, iterations=1)
    return imgErode

def contour(img):
    biggest = np.array([])
    maxArea = 0
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 5000:
            #cv2.drawContours(imgcontour, cnt, -1, (255,0,0), 3)
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02*peri, True)
            if area > maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
    cv2.drawContours(imgcontour, biggest, -1, (255,0,0), 20)
    return biggest

def reorder(myPoints):
    pass

def warp(img, biggest):
    pts1 = np.float32(biggest)
    pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgout = cv2.warpPerspective(img, matrix, (width, height))
    return imgout

while True:
    success, img = cap.read()
    cv2.resize(img, (width, height))
    imgcontour = img.copy()
    imgThres = preprocessing(img)
    biggest = contour(imgThres)
    print(biggest)
    if biggest.size != 0:
        imgwarp = warp(img, biggest)
        cv2.imshow('video', imgwarp)
    else:
        cv2.imshow('video', imgThres)
    if cv2.waitKey(1) & 0xFF == 27:
        break
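The answer leaves reorder as a stub. approxPolyDP returns the four corners in arbitrary order, so the warp can come out flipped or twisted; a common sketch of reorder (an assumption in the style of the tutorial, not code from the answer) sorts the corners by coordinate sums and differences:

def reorder(myPoints):
    # Order 4 points as top-left, top-right, bottom-left, bottom-right,
    # matching pts2 above: smallest x+y is TL, largest x+y is BR,
    # smallest y-x is TR, largest y-x is BL.
    pts = myPoints.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), np.int32)
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(s)]   # top-left
    ordered[3] = pts[np.argmax(s)]   # bottom-right
    ordered[1] = pts[np.argmin(d)]   # top-right
    ordered[2] = pts[np.argmax(d)]   # bottom-left
    return ordered

With that in place, calling biggest = reorder(biggest) before warp(img, biggest) keeps pts1 consistent with the pts2 corner order.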

Detect high variation of optical flow in video

I need to detect high variation of the optical flow in a video, for example at a crossroads: two cars drive along with some steady optical flow, then they collide, which produces a sudden high variation of the optical flow. How do I detect it?
I already compute the optical flow with binarization and a mask, and I expect the result to fire an event when the variation of the optical flow is high. How do I capture this event?
import cv2 as cv
import numpy as np

def label_flows(flows):
    criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv.KMEANS_RANDOM_CENTERS
    h, w = flows.shape[:2]
    flows = flows.reshape(h*w, -1)
    comp, labels, centers = cv.kmeans(flows, 2, None, criteria, 10, flags)
    n = np.sum(labels == 1)
    camera_motion_label = np.argmax([labels.size - n, n])
    labeled = np.uint8(255*(labels.reshape(h, w) == camera_motion_label))
    return labeled

def find_target_in_labeled_flow(labeled_flow):
    labeled_flow = cv.bitwise_not(labeled_flow)
    bw = 10
    h, w = labeled_flow.shape[:2]
    border_cut = labeled_flow[bw:h-bw, bw:w-bw]
    conncomp, stats = cv.connectedComponentsWithStats(border_cut, connectivity=8)[1:3]
    target_label = np.argmax(stats[1:, cv.CC_STAT_AREA]) + 1
    img = np.zeros_like(labeled_flow)
    img[bw:h-bw, bw:w-bw] = 255*(conncomp == target_label)
    return img

def put_optical_flow_arrows_on_image(image, optical_flow_image, threshold=2.0, skip_amount=30):
    image = image.copy()
    if len(image.shape) == 2:
        image = np.stack((image,)*3, axis=2)
    flow_start = np.stack(np.meshgrid(range(optical_flow_image.shape[1]),
                                      range(optical_flow_image.shape[0])), 2)
    flow_end = (optical_flow_image[flow_start[:,:,1], flow_start[:,:,0], :1]*3 + flow_start).astype(np.int32)
    norm = np.linalg.norm(flow_end - flow_start, axis=2)
    norm[norm < threshold] = 0
    nz = np.nonzero(norm)
    for i in range(0, len(nz[0]), skip_amount):
        y, x = nz[0][i], nz[1][i]
        cv.arrowedLine(image,
                       pt1=tuple(flow_start[y, x]),
                       pt2=tuple(flow_end[y, x]),
                       color=(0, 255, 0),
                       thickness=1,
                       tipLength=.2)
    return image

if __name__ == '__main__':
    cap = cv.VideoCapture("video.mp4")
    ret, first_frame = cap.read()
    prev_gray = cv.cvtColor(first_frame, cv.COLOR_BGR2GRAY)
    mask = np.zeros_like(first_frame)
    mask[..., 1] = 255
    cv.namedWindow('input', cv.WINDOW_NORMAL)
    cv.namedWindow('binarized', cv.WINDOW_NORMAL)
    cv.namedWindow('dense_optical_flow', cv.WINDOW_NORMAL)
    cv.namedWindow('color', cv.WINDOW_NORMAL)
    while cap.isOpened():
        ret, frame = cap.read()
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        flow = cv.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
        mask[..., 0] = angle * 180 / np.pi / 2
        mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
        rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)
        binary_flow = label_flows(flow)
        optical_flow_arrows = put_optical_flow_arrows_on_image(gray, flow)
        hsv = cv.cvtColor(optical_flow_arrows, cv.COLOR_BGR2HSV)
        mask_green = cv.inRange(hsv, (36, 25, 25), (70, 255, 255))
        imask = mask_green > 0
        green = np.zeros_like(optical_flow_arrows, np.uint8)
        green[imask] = optical_flow_arrows[imask]
        # Here I need to calculate the variation of the optical flow
        # Any ideas about how to do it?
        cv.imshow("binarized", binary_flow)
        cv.imshow("dense_optical_flow", optical_flow_arrows)
        cv.imshow('color', green)
        prev_gray = gray
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv.destroyAllWindows()
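One illustrative approach (an editorial sketch under stated assumptions, not a standard OpenCV routine): track a per-frame summary of the flow, such as the standard deviation of the magnitude, and fire when it jumps well above its recent average:

import collections

flow_spread_history = collections.deque(maxlen=30)  # ~1 s of history at 30 fps

def high_flow_variation(magnitude, factor=3.0):
    # spread of the flow magnitudes in this frame
    spread = float(np.std(magnitude))
    baseline = np.mean(flow_spread_history) if flow_spread_history else spread
    flow_spread_history.append(spread)
    # fire when the spread jumps well above its recent average
    return spread > factor * max(baseline, 1e-6)

Called right after cv.cartToPolar in the loop above, if high_flow_variation(magnitude): would mark frames where the flow field suddenly becomes much less uniform, e.g. at a collision; the window length and factor are assumptions to tune against your footage.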

Blob detection in Python - Video not opening

import numpy as np
import cv2

cap = cv2.VideoCapture('car.avi')
size = (int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
fourcc = cv2.cv.CV_FOURCC(*'AVID')
video = cv2.VideoWriter('6.avi', fourcc, 25, size)

while(1):
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.convertScaleAbs(frame)
    params = cv2.SimpleBlobDetector_Params()
    params.blobColor = 0
    params.filterByColor = True
    params.minArea = 0
    params.filterByArea = False
    params.minThreshold = 120
    params.maxThreshold = 255
    detector = cv2.SimpleBlobDetector(params)
    keypoints = detector.detect(frame)
    im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    if ret == True:
        video.write(im_with_keypoints)
        cv2.imshow('frame', im_with_keypoints)
    else:
        cap.release()
        video.release()
        break
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break
I modified your code and now it runs (I used Python 3.5). The cv2.cv submodule was removed in OpenCV 3, so the capture-property and FOURCC constants need their new names, and on OpenCV 3+ the detector must be built with cv2.SimpleBlobDetector_create:
import numpy as np
import cv2

cap = cv2.VideoCapture(r'E:/test.mp4')
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter(r'E:/6.avi', fourcc, 25, size)

while(1):
    ret, frame = cap.read()
    if not ret:
        break
    frame = cv2.convertScaleAbs(frame)
    params = cv2.SimpleBlobDetector_Params()
    params.blobColor = 0
    params.filterByColor = True
    params.minArea = 0
    params.filterByArea = False
    params.minThreshold = 120
    params.maxThreshold = 255
    ver = (cv2.__version__).split('.')
    if int(ver[0]) < 3:
        detector = cv2.SimpleBlobDetector(params)
    else:
        detector = cv2.SimpleBlobDetector_create(params)
    keypoints = detector.detect(frame)
    im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    if ret == True:
        video.write(im_with_keypoints)
        cv2.imshow('frame', im_with_keypoints)
    else:
        cap.release()
        break
    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

OpenCV example error - TypeError: 'NoneType' object is not subscriptable

I'm trying to run a Python example from the OpenCV site:
http://docs.opencv.org/trunk/d7/d8b/tutorial_py_lucas_kanade.html
import numpy as np
import cv2

cap = cv2.VideoCapture('slow.flv')

# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )

# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors
color = np.random.randint(0,255,(100,3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)

while(1):
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)
        frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)
    img = cv2.add(frame,mask)

    cv2.imshow('frame',img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)

cv2.destroyAllWindows()
cap.release()
I use Python 3 to run this example.
It works, but only for 5-15 seconds with my videos, and then stops with this error:

Traceback (most recent call last):
  File "o.py", line 28, in <module>
    good_new = p1[st==1]
TypeError: 'NoneType' object is not subscriptable

What can be wrong in this example?
That happens when all the tracked optical flow points (the colored dots on your screen) leave the frame. If the array p1 is None, find features again and then recalculate the optical flow; that should work.
Add this inside the while loop (though over time it simply fills your entire screen with lines):
if p1 is None:
    p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
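A small refinement on top of that (an assumption, not part of the original answer): clear the drawing mask whenever the features are re-detected, so stale tracks don't pile up into a screen full of lines:

if p1 is None:
    mask = np.zeros_like(frame)  # drop the stale tracks before re-detecting
    p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)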
