How does cap.get(cv2.CAP_PROP_POS_MSEC) work? - python

I have a video with this information:
fps = 29.971695558454563
number of frames = 26983
duration (S) = 900.2827333333333
duration (M:S) = 15:0.28273333333333994
My question is: why, when I use:
cap.set(cv2.CAP_PROP_POS_FRAMES, 30)
does the following call:
cap.get(cv2.CAP_PROP_POS_MSEC)
return:
0.5
Shouldn't it be something close to 1000 ms (30 frames / 29.97 fps ≈ 1.001 s)?
cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
cap.set(cv2.CAP_PROP_POS_FRAMES, 30)
print('FPS VIDEO',fps)
print('Mils',cap.get(cv2.CAP_PROP_POS_MSEC))
timestamp = get_hms(cap.get(cv2.CAP_PROP_POS_MSEC))
FPS VIDEO 29.971695558454563
Mils 0.5
hora 0:00:00.000500
from datetime import timedelta

def get_hms(millis):
    hms = timedelta(milliseconds=millis)
    print('hora', hms)
    return hms

Well, it is just a bug:
https://github.com/opencv/opencv/issues/15749
It will only work correctly if you call:
ret, frame = cap.read()
before calling:
print('Mils',cap.get(cv2.CAP_PROP_POS_MSEC))
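For example (a minimal sketch, assuming the video_path from the question; the workaround is simply to decode one frame before querying the property, and you can also sidestep the property entirely by computing the timestamp from the frame index and the FPS):
import cv2

cap = cv2.VideoCapture(video_path)  # video_path as in the question
fps = cap.get(cv2.CAP_PROP_FPS)

cap.set(cv2.CAP_PROP_POS_FRAMES, 30)
ret, frame = cap.read()  # workaround: read a frame so POS_MSEC gets updated
print('Mils', cap.get(cv2.CAP_PROP_POS_MSEC))

# Alternative that avoids the buggy property: derive the timestamp of frame N
# from the frame index you set and the FPS reported by the container.
frame_idx = 30
print('Mils (computed)', 1000.0 * frame_idx / fps)  # ~1001 ms for this video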

Related

Array creation too slow

I am trying to create an image array from scratch.
I got the code running, but it takes around 30 seconds to run.
I feel it could be faster by using numpy native functions.
How can I do this?
import cv2
import numpy as np
import time
volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = 256
image_channel = 3
show_img = False
def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data
start_time = time.time()
for ii in range(len(volumes)-image_width):
    # ===================== part to optimize start
    final_image = np.zeros((image_heigh, image_width, image_channel))
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    for xxx in range(image_width):
        final_image[:int(vol_norm[xxx]), xxx, :] = 1
    # ===================== part to optimize end
    if show_img:
        image = np.float32(final_image)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        cv2.imshow("ok", image)
        cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
What can I do to make this image array creation faster?
I need to create the image at every timestep because I want to simulate a real live data stream, with new data arriving at each timestep.
This is why I would like to optimize only this part of the code:
for xxx in range(image_width):
    final_image[:int(vol_norm[xxx]), xxx, :] = 1
How can I do this?
The first, simplest optimizations are these:
1) Compare values to np.arange(...) instead of using the inner loop.
2) Use a gray image instead of 3-channel RGB: 3 times less data to process.
3) Use the np.uint8 type instead of np.float32; it is faster to process and doesn't need conversion to float32 for cv2 visualization.
All these optimizations give a huge speedup (about 10x): my running time is 2.6 sec instead of the 27 sec before.
Another very useful optimization that I didn't do here: you don't need to recompute previous image pixels when the max/min of the whole data within the current window didn't change; you need to recompute the previous image data only when the max/min changed. I expect that your real-life data changes gradually, like Forex or Bitcoin prices, hence the max/min within a window changes quite rarely.
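As a rough sketch of that caching idea (illustration only, not code from this answer; it reuses the volumes, image_heigh, image_width and nomralized names defined above, assumes the same 1-pixel sliding window, and relies on vol_min being fixed at 0 so that only the window maximum matters):
prev_max = None
cached = np.zeros((image_heigh, image_width), dtype=np.uint8)
aranges = np.arange(image_heigh, dtype=np.int32)[:, None]
for ii in range(len(volumes) - image_width):
    current_vols = volumes[ii:ii + image_width]
    vol_max = np.max(current_vols)
    if vol_max != prev_max:
        # Window max changed: re-render every column of the image.
        vol_norm = nomralized(current_vols, 0, vol_max, image_heigh)
        cached = (aranges < vol_norm[None, :].astype(np.int32)).astype(np.uint8) * 255
        prev_max = vol_max
    else:
        # Window max unchanged: shift the cached image left by one column
        # and render only the newly arrived rightmost column.
        new_norm = nomralized(current_vols[-1:], 0, vol_max, image_heigh)
        new_col = (aranges < int(new_norm[0])).astype(np.uint8) * 255
        cached = np.concatenate((cached[:, 1:], new_col), axis=1)
    final_image = cached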
Optimizations 1)-3) mentioned above are implemented in the next code:
import cv2
import numpy as np
import time
volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = 256
image_channel = 3
show_img = False
def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data
start_time = time.time()
aranges = np.arange(image_heigh, dtype = np.int32)[:, None]
for ii in range(len(volumes)-image_width):
    # ===================== part to optimize start
    #final_image = np.zeros((image_heigh, image_width, image_channel), dtype = np.float32)
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    final_image = (aranges < vol_norm[None, :].astype(np.int32)).astype(np.uint8) * 255
    # ===================== part to optimize end
    if show_img:
        cv2.imshow('ok', final_image)
        cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
For the code above I did one more optimization of the inner loop, which speeds it up roughly another 2x, down to 1.3 sec. But I also put back 3 channels plus float32, which reduced the speed again, resulting in a final 2.8 sec; here is the code.
Another optimization is possible if re-computing old image data is not needed.
The main thing to optimize was that you were re-computing almost the same whole image on each step, shifting by just 1 pixel along the width. Instead, compute the whole image once, then shift right not by 1 pixel but by the whole image width.
After this optimization the running time is 0.08 sec.
Do the 1-pixel stepping only for showing the animation, not for computing the image data; if you need speed, the image data should be computed just once.
import cv2
import numpy as np
import time
volumes = np.random.randint(low=0, high=200, size=10000)
print(volumes)
image_heigh = 128
image_width = volumes.size #256
image_channel = 3
screen_width = 256
show_img = False
def nomralized(data, data_min, data_max, maximum_value):
    nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
    return nomamized_data
start_time = time.time()
for ii in range(0, len(volumes), image_width):
    # ===================== part to optimize start
    final_image = np.zeros((image_heigh, image_width, image_channel))
    start = ii
    end = ii + image_width
    current_vols = volumes[start:end]
    # nomalize data
    vol_min = 0
    vol_max = np.max(current_vols)
    vol_norm = nomralized(data=current_vols,
                          data_min=vol_min,
                          data_max=vol_max,
                          maximum_value=image_heigh)
    for xxx in range(image_width):
        final_image[:int(vol_norm[xxx]), xxx, :] = 1
    # ===================== part to optimize end
    if show_img:
        for start in range(0, final_image.shape[1] - screen_width):
            image = np.float32(final_image[:, start : start + screen_width])
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            cv2.imshow("ok", image)
            cv2.waitKey(27)
print("total running time: ", (time.time() - start_time))
I also created an animated image out of your data:
If you want to create the same animation, just append the next piece of code to the end of the script above:
# Needs: python -m pip install pillow
import PIL.Image
imgs = [PIL.Image.fromarray(final_image[:, start : start + screen_width].astype(np.uint8) * 255) for start in range(0, final_image.shape[1] - screen_width, 6)]
imgs[0].save('result.png', append_images = imgs[1:], save_all = True, lossless = True, duration = 100)
I've also implemented a simulation of real-time live-stream data rendering/visualizing.
The live_stream() generator spits out a random amount of data at random points in time, to simulate the data generation process.
stream_fetcher() listens to the live stream and records all received data into a Python queue q0; this fetcher runs in one thread.
renderer() takes the data recorded by the fetcher and renders it into an image through your mathematical formulas and normalization process; it renders as much data as is available, resulting in images of varying widths, and the rendered images are saved to another queue q1.
visualizer() visualizes the rendered data by fetching as many rendered images as are available.
All functions run in separate threads so as not to block the whole process. Also, if any thread works too slowly, it skips some of the data in order to catch up with the current real-time data, so no queue overflows.
You may also notice that the visualized process is jumpy; that is not because the functions are slow, but because the live stream spits out a different amount of data at each time step, which is how real-time data usually behaves.
In the next code I also did the extra optimization mentioned before, namely not re-computing the image if the min/max didn't change.
import cv2, numpy as np
import time, random, threading, queue
image_height = 256
image_width = 512
# Make results reproducible and deterministic
np.random.seed(0)
random.seed(0)
def live_stream():
    last = 0.
    while True:
        a = np.random.uniform(low = -1., high = 1., size = random.randint(1, 20)).astype(np.float64).cumsum() + last
        yield a
        last = a[-1]
        time.sleep(random.random() * 0.1)
q0 = queue.Queue()
def stream_fetcher():
    for e in live_stream():
        q0.put(e)
threading.Thread(target = stream_fetcher, daemon = True).start()
aranges = np.arange(image_height, dtype = np.int32)[:, None]
q1 = queue.Queue()
def renderer():
    def normalized(data, data_min, data_max, maximum_value):
        nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
        return nomamized_data
    prev_image = np.zeros((image_height, 0), dtype = np.uint8)
    prev_vols = np.zeros((0,), dtype = np.float64)
    while True:
        data = []
        data.append(q0.get())
        try:
            while True:
                data.append(q0.get(block = False))
        except queue.Empty:
            pass
        vols = np.concatenate(data)[-image_width:]
        prev_vols = prev_vols[-(image_width - vols.size) or prev_vols.size:]
        concat_vols = np.concatenate((prev_vols, vols))[-image_width:]
        vols_min, vols_max = np.amin(concat_vols), np.amax(concat_vols)
        if prev_vols.size > 0 and (vols_min < np.amin(prev_vols) - 10 ** -8 or vols_max > np.amax(prev_vols) + 10 ** -8):
            vols = concat_vols
            prev_image = prev_image[:, :-prev_vols.size]
            prev_vols = prev_vols[:0]
        vols_norm = normalized(
            data = vols, data_min = vols_min,
            data_max = vols_max, maximum_value = image_height,
        )
        image = (aranges < vols_norm.astype(np.int32)[None, :]).astype(np.uint8) * 255
        whole_image = np.concatenate((prev_image, image), axis = 1)[:, -image_width:]
        q1.put(whole_image)
        prev_image = whole_image
        prev_vols = concat_vols
threading.Thread(target = renderer, daemon = True).start()
def visualizer():
    imgs = []
    while True:
        data = []
        data.append(q1.get())
        try:
            while True:
                data.append(q1.get(block = False))
        except queue.Empty:
            pass
        image = np.concatenate(data, axis = 1)[:, -image_width:]
        cv2.imshow('ok', image)
        cv2.waitKey(1)
        if imgs is not None:
            try:
                # Needs: python -m pip install pillow
                import PIL.Image
                has_pil = True
            except:
                has_pil = False
                imgs = None
            if has_pil:
                imgs.append(PIL.Image.fromarray(np.pad(image, ((0, 0), (image_width - image.shape[1], 0)), constant_values = 0)))
                if len(imgs) >= 1000:
                    print('saving...', flush = True)
                    imgs[0].save('result.png', append_images = imgs[1:], save_all = True, lossless = True, duration = 100)
                    imgs = None
                    print('saved!', flush = True)
threading.Thread(target = visualizer, daemon = True).start()
while True:
    time.sleep(0.1)
The above live-process simulation is rendered into result.png, which I show below:
I've also decided to improve the visualization by using the more advanced matplotlib instead of cv2, in order to show axes and do real-time plot drawing. The visualization image is below:
Next is the matplotlib-based code corresponding to the last image above:
import cv2, numpy as np
import time, random, threading, queue
image_height = 256
image_width = 512
save_nsec = 20
dpi, fps = 100, 15
# Make results reproducible and deterministic
np.random.seed(0)
random.seed(0)
def live_stream():
    last = 0.
    pos = 0
    while True:
        a = np.random.uniform(low = -1., high = 1., size = random.randint(1, 30)).astype(np.float64).cumsum() + last
        yield a, pos, pos + a.size - 1
        pos += a.size
        last = a[-1]
        time.sleep(random.random() * 2.2 / fps)
q0 = queue.Queue()
def stream_fetcher():
    for e in live_stream():
        q0.put(e)
threading.Thread(target = stream_fetcher, daemon = True).start()
aranges = np.arange(image_height, dtype = np.int32)[:, None]
q1 = queue.Queue()
def renderer():
    def normalized(data, data_min, data_max, maximum_value):
        nomamized_data = maximum_value * ((data - data_min) / (data_max - data_min))
        return nomamized_data
    prev_image = np.zeros((image_height, 0), dtype = np.uint8)
    prev_vols = np.zeros((0,), dtype = np.float64)
    while True:
        data = []
        data.append(q0.get())
        try:
            while True:
                data.append(q0.get(block = False))
        except queue.Empty:
            pass
        data_vols = [e[0] for e in data]
        data_minx, data_maxx = data[0][1], data[-1][2]
        vols = np.concatenate(data_vols)[-image_width:]
        prev_vols = prev_vols[-(image_width - vols.size) or prev_vols.size:]
        concat_vols = np.concatenate((prev_vols, vols))[-image_width:]
        vols_min, vols_max = np.amin(concat_vols), np.amax(concat_vols)
        if prev_vols.size > 0 and (vols_min < np.amin(prev_vols) - 10 ** -8 or vols_max > np.amax(prev_vols) + 10 ** -8):
            vols = concat_vols
            prev_image = prev_image[:, :-prev_vols.size]
            prev_vols = prev_vols[:0]
        vols_norm = normalized(
            data = vols, data_min = vols_min,
            data_max = vols_max, maximum_value = image_height,
        )
        image = (aranges < vols_norm.astype(np.int32)[None, :]).astype(np.uint8) * 255
        whole_image = np.concatenate((prev_image, image), axis = 1)[:, -image_width:]
        q1.put((whole_image, data_maxx - whole_image.shape[1] + 1, data_maxx, vols_min, vols_max))
        prev_image = whole_image
        prev_vols = concat_vols
threading.Thread(target = renderer, daemon = True).start()
def visualizer():
    import matplotlib.pyplot as plt, matplotlib.animation
    def images():
        while True:
            data = []
            data.append(q1.get())
            try:
                while True:
                    data.append(q1.get(block = False))
            except queue.Empty:
                pass
            minx = min([e[1] for e in data])
            maxx = min([e[2] for e in data])
            miny = min([e[3] for e in data])
            maxy = min([e[4] for e in data])
            image = np.concatenate([e[0] for e in data], axis = 1)[:, -image_width:]
            image = np.pad(image, ((0, 0), (image_width - image.shape[1], 0)), constant_values = 0)
            image = np.repeat(image[:, :, None], 3, axis = -1)
            yield image, minx, maxx, miny, maxy
    it = images()
    im = None
    fig = plt.figure(figsize = (image_width / dpi, image_height / dpi), dpi = dpi)
    def animate_func(i):
        nonlocal it, im, fig
        image, minx, maxx, miny, maxy = next(it)
        print(f'.', end = '', flush = True)
        if im is None:
            im = plt.imshow(image, interpolation = 'none', aspect = 'auto')
        else:
            im.set_array(image)
        im.set_extent((minx, maxx, miny, maxy))
        return [im]
    anim = matplotlib.animation.FuncAnimation(fig, animate_func, frames = round(save_nsec * fps), interval = 1000 / fps)
    print('saving...', end = '', flush = True)
    #anim.save('result.mp4', fps = fps, dpi = dpi, extra_args = ['-vcodec', 'libx264'])
    anim.save('result.gif', fps = fps, dpi = dpi, writer = 'imagemagick')
    print('saved!', end = '', flush = True)
    plt.show()
threading.Thread(target = visualizer, daemon = True).start()
while True:
    time.sleep(0.1)
Then I decided to play a bit and colored the last image with an RGB palette: the higher the peak, the more reddish it is; if it is more in the middle, it is more greenish; and if it is low enough, it is more bluish. The resulting image below was achieved with that coloring code (not reproduced here).
And another colored animation below, line-style instead of bar-style, made with the help of similar code (not reproduced here).
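As a rough illustration only of the bar-style coloring described above (a minimal sketch, not the author's code; it assumes a 2-D uint8 bar image such as whole_image from the earlier script, with 255 for filled pixels), per-column coloring by bar height might look like this:
import numpy as np

def colorize_by_height(mono_image):
    # mono_image: 2-D uint8 array with 255 where a bar is filled and 0 elsewhere.
    height, width = mono_image.shape
    filled = mono_image > 0
    heights = filled.sum(axis=0) / float(height)        # per-column bar height in [0, 1]
    r = np.clip(2.0 * heights - 1.0, 0.0, 1.0)          # red grows for tall bars
    g = 1.0 - np.abs(2.0 * heights - 1.0)               # green peaks for mid-height bars
    b = np.clip(1.0 - 2.0 * heights, 0.0, 1.0)          # blue grows for low bars
    palette = np.stack((r, g, b), axis=-1)              # (width, 3) RGB colors in [0, 1]
    colored = filled[:, :, None] * palette[None, :, :]  # paint each filled column with its color
    return (colored * 255).astype(np.uint8)
The returned array is RGB; if you display it with cv2.imshow (which expects BGR), reverse the channel order first.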

Save Video from Opencv

I created space-detection code using grayscale and Gaussian blur, but now I don't know where to put the code that saves my OpenCV video.
I already tried putting the code on random lines, but then the output file cannot be played and the video is only 5.6 KB, even when I record for a very long time.
My code runs fine without the save feature, but I want to add video saving:
fourcc = open_cv.VideoWriter_fourcc(*'DIVX')
out = open_cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))
This is my code, to which I want to add the save-video code from above:
import cv2 as open_cv
import numpy as np
import logging
from drawing_utils import draw_contours
from colors import COLOR_GREEN, COLOR_WHITE, COLOR_BLUE
class MotionDetector:
    LAPLACIAN = 1.4
    DETECT_DELAY = 1

    def __init__(self, video, coordinates, start_frame):
        self.video = 0
        self.coordinates_data = coordinates
        self.start_frame = start_frame
        self.contours = []
        self.bounds = []
        self.mask = []

    def detect_motion(self):
        capture = open_cv.VideoCapture(self.video)
        capture.set(open_cv.CAP_PROP_POS_FRAMES, self.start_frame)
        coordinates_data = self.coordinates_data
        logging.debug("coordinates data: %s", coordinates_data)
        for p in coordinates_data:
            coordinates = self._coordinates(p)
            logging.debug("coordinates: %s", coordinates)
            rect = open_cv.boundingRect(coordinates)
            logging.debug("rect: %s", rect)
            new_coordinates = coordinates.copy()
            new_coordinates[:, 0] = coordinates[:, 0] - rect[0]
            new_coordinates[:, 1] = coordinates[:, 1] - rect[1]
            logging.debug("new_coordinates: %s", new_coordinates)
            self.contours.append(coordinates)
            self.bounds.append(rect)
            mask = open_cv.drawContours(
                np.zeros((rect[3], rect[2]), dtype=np.uint8),
                [new_coordinates],
                contourIdx=-1,
                color=255,
                thickness=-1,
                lineType=open_cv.LINE_8)
            mask = mask == 255
            self.mask.append(mask)
            logging.debug("mask: %s", self.mask)
        statuses = [False] * len(coordinates_data)
        times = [None] * len(coordinates_data)
        while capture.isOpened():
            result, frame = capture.read()
            if frame is None:
                break
            if not result:
                raise CaptureReadError("Error reading video capture on frame %s" % str(frame))
            blurred = open_cv.GaussianBlur(frame.copy(), (5, 5), 3)
            grayed = open_cv.cvtColor(blurred, open_cv.COLOR_BGR2GRAY)
            new_frame = frame.copy()
            logging.debug("new_frame: %s", new_frame)
            position_in_seconds = capture.get(open_cv.CAP_PROP_POS_MSEC) / 1000.0
            for index, c in enumerate(coordinates_data):
                status = self.__apply(grayed, index, c)
                if times[index] is not None and self.same_status(statuses, index, status):
                    times[index] = None
                    continue
                if times[index] is not None and self.status_changed(statuses, index, status):
                    if position_in_seconds - times[index] >= MotionDetector.DETECT_DELAY:
                        statuses[index] = status
                        times[index] = None
                    continue
                if times[index] is None and self.status_changed(statuses, index, status):
                    times[index] = position_in_seconds
            for index, p in enumerate(coordinates_data):
                coordinates = self._coordinates(p)
                color = COLOR_GREEN if statuses[index] else COLOR_BLUE
                draw_contours(new_frame, coordinates, str(p["id"] + 1), COLOR_WHITE, color)
            open_cv.imshow(str(self.video), new_frame)
            k = open_cv.waitKey(1)
            if k == ord("q"):
                break
        capture.release()
        open_cv.destroyAllWindows()

    def __apply(self, grayed, index, p):
        coordinates = self._coordinates(p)
        logging.debug("points: %s", coordinates)
        rect = self.bounds[index]
        logging.debug("rect: %s", rect)
        roi_gray = grayed[rect[1]:(rect[1] + rect[3]), rect[0]:(rect[0] + rect[2])]
        laplacian = open_cv.Laplacian(roi_gray, open_cv.CV_64F)
        logging.debug("laplacian: %s", laplacian)
        coordinates[:, 0] = coordinates[:, 0] - rect[0]
        coordinates[:, 1] = coordinates[:, 1] - rect[1]
        status = np.mean(np.abs(laplacian * self.mask[index])) < MotionDetector.LAPLACIAN
        logging.debug("status: %s", status)
        return status

    @staticmethod
    def _coordinates(p):
        return np.array(p["coordinates"])

    @staticmethod
    def same_status(coordinates_status, index, status):
        return status == coordinates_status[index]

    @staticmethod
    def status_changed(coordinates_status, index, status):
        return status != coordinates_status[index]

class CaptureReadError(Exception):
    pass
Create the file for the video to go in. You can think of this file like a book, but a book with no pages. This code is used to create the file:
fourcc = open_cv.VideoWriter_fourcc(*'DIVX')
out = open_cv.VideoWriter('output.avi',fourcc, 20.0, (640,480))
Not all codecs work on all systems. I use Ubuntu, and this is the code I use to create a video file:
out = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30,(w,h))
If you try to play this video, nothing will happen. There are no frames in the video, so there is nothing to play (like a book with no pages). After you process each frame, the frame needs to be written to the video (like putting a page in a book):
out.write(new_frame)
You should do this around .imshow():
out.write(new_frame)
open_cv.imshow(str(self.video), new_frame)
k = open_cv.waitKey(1)
if k == ord("q"):
    break
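Putting it all together (a minimal sketch rather than the full detector: the input path, codec and FPS are illustrative, and the size passed to VideoWriter must match the frames you actually write):
import cv2 as open_cv

capture = open_cv.VideoCapture("parking.mp4")        # illustrative input path
w = int(capture.get(open_cv.CAP_PROP_FRAME_WIDTH))   # writer size must match the frame size
h = int(capture.get(open_cv.CAP_PROP_FRAME_HEIGHT))

fourcc = open_cv.VideoWriter_fourcc(*'DIVX')         # or 'M','J','P','G' as above
out = open_cv.VideoWriter('output.avi', fourcc, 20.0, (w, h))

while capture.isOpened():
    result, frame = capture.read()
    if not result or frame is None:
        break
    new_frame = frame.copy()
    # ... detection / drawing on new_frame goes here ...
    out.write(new_frame)                             # put a "page" in the "book"
    open_cv.imshow("output", new_frame)
    if open_cv.waitKey(1) == ord("q"):
        break

capture.release()
out.release()                                        # finalize the file so it is playable
open_cv.destroyAllWindows()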

Raspberry Pi 3 Increasing FPS with 720p USB Camera

I wrote some code in Python to open a USB camera and grab frames from it. I use my code for an HTTP stream. For JPEG encoding I use the libturbojpeg library. For this I use a 64-bit OS.
product: Raspberry Pi 3 Model B Rev 1.2
serial: 00000000f9307746
width: 64 bits
capabilities: smp cp15_barrier setend swp
I did some tests with different resolutions:
Resolution    FPS    Encode time (s)
640 x 480     ~35    ~0.01
1280 x 720    ~17    ~0.028
And this is my code
import time
import os
import re
import uvc
from turbojpeg import TurboJPEG, TJPF_GRAY, TJSAMP_GRAY
jpeg = TurboJPEG("/opt/libjpeg-turbo/lib64/libturbojpeg.so")
camera = None
import numpy as np
from threading import Thread
class ProcessJPG(Thread):
    def __init__(self, data):
        self.jpeg_data = None
        self.data = data
        super(ProcessJPG, self).__init__()

    def run(self):
        self.jpeg_data = jpeg.encode((self.data))

    def get_frame(self):
        self.frame = camera.get_frame()
global camera
dev_list = uvc.device_list()
print("devices: ", dev_list)
camera = uvc.Capture(dev_list[1]['uid'])
camera.frame_size = camera.frame_sizes[2]  # set 1280 x 720
camera.frame_rate = camera.frame_rates[0]  # set 30 fps
class GetFrame(Thread):
    def __init__(self):
        self.frame = None
        super(GetFrame, self).__init__()

    def run(self):
        self.frame = camera.get_frame()
_fps = -1
count_to_fps = 0
_real_fps = 0
from time import time
_real_fps = ""
cfps_time = time()
while True:
    if camera:
        t = GetFrame()
        t.start()
        t.join()
        img = t.frame
        timestamp = img.timestamp
        img = img.img
        ret = 1
        t_start = time()
        t = ProcessJPG(img)
        t.start()
        t.join()
        jpg = t.jpeg_data
        t_end = time()
        print(t_end - t_start)
        count_to_fps += 1
        if count_to_fps >= _fps:
            t_to_fps = time() - cfps_time
            _real_fps = 1.0 / t_to_fps
            cfps_time = time()
            count_to_fps = 0
            print("FPS, ", _real_fps)
The encoding line is: jpeg.encode((self.data))
My question is: is it possible to increase the FPS for the 1280 x 720 resolution (e.g. to 30 fps), or should I use a more powerful device? When I look at htop during the computation, the CPU is not used at 100%.
EDIT:
Camera formats:
[video4linux2,v4l2 # 0xa705c0] Raw : yuyv422 : YUYV 4:2:2 : 640x480 1280x720 960x544 800x448 640x360 424x240 352x288 320x240 800x600 176x144 160x120 1280x800
[video4linux2,v4l2 # 0xa705c0] Compressed: mjpeg : Motion-JPEG : 640x480 1280x720 960x544 800x448 640x360 800x600 416x240 352x288 176x144 320x240 160x120
It is possible and you don't need more powerful hardware.
From the pyuvc README.md:
"Capture instance will always grab mjpeg compressed frames from cameras."
When your code accesses the .img property, that invokes jpeg2yuv (see here and here). Then you are re-encoding with jpeg_encode(). Try using frame.jpeg_buffer after the capture and don't touch .img at all.
I took a look at pyuvc on an RPi2 with a Logitech C310 and made a simplified example:
import uvc
import time
def main():
    dev_list = uvc.device_list()
    cap = uvc.Capture(dev_list[0]["uid"])
    cap.frame_mode = (1280, 720, 30)
    tlast = time.time()
    for x in range(100):
        frame = cap.get_frame_robust()
        jpeg = frame.jpeg_buffer
        print("%s (%d bytes)" % (type(jpeg), len(jpeg)))
        #img = frame.img
        tnow = time.time()
        print("%.3f" % (tnow - tlast))
        tlast = tnow
    cap = None
main()
I get ~.033s per frame, which works out to ~30fps at ~8%CPU. If I uncomment the #img = frame.img line it goes up to ~.054s/frame or ~18fps at 99%CPU (the decode time limits the capture rate).

pool map no performance gain

I'm doing video capture with OpenCV using the following:
frame_count = 90000
while f < frame_count:
    ret, frame = cap.read()
    f += 1
    if (f > 41300):
        dst = cv2.warpPerspective(frame.copy(), matrix, (frame.shape[1], frame.shape[0]))
        pipeline(pool_1, pool_2, frame, car_tracker, ped_tracker, df_region, region_buffered, df_line_car, df_line_ped, det, dst, matrix)
        cv2.imshow("frame", frame)
        cv2.imshow('dst', dst)
        cv2.waitKey(1)
In function pipeline:
df_cars= pool_1.map(compute, cars)
df_peds = pool_2.map(compute, peds)
compute is:
def compute(v):
    gb = ('trajectory_id',)
    global general_pd
    id = v[0]
    x = v[1]
    y = v[2]
    frame_id = v[3]
    general_pd.loc[len(general_pd)] = [id, x, y, frame_id]
    grouped = general_pd.loc[general_pd['trajectory_id'] == id]
    df_region = v[4]
    region_buffered = v[5]
    df_line = v[6]
    rpp = RawParameterProcessor(grouped, df_line, frame_idx, df_region, region_buffered, gb=gb)
    df_parameter_car = rpp.compute()
    return df_parameter_car
I don't know what I'm doing wrong. I create two pools, and for each frame of the video capture I dispatch the jobs to them asynchronously, but I don't get any performance gain.

Finding the Wrong Homography

I am trying to do an image stitching project with OpenCV in Python, where I use point matches calculated by tracking points between frames of a video with the Lucas-Kanade algorithm to find homography matrices. After writing the program, when it came time to stitch together the frames of a video, I decided to run a test where I simply display the perspective-warped version of each image onto a black canvas to see how the homography matrix had warped it. When I did this, instead of moving over bit by bit between frames, the frames were translated further and further distances, far beyond a slight nudge between frames:
[----------------------------------------------------------------------------Empty Space------------------------------------]
[Frame0------------------------------------------------------------------------------------------------------------------------]
[------------Frame1----------------------------------------------------------------------------------------------------------- ]
[-------------------------------------------Frame 2----------------------------------------------------------------------------]
[------------------------------------------------------------------------------------------------------------Frame 3-----------]
Subsequent frames would be out of visual range. I am not quite sure why this is happening. I implemented a back-projection error check to make sure only points with accurate optical flow calculations were passed on. I also set the reprojection threshold for findHomography to 10, 1, and then 0.5, all to no avail. I am stitching multiple images, so I am multiplying my homography matrices between frames, which seems to be compounding the error. Why is this happening, and how can I fix my homography matrices? Here is my code (ignore the commented-out tests; also, some of the indentation formatting might have been messed up while copying over to the forum):
import numpy as np
import sys
import cv2
import math
lastFeatures = None
currentFeatures = None
opticFlow = None
panRow = None
Rows = None
finalPanorama = None
def loadRow(dirPath, fType, numImages, column):
    imageRow = []
    for i in range(0, numImages):
        imageRow.append(cv2.imread("%s/%i_%i.%s" % (dirPath, column, i, fType), cv2.IMREAD_COLOR))
    return imageRow
def findNthFeatures(prevImg, prevPnts, nxtImg):
    back_threshold = 0.5
    nxtDescriptors = []
    prevGrey = None
    nxtGrey = None
    nxtPnts = prevPnts[:]
    prevGrey = cv2.cvtColor(prevImg, cv2.COLOR_BGR2GRAY)
    nxtGrey = cv2.cvtColor(nxtImg, cv2.COLOR_BGR2GRAY)
    lucasKanadeParams = dict(winSize = (19,19), maxLevel = 100, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    nxtPnts, status, err = cv2.calcOpticalFlowPyrLK(prevGrey, nxtGrey, prevPnts, None, **lucasKanadeParams)
    backProjections, status, err = cv2.calcOpticalFlowPyrLK(nxtGrey, prevGrey, nxtPnts, None, **lucasKanadeParams)
    d = abs(prevPnts - backProjections).reshape(-1, 2).max(-1)
    status = d < back_threshold
    goodNew = nxtPnts[status].copy()
    goodLast = prevPnts[status].copy()
    return goodLast, goodNew
def getHomographies(videoName):
    color = np.random.randint(0,255,(100,3))
    lastFrame = None
    currentFrame = None
    lastKeypoints = None
    currentKeypoints = None
    firstImage = True
    featureRefreshRate = 5
    feature_params = dict( maxCorners = 100,
                           qualityLevel = 0.1,
                           minDistance = 8,
                           blockSize = 15)
    frameCount = 0
    Homographies = []
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    cap = cv2.VideoCapture(videoName)
    flags, frame = cap.read()
    while flags:
        if firstImage:
            firstImage = False
            lastFrame = frame[:,:].copy()
            lastGray = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
            lastKeypoints = cv2.goodFeaturesToTrack(lastGray, mask = None, **feature_params)
            flags, frame = cap.read()
            frameCount += 1
        else:
            mask = np.zeros_like(lastFrame)
            currentFrame = frame[:,:].copy()
            frameCount += 1
            lastKeypoints, currentKeypoints = findNthFeatures(lastFrame, lastKeypoints, currentFrame)
            # for i,(new,old) in enumerate(zip(currentKeypoints, lastKeypoints)):
            #     a, b = new.ravel()
            #     c, d = old.ravel()
            #     mask = cv2.line(mask, (a,b), (c,d), color[i].tolist(), 2)
            #     frame = cv2.circle(frame, (a,b), 5, color[i].tolist(), -1)
            # img = cv2.add(frame,mask)
            # cv2.imshow('display', img)
            # cv2.waitKey(0)
            homographyMatrix, homographyStatus = cv2.findHomography(currentKeypoints, lastKeypoints, cv2.RANSAC, 0.5)
            Homographies.append(homographyMatrix)
            lastFrame = currentFrame
            lastKeypoints = currentKeypoints
            if frameCount % featureRefreshRate == 0:
                grayBuf = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
                lastKeypoints = cv2.goodFeaturesToTrack(grayBuf, mask = None, **feature_params)
            flags, frame = cap.read()
    return Homographies
def stitchRow(videoName):
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    frameCount = 0
    cap = cv2.VideoCapture(videoName)
    ret, initialImage = cap.read()
    homographyMatrices = []
    homographyMatrices = getHomographies(videoName)
    warpHMat = homographyMatrices[frameCount]
    while ret:
        ret, nextImg = cap.read()
        frameCount += 1
        result = cv2.warpPerspective(nextImg, warpHMat, (initialImage.shape[1] + nextImg.shape[1], nextImg.shape[0]))
        #result[0:initialImage.shape[0], 0:initialImage.shape[1]] = initialImage
        cv2.imshow('display', result)
        cv2.waitKey(0)
        # cv2.imshow('display', initialImage)
        # cv2.waitKey(0)
        warpHMat = homographyMatrices[frameCount]
        for j in range(frameCount, 0, -1):
            warpHMat = warpHMat * homographyMatrices[j-1]
        # initialImage = result[:, :].copy()
stitchRow(sys.argv[1])
