Stream Web Depth Camera (Intel D455) with Python

I'm trying to stream the depth and RGB video of a depth camera (Intel D455) over the web.
I'm reusing a script from here: https://pyshine.com/Live-streaming-multiple-videos-on-a-webpage/
My problem is the following: when I start the script with both threads, both ports (9000 and 9001) display whichever stream was started last. But if I start only one of them, its port displays the correct video (and of course the other port doesn't work).
Do you have an idea where I've made a mistake? (Maybe in the pipeline?)
Thanks
Here is the code:
import cv2
import pyshine as ps
from multiprocessing import Process
import pyrealsense2 as rs
import numpy as np
import threading
HTML="""
<html>
<head>
<title>PyShine Live Streaming</title>
</head>
<body>
<center><h1> PyShine Live Streaming Multiple videos </h1></center>
<center><img src="10.112.33.161:9000/stream.mjpg" width='360' height='240' autoplay playsinline></center>
</body>
</html>
"""
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
class ImgCapture():
    def __init__(self):
        pass

    def read(self):
        # Wait for a coherent pair of frames: depth and color
        self.frames = pipeline.wait_for_frames()
        depth_frame = self.frames.get_depth_frame()
        color_frame = self.frames.get_color_frame()
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_BONE)
        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape
        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
        return (color_image, depth_colormap)

    def isOpened(self):
        # The pipeline is started before the servers run, so report the capture as open.
        # (The original body referenced self.rs, which does not exist on this class.)
        return True
class ImgDepth():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        ret = depth_colormap is not None  # avoid a NameError when no frame arrives
        return (ret, depth_colormap)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
class ImgColor():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        ret = color_image is not None  # avoid a NameError when no frame arrives
        return (ret, color_image)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
def color():
    StreamProps = ps.StreamProps
    StreamProps.set_Page(StreamProps, HTML)
    address = ('10.112.33.161', 9001)  # Enter your IP address
    try:
        StreamProps.set_Mode(StreamProps, 'cv2')
        capture0 = ImgCapture()
        capture1 = ImgColor(capture0)
        StreamProps.set_Capture(StreamProps, capture1)
        StreamProps.set_Quality(StreamProps, 90)
        server = ps.Streamer(address, StreamProps)
        print('Server started at', 'http://' + address[0] + ':' + str(address[1]))
        server.serve_forever()
        print('done')
    except KeyboardInterrupt:
        pipeline.stop()
        server.socket.close()
def depth():
    StreamProps = ps.StreamProps
    StreamProps.set_Page(StreamProps, HTML)
    address = ('10.112.33.161', 9000)  # Enter your IP address
    try:
        StreamProps.set_Mode(StreamProps, 'cv2')
        capture0 = ImgCapture()
        capture2 = ImgDepth(capture0)
        StreamProps.set_Capture(StreamProps, capture2)
        StreamProps.set_Quality(StreamProps, 90)
        server = ps.Streamer(address, StreamProps)
        print('Server started at', 'http://' + address[0] + ':' + str(address[1]))
        server.serve_forever()
    except KeyboardInterrupt:
        pipeline.stop()
        server.socket.close()
if __name__ == '__main__':
    # Start streaming
    pipeline.start(config)
    t1 = threading.Thread(target=depth).start()
    t2 = threading.Thread(target=color).start()
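A note on the likely cause, for anyone else who hits this: ps.StreamProps is a single class configured through class attributes (every set_* call above passes the class itself as self). Both threads therefore write their capture into the same class object, and whichever thread calls StreamProps.set_Capture last wins, so both ports serve that stream. A minimal sketch of a workaround, assuming PyShine reads these attributes from the class object handed to ps.Streamer (the subclass names here are hypothetical):

class DepthProps(ps.StreamProps):
    pass

class ColorProps(ps.StreamProps):
    pass

Configuring DepthProps inside depth() and ColorProps inside color() gives each server its own attribute namespace. The Flask version below sidesteps the shared-state problem entirely.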

If someone needs to stream an Intel depth camera, here is a working version:
import cv2
import pyrealsense2 as rs
import numpy as np
from flask import Flask, render_template, Response
app = Flask('hello')
HTML="""
<html>
<head>
<title>PyShine Live Streaming</title>
</head>
<body>
<center><h1> PyShine Live Streaming Multiple videos </h1></center>
<center><img src="youradresse:port/stream.mjpg" width='360' height='240' autoplay playsinline></center>
</body>
</html>
"""
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
class ImgCapture():
    def __init__(self):
        pass

    def read(self):
        # Wait for a coherent pair of frames: depth and color
        self.frames = pipeline.wait_for_frames()
        depth_frame = self.frames.get_depth_frame()
        color_frame = self.frames.get_color_frame()
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Scale the 16-bit depth image down to 8 bits per pixel so it can be JPEG-encoded
        depth_colormap = cv2.convertScaleAbs(depth_image, alpha=0.03)
        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape
        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]), interpolation=cv2.INTER_AREA)
        return (color_image, depth_colormap)

    def isOpened(self):
        # The pipeline is started before the app runs, so report the capture as open.
        # (The original body referenced self.rs, which does not exist on this class.)
        return True
class ImgDepth():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        ret = depth_colormap is not None  # avoid a NameError when no frame arrives
        return (ret, depth_colormap)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
class ImgColor():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        ret = color_image is not None  # avoid a NameError when no frame arrives
        return (ret, color_image)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
def gen_frames_depth():
    while True:
        success, DEPTH = capture_depth.read()
        if not success:
            break
        else:
            _, buffer_DEPTH = cv2.imencode('.jpg', DEPTH)
            frame_depth = buffer_DEPTH.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n'
                   b'Content-Length: ' + f"{len(frame_depth)}".encode() + b'\r\n'
                   b'\r\n' + frame_depth + b'\r\n')

@app.route('/video_feed_depth')
def video_feed_depth():
    # boundary must be declared without the leading "--"; the parts above emit "--frame"
    return Response(gen_frames_depth(), mimetype='multipart/x-mixed-replace; boundary=frame')
def gen_frames_color():
    while True:
        success, RGB = capture_color.read()
        if not success:
            break
        else:
            _, buffer_RGB = cv2.imencode('.jpg', RGB)
            frame_RGB = buffer_RGB.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n'
                   b'Content-Length: ' + f"{len(frame_RGB)}".encode() + b'\r\n'
                   b'\r\n' + frame_RGB + b'\r\n')

@app.route('/video_feed_color')
def video_feed_color():
    return Response(gen_frames_color(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/')
def index():
    return """
    <body>
    <div class="container">
        <div class="row">
            <div class="col-lg-8 offset-lg-2">
                <h3 class="mt-5">Live Streaming</h3>
                <img src="/video_feed_depth" width="50%">
                <img src="/video_feed_color" width="50%">
            </div>
        </div>
    </div>
    </body>
    """
if __name__ == '__main__':
    # Start streaming
    pipeline.start(config)
    capture0 = ImgCapture()
    capture_depth = ImgDepth(capture0)
    capture_color = ImgColor(capture0)
    app.run(host="0.0.0.0")
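Run the script and open http://<your-host>:5000/ (Flask's default port) in a browser; the index page embeds both the depth and the color MJPEG feeds via the /video_feed_depth and /video_feed_color routes.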

Related

Video stutters when multiple clients requests the stream from the same rtsp source

I have one RTSP server hosting multiple videos, using multiprocessing queues as channels feeding sensor-factory objects in GStreamer. The reason for the multiprocessing queues is a limitation of the nvh264enc encoder in the pipeline (its teardown leaves the ports unusable for subsequent client connections; x264enc has no such problem). The problem appears when multiple clients connect to the same link: even though the factory is shared between clients with the same media element, the stream stutters. The results can be seen in clients such as VLC player.
"""
This module will provide encoder functionality
"""
from base64 import decode
from gc import get_stats
from os import stat
from queue import Empty
import sys
import itertools
import numpy as np
import logging
import threading
import multiprocessing as mp
import gi
from pprint import pprint
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject, GLib
import cv2
import subprocess
import time
GObject.threads_init()
Gst.init(None)
# def on_debug(category, level, dfile, dfctn, dline, source, message, user_data):
# if source:
# print('Debug {} {}: {}'.format(
# Gst.DebugLevel.get_name(level), source.name, message.get()))
# else:
# print('Debug {}: {}'.format(
# Gst.DebugLevel.get_name(level), message.get()))
# if not Gst.debug_is_active():
# Gst.debug_set_active(True)
# level = Gst.debug_get_default_threshold()
# Gst.debug_set_default_threshold(Gst.DebugLevel.INFO)
# if level < Gst.DebugLevel.ERROR:
# Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
# Gst.debug_add_log_function(on_debug, None)
# Gst.debug_remove_log_function(Gst.debug_log_default)
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    handlers=[logging.FileHandler('GPU_enabled_frame_encode.log'), logging.StreamHandler(sys.stdout)])
logging.debug('Debug message')
logging.info('Info message')
logging.warning('Warning message')
logging.error('Error message')
logging.critical('Critical message')
class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, fps, img_shape, cols, verbosity=1, cap=None, speed_preset='medium', properties={}):
        super(SensorFactory, self).__init__(**properties)
        logging.info("sensory factory")
        self.rtsp_media = None
        self.height = int(img_shape[0])
        self.width = int(img_shape[1] * cols)
        self.number_frames = 0
        self.stream_timestamp = 0.0
        self.timestamp = time.time()
        self.dt = 0.0
        self.streamed_frames = 0
        self.verbosity = verbosity
        fps = int(fps)
        self.cap = cap
        self.appsrc = None
        # duration of a frame in nanoseconds (nvh264enc / x264enc)
        self.duration = 1.0 / fps * Gst.SECOND
        key_int_max = ' key-int-max={} '.format(fps)
        caps_str = 'caps=video/x-raw,format=BGR,width={},height={},framerate={}/1 '.format(self.width,
                                                                                           self.height,
                                                                                           fps)
        self.launch_string = 'appsrc name=source is-live=true block=true do-timestamp=true \
            format=GST_FORMAT_TIME ' + caps_str + \
            ' ! queue' \
            ' ! videoconvert' \
            ' ! video/x-raw,format=I420' \
            ' ! nvh264enc' \
            ' ! rtph264pay config-interval=1 pt=96 name=pay0'
    def set_cap(self, cap):
        self.cap = cap

    def on_need_data(self, src, length):
        # this method executes when the client requests data
        # if self.cap.isOpened():
        #     _, frame = self.cap.get_frame()
        #     ret = True
        if self.cap.isOpened():
            frame_queue = self.cap.get_queue()
            frame = frame_queue.get()
            ret = True
            if ret:
                if frame.shape[:2] != (self.height, self.width):
                    frame = cv2.resize(frame, (self.width, self.height))
                data = frame.tostring()
                buf = Gst.Buffer.new_allocate(None, len(data), None)
                buf.fill(0, data)
                buf.duration = self.duration
                timestamp = self.number_frames * self.duration
                buf.pts = buf.dts = int(timestamp)
                buf.offset = timestamp
                self.number_frames += 1
                retval = self.appsrc.emit('push-buffer', buf)
                # print('pushed buffer, frame {}, duration {} ns, durations {} s'.format(self.number_frames,
                #                                                                        self.duration,
                #                                                                        self.duration / Gst.SECOND))
                if retval != Gst.FlowReturn.OK:
                    logging.info("[INFO]: retval not OK: {}".format(retval))
                    if retval == Gst.FlowReturn.FLUSHING:
                        logging.info('Offline')
            elif self.verbosity > 0:
                logging.info("[INFO]: Unable to read frame from cap.")
        # time.sleep(0.05)
    def do_create_element(self, url):
        if self.verbosity > 0:
            request_uri = url.get_request_uri()
            logging.info('[INFO]: stream request on {}'.format(request_uri))
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.rtsp_media = rtsp_media
        rtsp_media.set_reusable(True)
        self.number_frames = 0
        self.appsrc = rtsp_media.get_element().get_child_by_name('source')
        # executes when client requests data
        self.appsrc.connect('need-data', self.on_need_data)

    def get_rtsp_media(self):
        if self.rtsp_media:
            return self.rtsp_media

    def __del__(self):
        print('Destructor called, factory deleted.')
class RTSP_utility_server(GstRtspServer.RTSPServer):
    def __init__(self, fps, suffix='test', rtp_port=8554,
                 ip='12.0.0.0', caps=(None,), Sizes=[[1080, 1920]],
                 speed_preset='medium', verbosity=1, Indexes=[]):
        GObject.threads_init()
        Gst.init(None)
        super(RTSP_utility_server, self).__init__(**{})
        self.verbosity = verbosity
        self.rtp_port = "{}".format(rtp_port)
        if int(self.rtp_port) < 1024 and self.verbosity > 0:
            logging.info(
                '[INFO]: Note, admin privileges are required because port number < 1024.')
        self.set_service(self.rtp_port)
        self.speed_preset = speed_preset
        self.caps = caps
        self.factory = [None] * len(self.caps)
        self.suffix = suffix
        self.fps = fps
        self.Sizes = Sizes
        self.Indexes = Indexes
        self.attach(None)
        self.ip = self.get_ip()
        self.media_path_list = [None] * len(self.caps)
        self.clients_list = []
        if len(self.suffix):
            self.full_suffix = '/' + self.suffix.lstrip('/')
        else:
            self.full_suffix = ''
        self.connect("client-connected", self.client_connected)
        logging.info(
            '[INFO]: streaming on:\n\trtsp://{}:{}/{}#'.format(self.ip, self.rtp_port, self.suffix))
        self.status_thread = threading.Thread(target=self.status_thread_loop)
        self.status_thread.daemon = True
        self.status_thread.start()
        self.context = GLib.MainContext()
        print(self.attach(self.context))
    def set_caps(self, caps):
        if not isinstance(caps, (list, tuple)):
            caps = [caps]
        self.caps = caps

    def create_media_factories(self):
        mount_points = self.get_mount_points()
        media_path_list = []
        for i, cap in enumerate(self.caps):
            img_shape = self.Sizes[i]
            if len(self.Indexes) == 0:
                N_Index = str(i + 1)
            else:
                N_Index = str(self.Indexes[i])
            factory = SensorFactory(fps=self.fps, img_shape=img_shape, speed_preset=self.speed_preset,
                                    cols=1, verbosity=self.verbosity, cap=cap)
            factory.set_shared(True)
            factory.set_stop_on_disconnect(True)
            logging.info('inside media_factories Stream on ' +
                         self.full_suffix + N_Index)
            mount_points.add_factory(self.full_suffix + N_Index, factory)
            self.factory[i] = factory
            media_path_list.append(self.full_suffix + N_Index)
        self.media_path_list = media_path_list
        self.get_status()
    def destroy_media_factories(self):
        session_pool = self.get_session_pool()
        logging.info("Number of sessions are :" +
                     str(session_pool.get_n_sessions()))
        sessions_list = session_pool.filter()
        for session in sessions_list:
            for path in self.get_paths():
                media_matched, _ = session.get_media(path)
                if media_matched:
                    rtsp_media = media_matched.get_media()
                    rtsp_media.set_eos_shutdown(True)
                    rtsp_media.unprepare()
                    logging.debug("media removed for path " + path)
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))

    def destroy_media_factories_by_path(self, path_to_remove="/video1"):
        session_pool = self.get_session_pool()
        logging.info("Number of sessions are :" +
                     str(session_pool.get_n_sessions()))
        sessions_list = session_pool.filter()
        for session in sessions_list:
            for path in self.get_paths():
                media_matched, _ = session.get_media(path)
                if media_matched and path == path_to_remove:
                    rtsp_media = media_matched.get_media()
                    rtsp_media.set_eos_shutdown(True)
                    rtsp_media.unprepare()
                    logging.debug("media removed for path " + path)
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))
    def client_connected(self, gst_server_obj, rtsp_client_obj):
        logging.info('[INFO]: Client has connected')
        self.create_media_factories()
        self.clients_list.append(rtsp_client_obj)
        if self.verbosity > 0:
            logging.info('[INFO]: Client has connected')

    def stop_all(self):
        self.destroy_media_factories()

    def stop_by_index(self, path):
        self.destroy_media_factories_by_path(path)

    def get_paths(self):
        return self.media_path_list

    def get_status(self):
        mount_points = self.get_mount_points()
        session_pool = self.get_session_pool()
        # logging.info("Number of sessions are :" +
        #              str(session_pool.get_n_sessions()))
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))
        for path in self.get_paths():
            sessions_list = session_pool.filter()
            for session in sessions_list:
                session.set_timeout(1)
                media_matched, _ = session.get_media(path)
                if media_matched:
                    rtsp_media = media_matched.get_media()
                    status = rtsp_media.get_status()  # e.g. <enum GST_RTSP_MEDIA_STATUS_PREPARED of type GstRtspServer.RTSPMediaStatus>
                    if "GST_RTSP_MEDIA_STATUS_UNPREPARING" in str(status):
                        # Various teardown attempts, kept for reference:
                        # print(self.context)
                        # GObject.Object.unref(self)
                        # print(status)
                        # transport = media_matched.get_transport(0)
                        # transport.set_active(False)
                        # print(enum_list)
                        # print(rtsp_media.unprepare())
                        # rtsp_media.set_eos_shutdown(True)
                        # session.set_timeout(1)
                        # session.release_media(media_matched)
                        # session.allow_expire()
                        # media_matched.set_state(Gst.State.PAUSED)
                        # media_matched.set_state(Gst.State.READY)
                        # media_matched.set_state(Gst.State.NULL)
                        # rtsp_media.suspend()
                        # rtsp_media.set_pipeline_state(Gst.State.READY)
                        # rtsp_media.set_pipeline_state(Gst.State.NULL)
                        # rtsp_media.unprepare()
                        # self.client_filter()
                        print("removing")
    def status_thread_loop(self):
        while True:
            time.sleep(2)
            try:
                self.get_status()
            except Exception as e:
                print(e)

    def __del__(self):
        print("Destroy called")

    def get_current_encoders_details(self):
        enc_details = dict()
        for i, cap in enumerate(self.caps):
            enc_details[i] = {"path": self.media_path_list[i], "decoder": cap, "factory": self.factory[i]}
        return enc_details
    @staticmethod
    def get_ip():
        return subprocess.check_output("hostname -I", shell=True).decode('utf-8').split(' ')[0]
class encoders():
    """
    This is the container for multiple encoders
    """
    def __init__(self, decoders=[], head=[], resolutions=[], suffix="video") -> None:
        self.encoder_list = []
        self.caps = decoders
        self.fps = 60
        Sizes = resolutions
        self.enc_obj = RTSP_utility_server(self.fps, Sizes=Sizes, speed_preset="medium", caps=decoders, suffix=suffix,
                                           verbosity=1, rtp_port=8554, ip='10.5.1.130')

    def get_encoders(self):
        return self.enc_obj.get_current_encoders_details()

    def set_frame(self, enc_id=0, frame=np.zeros((360, 640, 3))):
        encoder_objects = self.enc_obj.get_current_encoders_details()
        decoder_obj = None
        for id in self.enc_obj.get_current_encoders_details():
            if id == enc_id:
                decoder_obj = encoder_objects[id]["decoder"]
                break
        if decoder_obj:
            decoder_obj.set_canvas(frame)
        else:
            print("Decoder not available")

    def stop_all(self):
        self.enc_obj.stop_all()

    def stop_by_index(self, path="/video1"):
        self.enc_obj.stop_by_index(path)

    def get_status(self):
        self.enc_obj.get_status()
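For context, the decoder objects handed to encoders are expected to expose at least the methods the code above calls (isOpened(), get_queue(), set_canvas()). A minimal stub satisfying that interface, inferred from those call sites and purely hypothetical, might look like:

import multiprocessing as mp

class DecoderStub:
    def __init__(self):
        self._queue = mp.Queue(maxsize=10)

    def isOpened(self):
        return True

    def get_queue(self):
        # SensorFactory.on_need_data() pulls frames from this queue
        return self._queue

    def set_canvas(self, frame):
        # called by encoders.set_frame(); a real decoder would composite here
        try:
            self._queue.put(frame, block=False)
        except Exception:
            pass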
The multiprocessing queue logic is as follows:
while True:
    # print(f'Thread {self.media_id} Looking')
    if self.stop_signal.is_set():
        self.stop_signal.clear()
        self.active = False
        break
    ret, frame = self.capture.read()
    if ret and type(frame) != type(None):
        self.frame = frame.copy()
        self.latest_frame = frame.copy()
        self.recieving_signal.set()
        self.latest_frame_time = time.ctime()
        try:
            self.frame_queue.put(self.frame, block=False)
        except Full:  # queue.Full
            pass
Here capture is a cv2.VideoCapture reading from an offline file. Is there any way to improve the video quality so that it doesn't stutter when new clients connect?
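One thing worth checking (an observation on the code above, not something from the original post): client_connected calls create_media_factories() on every new connection, so a second client can end up with its own media pipeline pulling from the same multiprocessing queue. Each queue.get() hands a frame to exactly one consumer, so N consumers each see roughly 1/N of the frames, which shows up as stutter in VLC. A minimal broadcast-style alternative (hypothetical class, not part of the code above) keeps only the latest frame and gives every consumer its own copy:

import threading

class FrameBroadcaster:
    """Holds the latest frame; readers each get a copy instead of competing on a queue."""
    def __init__(self):
        self._frame = None
        self._cond = threading.Condition()

    def publish(self, frame):
        # Called by the capture loop instead of frame_queue.put().
        with self._cond:
            self._frame = frame
            self._cond.notify_all()  # wake every waiting consumer

    def get_latest(self, timeout=1.0):
        # Called from on_need_data instead of frame_queue.get().
        with self._cond:
            self._cond.wait(timeout=timeout)
            return None if self._frame is None else self._frame.copy()

With this, on_need_data would call broadcaster.get_latest() and every connected client sees the full frame rate.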

How to stream .webm or .mjpeg from Python Django server?

I have a .webm feed from a 3rd-party camera source. I would like to redistribute it to my web clients. I can open the stream in OpenCV and then redistribute the .jpg images, which works perfectly with img tags. However, I would like to convert this into a .webm or .mjpeg stream for use with video.js or another video tag. I don't have enough experience with the HTTP protocol or video formats, and all my research only turns up this .jpg streaming method.
models.py
streamManagerLock = threading.Lock()
streamManagers = {}

class StreamManager:
    active = True
    lastTime = time.time()
    image = None

    def __init__(self, cameraObj):
        self.camera = cameraObj
        url = "https://" + str(cameraObj.system.systemID) + ".relay-la.vmsproxy.com"
        self.stream = cv.VideoCapture("{url}/web/media/{camID}.webm?auth={auth}".format(url=url,
                                                                                        camID=cameraObj.cam_id,
                                                                                        auth=cameraObj.system.getAuth()))
        self.stream.set(cv.CAP_PROP_BUFFERSIZE, 5)
        self.killThread = threading.Thread(target=self.selfTerminate, daemon=True)
        self.lastTime = time.time()
        self.killThread.start()
        self.updateThread = threading.Thread(target=self.update_frame, daemon=True)
        self.updateThread.start()
    def selfTerminate(self):
        while True:
            if time.time() - self.lastTime > 3:
                break
            time.sleep(1)
        self.terminate()

    def terminate(self):
        with streamManagerLock:
            streamManagers.pop(str(self.camera.cam_id))
        self.active = False
        self.stream.release()

    def update_frame(self):
        while self.active:
            ret, image = self.stream.read()
            if ret:
                ret, jpeg = cv.imencode('.jpg', image)
                self.image = jpeg.tobytes()
            else:
                ret, img = cv.imencode('.jpg', np.zeros([100, 100, 3], dtype=np.uint8))
                return img.tobytes()
            time.sleep(1 / 30)

    def get_frame(self):
        self.lastTime = time.time()
        if self.image is None:
            ret, img = cv.imencode('.jpg', np.zeros([100, 100, 3], dtype=np.uint8))
            return img.tobytes()
        return self.image
def getCameraStream(cameraObj):
    with streamManagerLock:
        if str(cameraObj.cam_id) in streamManagers.keys():
            return streamManagers[str(cameraObj.cam_id)]
        else:
            r = StreamManager(cameraObj)
            streamManagers.update({str(cameraObj.cam_id): r})
            return r
class Camera(models.Model):
    cam_id = models.UUIDField()
    label = models.CharField(max_length=100)
    visible = models.ManyToManyField(CompanyModel, blank=True)
    system = models.ForeignKey(DWCloudSystem, verbose_name="System", on_delete=models.CASCADE)
    profile = models.ManyToManyField(UserProfile, blank=True)
    isPublic = models.BooleanField(default=False)

    def getStream(self):
        def gen():
            stream = getCameraStream(self)
            while True:
                frame = stream.get_frame()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        try:
            return StreamingHttpResponse(gen(), content_type="multipart/x-mixed-replace;boundary=frame")
        except HttpResponseServerError as e:
            return HttpResponseServerError()

    def getThumbnail(self, height):
        url = "ec2/cameraThumbnail"
        params = {"cameraId": str(self.cam_id), "time": "LATEST", "height": 300, "ignoreExternalArchive": None}
        response = self.system.proxyRequest(url, params=params)
        if response is None:
            time.sleep(3)
            response = self.system.proxyRequest(url, params=params)
        if response is None:
            return HttpResponseServerError()
        return HttpResponse(
            content=response.content,
            status=response.status_code,
            content_type=response.headers['Content-Type']
        )

    def __str__(self):
        return "{}: {}".format(str(self.system.company_key), str(self.label))
views.py
stream/{uuid:camID}
@handleAPIAuth
def getStream(request, camID):
    cameras = getAllCameras(request)
    cameras = cameras.filter(cam_id=camID)
    if len(cameras) != 1:
        return HttpResponseForbidden()
    camera = cameras[0]
    return camera.getStream()
template.html
video tag
<video
id="cam-stream"
class="video-js"
controls
preload="auto"
width="640"
height="264"
data-setup="{}"
>
<source id="stream-source" src="stream/49289ede-a66e-c436-839c-6141fd7f8f87" type="application/x-mpegURL" />
<p class="vjs-no-js">
To view this video please enable JavaScript, and consider upgrading to a
web browser that
supports HTML5 video
</p>
</video>
When a thumbnail gets clicked:
<script type="text/javascript">
    function changeImage(img){
        document.getElementById("stream-source").src = "stream/" + img.getAttribute("camID");
    }
</script>
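Not part of the original question, but one common way to get an actual .webm stream out of Django is to let ffmpeg do the re-encoding and relay its stdout through a StreamingHttpResponse. A sketch, assuming ffmpeg is installed on the server and SOURCE_URL is the upstream camera URL (both placeholders):

import subprocess
from django.http import StreamingHttpResponse

def webm_stream(request):
    # Re-encode the upstream feed to VP8/webm and relay the raw byte stream.
    proc = subprocess.Popen(
        ["ffmpeg", "-i", SOURCE_URL,
         "-an", "-c:v", "libvpx", "-deadline", "realtime",
         "-f", "webm", "pipe:1"],
        stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    def gen():
        try:
            while True:
                chunk = proc.stdout.read(64 * 1024)
                if not chunk:
                    break
                yield chunk
        finally:
            proc.kill()  # stop ffmpeg when the client disconnects

    return StreamingHttpResponse(gen(), content_type="video/webm")

A plain <video src="..."> tag can point at this endpoint; since it is a live stream, seeking will not work.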

How to integrate OpenCV webcam with Django and React?

I have a barcode reader which I implemented using the open-source Dynamsoft API and OpenCV.
Now I need to integrate it with Django and display it on my website in React.
I have no idea how to do that; I tried passing the code to my views.py but don't know what I should do next.
Here is my code for barcode reading:
import cv2
from dbr import *
import time
reader = BarcodeReader()
def text_results_callback_func(frame_id, t_results, user_data):
    print(frame_id)
    for result in t_results:
        text_result = TextResult(result)
        print("Barcode Format : ")
        print(text_result.barcode_format_string)
        print("Barcode Text : ")
        print(text_result.barcode_text)
        print("Exception : ")
        print(text_result.exception)
        print("-------------")

def get_time():
    localtime = time.localtime()
    capturetime = time.strftime("%Y%m%d%H%M%S", localtime)
    return capturetime
def read_barcode():
    video_width = 0
    video_height = 0
    vc = cv2.VideoCapture(0)
    video_width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)
    vc.set(3, video_width)
    vc.set(4, video_height)
    stride = 0
    if vc.isOpened():
        rval, frame = vc.read()
        stride = frame.strides[0]
    else:
        return
    windowName = "Barcode Reader"
    parameters = reader.init_frame_decoding_parameters()
    parameters.max_queue_length = 30
    parameters.max_result_queue_length = 30
    parameters.width = video_width
    parameters.height = video_height
    parameters.stride = stride
    parameters.image_pixel_format = EnumImagePixelFormat.IPF_RGB_888
    parameters.region_top = 0
    parameters.region_bottom = 100
    parameters.region_left = 0
    parameters.region_right = 100
    parameters.region_measured_by_percentage = 1
    parameters.threshold = 0.01
    parameters.fps = 0
    parameters.auto_filter = 1
    reader.start_video_mode(parameters, text_results_callback_func)
    while True:
        cv2.imshow(windowName, frame)
        rval, frame = vc.read()
        if rval == False:
            break
        try:
            ret = reader.append_video_frame(frame)
        except:
            pass
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    reader.stop_video_mode()
    cv2.destroyWindow(windowName)
print("-------------------start------------------------")
reader.init_license("***************************")
read_barcode()
I think you need to use the JS version of Dynamsoft Barcode Reader to scan barcodes using cameras in a webpage: https://www.dynamsoft.com/barcode-reader/sdk-javascript/
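If you do want to keep the Python-side reader, the usual bridge is the same MJPEG pattern used elsewhere on this page: run the OpenCV loop on the server and expose the frames as a multipart endpoint that a React img tag points at. A sketch (hypothetical view; note the camera must be attached to the server, which is why the JS SDK is usually the better fit for end-user cameras):

import cv2
from django.http import StreamingHttpResponse

def barcode_feed(request):
    vc = cv2.VideoCapture(0)

    def gen():
        while True:
            rval, frame = vc.read()
            if not rval:
                break
            # reader.append_video_frame(frame) would go here, as in the question
            _, jpeg = cv2.imencode('.jpg', frame)
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

    return StreamingHttpResponse(gen(), content_type='multipart/x-mixed-replace; boundary=frame')

On the React side this is just <img src="/barcode_feed/" />.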

How to capture video by video from one rtsp Url using Opencv?

The server sends videos one after another over the same RTSP URL (rtsp://192.168.0.2:8554/).
I can capture and display each video using OpenCV:
import os
import numpy as np
import cv2 as cv

os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
cap = cv.VideoCapture('rtsp://192.168.0.2:8554/')
while cap.isOpened():
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()
This program throws an error when the stream moves on to the next video.
I tried the following, but it didn't work either:
import cv2 as cv
import os
import time

os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
cap = cv.VideoCapture('rtsp://192.168.0.26:8554/')
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    try:
        time.sleep(2)
        # Capture frame-by-frame
        ret, frame = cap.read()
        # if frame is read correctly ret is True
        # Our operations on the frame come here
        # Display the resulting frame
        cv.imshow('frame', frame)
        if cv.waitKey(1) == ord('q'):
            break
    except:
        print("Exception!!")
# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
Can I get some help?
Thanks in advance!
I solved this by using a multi-threaded program.
Main file
from datasets import LoadStreams
import threading
import os
import logging
import cv2
import torch
import time
logger = logging.getLogger(__name__)
def select_device(device='', batch_size=None):
    # device = 'cpu' or '0' or '0,1,2,3'
    cpu_request = device.lower() == 'cpu'
    if device and not cpu_request:  # if device requested other than 'cpu'
        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
    cuda = False if cpu_request else torch.cuda.is_available()
    if cuda:
        c = 1024 ** 2  # bytes to MB
        ng = torch.cuda.device_count()
        if ng > 1 and batch_size:  # check that batch_size is compatible with device_count
            assert batch_size % ng == 0, f'batch-size {batch_size} not multiple of GPU count {ng}'
        x = [torch.cuda.get_device_properties(i) for i in range(ng)]
        s = f'Using torch {torch.__version__} '
        for i, d in enumerate((device or '0').split(',')):
            if i == 1:
                s = ' ' * len(s)
            logger.info(f"{s}CUDA:{d} ({x[i].name}, {x[i].total_memory / c}MB)")
    else:
        logger.info(f'Using torch {torch.__version__} CPU')
    logger.info('')  # skip a line
    return torch.device('cuda:0' if cuda else 'cpu')
def detect(rtsp_url):
    dataset = LoadStreams(rtsp_url)
    device = select_device('')
    count = 0
    view_img = True
    # img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    try:
        for frame_idx, (path, img, im0s, vid_cap) in enumerate(dataset):  # for every frame
            count += 1
            im0 = im0s[0].copy()
            if view_img:
                cv2.imshow(str(path), im0)
            # if cv2.waitKey(1) == ord('q'):  # q to quit
            #     raise StopIteration
    except:
        print("finish execption")
        dataset.stop()
    return "good"

if __name__ == '__main__':
    rtsp_url = "rtsp://192.168.0.26:8554/"
    while True:
        for thread in threading.enumerate():
            print(thread.name)
        print(detect(rtsp_url))
dataset class file
import glob
import logging
import math
import os
import random
import shutil
import time
import re
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'stream'
        self.img_size = img_size
        self.capture = None
        self.my_thread = None
        self.stopFlag = False
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]
        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        s = sources[0]
        # for i, s in enumerate(sources):
        # Start the thread to read frames from the video stream
        # print('%g/%g: %s... ' % (i + 1, n, s), end='')
        cap = cv2.VideoCapture(eval(s) if s.isnumeric() else s)
        assert cap.isOpened(), 'Failed to open %s' % s
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS) % 100
        self.ret, self.imgs[0] = cap.read()  # guarantee first frame
        thread = Thread(target=self.update, args=([0, cap]), daemon=True)
        print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
        thread.start()
        self.capture = cap
        self.my_thread = thread
        print('')  # newline
        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened() and not self.stopFlag:
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def stop(self):
        self.stopFlag = True
        try:
            # self.capture.release()
            # self.my_thread.join()
            print("stop thread!!")
        except:
            print("ERROR stopping thread!!")
    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration
        if not self.ret:
            print("error!!!")
            self.stop()
        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|##!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 32), np.mod(dh, 32)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
The line "while cap.isOpened() and not self.stopFlag:" is especially important: without the stopFlag check, the reader threads never exit, so they pile up with every new detect() call and eventually cause a memory error.
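A minimal sketch of the matching shutdown path (a drop-in for the stop() method above, using the attributes LoadStreams already defines): set the flag, join the thread, then release the capture so the reader really goes away.

    def stop(self):
        self.stopFlag = True                    # update() loop exits on its next iteration
        if self.my_thread is not None:
            self.my_thread.join(timeout=2.0)    # wait for the reader thread to finish
        if self.capture is not None:
            self.capture.release()              # free the FFmpeg/RTSP resources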

How can I display a selected video with Flask?

I want to display the selected video using Flask.
I select a video using HTML:
<input id = "video_re" name = "video_select" accept = "video/*" type = "file" >
<input type="submit" value="testing" id="button_click" >
and then get the file name:

@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    video_name = f.filename
    return video_name
and display it:
def video_gray(selected):
    camera = cv2.VideoCapture(selected)
    while True:
        success, frame = camera.read()
        if not success:
            break
        else:
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # video grayscale (after the success check, so frame is valid)
            ret, buffer = cv2.imencode('.jpg', grayFrame)
            grayFrame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + grayFrame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(video_gray(), mimetype='multipart/x-mixed-replace; boundary=frame')
html
<img src="{{url_for('video_feed')}}">
The selected video name is in video_name. How can I send this value to video_gray(selected)?
Or is there another way to display the selected video?
You can use a global variable: declare video_name as global and you can use it anywhere you like.
Or try one of these, maybe it will help.
Just send the POST data directly to the video_gray function:
@app.route('/testing', methods=['POST'])
def video_gray():
    f = request.files['video_select']
    selected = f.filename
    camera = cv2.VideoCapture(selected) ...
Or you can just call your function inside the test() function:
@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    video_name = f.filename
    video_gray(video_name)
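Another option, not in the answers above: pass the filename through the URL so no global state is needed. A sketch, assuming the existing app and video_gray(selected) from the question, the usual Flask imports (request, Response, url_for), and that the upload is saved server-side first so cv2.VideoCapture can open it:

@app.route('/video_feed/<path:video_name>')
def video_feed(video_name):
    return Response(video_gray(video_name),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    f.save(f.filename)  # real code should sanitize with werkzeug.utils.secure_filename
    return f'<img src="{url_for("video_feed", video_name=f.filename)}">'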
