I have a .webm feed from a third-party camera source that I would like to redistribute to my web clients. I can open the stream in OpenCV and redistribute the .jpg images, which works perfectly with img tags. However, I would like to convert this into a .webm or MJPEG stream for use with video.js or another video tag. I don't have much experience with the HTTP protocol or video formats, and all my research only turns up this .jpg streaming method.
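One route that may get a real video-tag stream (a sketch, untested against this relay: it assumes ffmpeg is on the PATH, and source_url stands in for the authenticated relay URL that StreamManager builds below) is to skip the JPEG round-trip and remux the upstream WebM straight through Django:

import subprocess
from django.http import StreamingHttpResponse

def webm_stream(request, source_url):
    # Remux only (-c copy): no decode/re-encode, just rewrap the WebM bytes.
    proc = subprocess.Popen(
        ["ffmpeg", "-i", source_url,
         "-c", "copy",
         "-f", "webm", "pipe:1"],
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)

    def gen():
        try:
            while True:
                chunk = proc.stdout.read(64 * 1024)
                if not chunk:
                    break
                yield chunk
        finally:
            proc.kill()

    return StreamingHttpResponse(gen(), content_type="video/webm")

Served as video/webm, a plain video tag can usually play such a progressive stream. Note that each client spawns its own ffmpeg process here, so some fan-out layer would be needed for many concurrent viewers.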
models.py
streamManagerLock = threading.Lock()
streamManagers = {}

class StreamManager:
    def __init__(self, cameraObj):
        self.active = True
        self.image = None
        self.camera = cameraObj
        url = "https://" + str(cameraObj.system.systemID) + ".relay-la.vmsproxy.com"
        self.stream = cv.VideoCapture("{url}/web/media/{camID}.webm?auth={auth}".format(
            url=url,
            camID=cameraObj.cam_id,
            auth=cameraObj.system.getAuth()))
        self.stream.set(cv.CAP_PROP_BUFFERSIZE, 5)
        self.lastTime = time.time()
        self.killThread = threading.Thread(target=self.selfTerminate, daemon=True)
        self.killThread.start()
        self.updateThread = threading.Thread(target=self.update_frame, daemon=True)
        self.updateThread.start()

    def selfTerminate(self):
        # Shut the stream down once no client has asked for a frame for 3 seconds.
        while True:
            if time.time() - self.lastTime > 3:
                break
            time.sleep(1)
        self.terminate()

    def terminate(self):
        with streamManagerLock:
            streamManagers.pop(str(self.camera.cam_id))
        self.active = False
        self.stream.release()

    def update_frame(self):
        while self.active:
            ret, image = self.stream.read()
            if ret:
                ret, jpeg = cv.imencode('.jpg', image)
                self.image = jpeg.tobytes()
            else:
                # Serve a black placeholder instead of returning, which would
                # silently end this update thread.
                ret, img = cv.imencode('.jpg', np.zeros([100, 100, 3], dtype=np.uint8))
                self.image = img.tobytes()
            time.sleep(1 / 30)

    def get_frame(self):
        self.lastTime = time.time()
        if self.image is None:
            ret, img = cv.imencode('.jpg', np.zeros([100, 100, 3], dtype=np.uint8))
            return img.tobytes()
        return self.image

def getCameraStream(cameraObj):
    with streamManagerLock:
        if str(cameraObj.cam_id) in streamManagers:
            return streamManagers[str(cameraObj.cam_id)]
        r = StreamManager(cameraObj)
        streamManagers[str(cameraObj.cam_id)] = r
        return r
class Camera(models.Model):
    cam_id = models.UUIDField()
    label = models.CharField(max_length=100)
    visible = models.ManyToManyField(CompanyModel, blank=True)
    system = models.ForeignKey(DWCloudSystem, verbose_name="System", on_delete=models.CASCADE)
    profile = models.ManyToManyField(UserProfile, blank=True)
    isPublic = models.BooleanField(default=False)

    def getStream(self):
        def gen():
            stream = getCameraStream(self)
            while True:
                frame = stream.get_frame()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        # HttpResponseServerError is a response class, not an exception,
        # so there is nothing useful to catch here.
        return StreamingHttpResponse(gen(), content_type="multipart/x-mixed-replace;boundary=frame")

    def getThumbnail(self, height):
        url = "ec2/cameraThumbnail"
        params = {"cameraId": str(self.cam_id), "time": "LATEST",
                  "height": height, "ignoreExternalArchive": None}
        response = self.system.proxyRequest(url, params=params)
        if response is None:
            time.sleep(3)
            response = self.system.proxyRequest(url, params=params)
            if response is None:
                return HttpResponseServerError()
        return HttpResponse(
            content=response.content,
            status=response.status_code,
            content_type=response.headers['Content-Type']
        )

    def __str__(self):
        return "{}: {}".format(str(self.system.company_key), str(self.label))
views.py
URL pattern: stream/<uuid:camID>
@handleAPIAuth
def getStream(request, camID):
    cameras = getAllCameras(request)
    cameras = cameras.filter(cam_id=camID)
    if len(cameras) != 1:
        return HttpResponseForbidden()
    camera = cameras[0]
    return camera.getStream()
template.html
video tag
<video
  id="cam-stream"
  class="video-js"
  controls
  preload="auto"
  width="640"
  height="264"
  data-setup="{}"
>
  <source id="stream-source" src="stream/49289ede-a66e-c436-839c-6141fd7f8f87" type="application/x-mpegURL" />
  <p class="vjs-no-js">
    To view this video please enable JavaScript, and consider upgrading to a
    web browser that supports HTML5 video
  </p>
</video>
When a thumbnail gets clicked:
<script type="text/javascript">
  function changeImage(img){
    document.getElementById("stream-source").src = "stream/" + img.getAttribute("camID");
  }
</script>
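A side note on this click handler (a hedged sketch, independent of the streaming format): swapping the src of a <source> element has no effect on a video that has already loaded; with a plain <video> tag the element needs a load() call afterwards:

<script type="text/javascript">
  function changeImage(img){
    document.getElementById("stream-source").src = "stream/" + img.getAttribute("camID");
    document.getElementById("cam-stream").load();  // re-read the <source> children
  }
</script>

With video.js specifically, the player's own src() API is the usual way to switch sources instead.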
Related
I have one RTSP server hosting multiple videos, using multiprocessing queues as channels and sensor-factory objects built on GStreamer. The reason for using multiprocessing queues is a limitation of the nvh264enc encoder in the pipeline (its teardown leaves the ports unusable for subsequent client connections; there is no such problem with x264enc). The problem appears when multiple clients connect to the same link: even though the factory is shared between clients with the same media element, playback stutters. The results can be seen in clients such as VLC player.
"""
This module will provide encoder functionality
"""
from base64 import decode
from gc import get_stats
from os import stat
from queue import Empty
import sys
import itertools
import numpy as np
import logging
import threading
import multiprocessing as mp
import gi
from pprint import pprint
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject, GLib
import cv2
import subprocess
import time
GObject.threads_init()
Gst.init(None)
# def on_debug(category, level, dfile, dfctn, dline, source, message, user_data):
# if source:
# print('Debug {} {}: {}'.format(
# Gst.DebugLevel.get_name(level), source.name, message.get()))
# else:
# print('Debug {}: {}'.format(
# Gst.DebugLevel.get_name(level), message.get()))
# if not Gst.debug_is_active():
# Gst.debug_set_active(True)
# level = Gst.debug_get_default_threshold()
# Gst.debug_set_default_threshold(Gst.DebugLevel.INFO)
# if level < Gst.DebugLevel.ERROR:
# Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
# Gst.debug_add_log_function(on_debug, None)
# Gst.debug_remove_log_function(Gst.debug_log_default)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
handlers=[logging.FileHandler('GPU_enabled_frame_encode.log'), logging.StreamHandler(sys.stdout)])
logging.debug('Debug message')
logging.info('Info message')
logging.warning('Warning message')
logging.error('Error message')
logging.critical('Critical message')
class SensorFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self, fps, img_shape, cols, verbosity=1, cap=None, speed_preset='medium', properties={}):
        super(SensorFactory, self).__init__(**properties)
        logging.info("sensor factory")
        self.rtsp_media = None
        self.height = int(img_shape[0])
        self.width = int(img_shape[1] * cols)
        self.number_frames = 0
        self.stream_timestamp = 0.0
        self.timestamp = time.time()
        self.dt = 0.0
        self.streamed_frames = 0
        self.verbosity = verbosity
        fps = int(fps)
        self.cap = cap
        self.appsrc = None
        # duration of a frame in nanoseconds
        self.duration = 1.0 / fps * Gst.SECOND
        key_int_max = ' key-int-max={} '.format(fps)
        caps_str = 'caps=video/x-raw,format=BGR,width={},height={},framerate={}/1 '.format(
            self.width, self.height, fps)
        self.launch_string = ('appsrc name=source is-live=true block=true do-timestamp=true '
                              'format=GST_FORMAT_TIME ' + caps_str +
                              ' ! queue'
                              ' ! videoconvert'
                              ' ! video/x-raw,format=I420'
                              ' ! nvh264enc'
                              ' ! rtph264pay config-interval=1 pt=96 name=pay0')

    def set_cap(self, cap):
        self.cap = cap

    def on_need_data(self, src, length):
        # this method executes when the client requests data
        ret = False
        frame = None
        if self.cap.isOpened():
            frame_queue = self.cap.get_queue()
            frame = frame_queue.get()
            ret = True
        if ret:
            if frame.shape[:2] != (self.height, self.width):
                frame = cv2.resize(frame, (self.width, self.height))
            data = frame.tobytes()
            buf = Gst.Buffer.new_allocate(None, len(data), None)
            buf.fill(0, data)
            buf.duration = self.duration
            timestamp = self.number_frames * self.duration
            buf.pts = buf.dts = int(timestamp)
            buf.offset = timestamp
            self.number_frames += 1
            retval = self.appsrc.emit('push-buffer', buf)
            if retval != Gst.FlowReturn.OK:
                logging.info("[INFO]: retval not OK: {}".format(retval))
                if retval == Gst.FlowReturn.FLUSHING:
                    logging.info('Offline')
        elif self.verbosity > 0:
            logging.info("[INFO]: Unable to read frame from cap.")

    def do_create_element(self, url):
        if self.verbosity > 0:
            request_uri = url.get_request_uri()
            logging.info('[INFO]: stream request on {}'.format(request_uri))
        return Gst.parse_launch(self.launch_string)

    def do_configure(self, rtsp_media):
        self.rtsp_media = rtsp_media
        rtsp_media.set_reusable(True)
        self.number_frames = 0
        self.appsrc = rtsp_media.get_element().get_child_by_name('source')
        # fires each time the client requests data
        self.appsrc.connect('need-data', self.on_need_data)

    def get_rtsp_media(self):
        if self.rtsp_media:
            return self.rtsp_media

    def __del__(self):
        print('Destructor called, factory deleted.')
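Since the question notes that x264enc tears down cleanly where nvh264enc does not, a software-encoder variant of the launch string may be a useful baseline for comparison (a sketch: it reuses caps_str, speed_preset, and key_int_max exactly as computed in __init__ above):

# Hypothetical replacement for the launch_string assignment in
# SensorFactory.__init__, swapping nvh264enc for the software encoder:
self.launch_string = ('appsrc name=source is-live=true block=true do-timestamp=true '
                      'format=GST_FORMAT_TIME ' + caps_str +
                      ' ! queue'
                      ' ! videoconvert'
                      ' ! video/x-raw,format=I420'
                      ' ! x264enc tune=zerolatency speed-preset=' + speed_preset +
                      key_int_max +
                      ' ! rtph264pay config-interval=1 pt=96 name=pay0')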
class RTSP_utility_server(GstRtspServer.RTSPServer):
    def __init__(self, fps, suffix='test', rtp_port=8554,
                 ip='12.0.0.0', caps=(None,), Sizes=[[1080, 1920]],
                 speed_preset='medium', verbosity=1, Indexes=[]):
        GObject.threads_init()
        Gst.init(None)
        super(RTSP_utility_server, self).__init__(**{})
        self.verbosity = verbosity
        self.rtp_port = "{}".format(rtp_port)
        if int(self.rtp_port) < 1024 and self.verbosity > 0:
            logging.info(
                '[INFO]: Note, admin privileges are required because port number < 1024.')
        self.set_service(self.rtp_port)
        self.speed_preset = speed_preset
        self.caps = caps
        self.factory = [None] * len(self.caps)
        self.suffix = suffix
        self.fps = fps
        self.Sizes = Sizes
        self.Indexes = Indexes
        self.attach(None)
        self.ip = self.get_ip()
        self.media_path_list = [None] * len(self.caps)
        self.clients_list = []
        if len(self.suffix):
            self.full_suffix = '/' + self.suffix.lstrip('/')
        else:
            self.full_suffix = ''
        self.connect("client-connected", self.client_connected)
        logging.info(
            '[INFO]: streaming on:\n\trtsp://{}:{}/{}#'.format(self.ip, self.rtp_port, self.suffix))
        self.status_thread = threading.Thread(target=self.status_thread_loop)
        self.status_thread.daemon = True
        self.status_thread.start()
        self.context = GLib.MainContext()
        print(self.attach(self.context))
    def set_caps(self, caps):
        if not isinstance(caps, (list, tuple)):
            caps = [caps]
        self.caps = caps

    def create_media_factories(self):
        mount_points = self.get_mount_points()
        media_path_list = []
        for i, cap in enumerate(self.caps):
            img_shape = self.Sizes[i]
            if len(self.Indexes) == 0:
                N_Index = str(i + 1)
            else:
                N_Index = str(self.Indexes[i])
            factory = SensorFactory(fps=self.fps, img_shape=img_shape, speed_preset=self.speed_preset,
                                    cols=1, verbosity=self.verbosity, cap=cap)
            factory.set_shared(True)
            factory.set_stop_on_disconnect(True)
            logging.info('inside media_factories Stream on ' + self.full_suffix + N_Index)
            mount_points.add_factory(self.full_suffix + N_Index, factory)
            self.factory[i] = factory
            media_path_list.append(self.full_suffix + N_Index)
        self.media_path_list = media_path_list
        self.get_status()
    def destroy_media_factories(self):
        session_pool = self.get_session_pool()
        logging.info("Number of sessions are :" + str(session_pool.get_n_sessions()))
        sessions_list = session_pool.filter()
        for session in sessions_list:
            for path in self.get_paths():
                media_matched, _ = session.get_media(path)
                if media_matched:
                    rtsp_media = media_matched.get_media()
                    rtsp_media.set_eos_shutdown(True)
                    rtsp_media.unprepare()
                    logging.debug("media removed for path " + path)
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))

    def destroy_media_factories_by_path(self, path_to_remove="/video1"):
        session_pool = self.get_session_pool()
        logging.info("Number of sessions are :" + str(session_pool.get_n_sessions()))
        sessions_list = session_pool.filter()
        for session in sessions_list:
            for path in self.get_paths():
                media_matched, _ = session.get_media(path)
                if media_matched and path == path_to_remove:
                    rtsp_media = media_matched.get_media()
                    rtsp_media.set_eos_shutdown(True)
                    rtsp_media.unprepare()
                    logging.debug("media removed for path " + path)
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))
    def client_connected(self, gst_server_obj, rtsp_client_obj):
        self.create_media_factories()
        self.clients_list.append(rtsp_client_obj)
        if self.verbosity > 0:
            logging.info('[INFO]: Client has connected')

    def stop_all(self):
        self.destroy_media_factories()

    def stop_by_index(self, path):
        self.destroy_media_factories_by_path(path)

    def get_paths(self):
        return self.media_path_list

    def get_status(self):
        mount_points = self.get_mount_points()
        session_pool = self.get_session_pool()
        number_of_disconnects = session_pool.cleanup()
        if number_of_disconnects > 0:
            logging.info("number of disconnects:" + str(number_of_disconnects))
        for path in self.get_paths():
            sessions_list = session_pool.filter()
            for session in sessions_list:
                session.set_timeout(1)
                media_matched, _ = session.get_media(path)
                if media_matched:
                    rtsp_media = media_matched.get_media()
                    status = rtsp_media.get_status()  # e.g. GST_RTSP_MEDIA_STATUS_PREPARED
                    if "GST_RTSP_MEDIA_STATUS_UNPREPARING" in str(status):
                        # Teardown attempts tried here without success:
                        # unprepare(), suspend(), release_media(), allow_expire(),
                        # set_state(PAUSED/READY/NULL), set_pipeline_state(READY/NULL),
                        # set_eos_shutdown(True), transport.set_active(False), client_filter()
                        print("removing")

    def status_thread_loop(self):
        while True:
            time.sleep(2)
            try:
                self.get_status()
            except Exception as e:
                print(e)

    def __del__(self):
        print("Destroy called")

    def get_current_encoders_details(self):
        enc_details = dict()
        for i, cap in enumerate(self.caps):
            enc_details[i] = {"path": self.media_path_list[i], "decoder": cap, "factory": self.factory[i]}
        return enc_details

    @staticmethod
    def get_ip():
        return subprocess.check_output("hostname -I", shell=True).decode('utf-8').split(' ')[0]
class encoders():
    """
    This is the container for multiple encoders
    """
    def __init__(self, decoders=[], head=[], resolutions=[], suffix="video") -> None:
        self.encoder_list = []
        self.caps = decoders
        self.fps = 60
        Sizes = resolutions
        self.enc_obj = RTSP_utility_server(self.fps, Sizes=Sizes, speed_preset="medium",
                                           caps=decoders, suffix=suffix,
                                           verbosity=1, rtp_port=8554, ip='10.5.1.130')

    def get_encoders(self):
        return self.enc_obj.get_current_encoders_details()

    def set_frame(self, enc_id=0, frame=np.zeros((360, 640, 3))):
        encoder_objects = self.enc_obj.get_current_encoders_details()
        decoder_obj = None
        for id in encoder_objects:
            if id == enc_id:
                decoder_obj = encoder_objects[id]["decoder"]
                break
        if decoder_obj:
            decoder_obj.set_canvas(frame)
        else:
            print("Decoder not available")

    def stop_all(self):
        self.enc_obj.stop_all()

    def stop_by_index(self, path="/video1"):
        self.enc_obj.stop_by_index(path)

    def get_status(self):
        self.enc_obj.get_status()
The multiprocessing-queue logic is as follows:

while True:
    if self.stop_signal.is_set():
        self.stop_signal.clear()
        self.active = False
        break
    ret, frame = self.capture.read()
    if ret and frame is not None:
        self.frame = frame.copy()
        self.latest_frame = frame.copy()
        self.recieving_signal.set()
        self.latest_frame_time = time.ctime()
        try:
            self.frame_queue.put(self.frame, block=False)
        except Full:  # queue.Full: drop the frame if the queue is full
            pass
Here capture is a cv2.VideoCapture reading from an offline file. Is there any way to improve the video quality without stuttering when new connections arrive?
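One thing that may reduce the stutter (an assumption about the cause, not a verified fix): with a single shared multiprocessing queue, every connected client's need-data callback competes for the same queue items, so each client only receives a fraction of the frames. A latest-frame holder avoids that contention:

import threading

class LatestFrame:
    """Thread-safe holder: every reader sees the newest frame.
    A sketch, assuming frames arrive from the capture loop above; each
    client then reads the current frame instead of popping shared items."""
    def __init__(self):
        self._lock = threading.Lock()
        self._frame = None

    def put(self, frame):
        with self._lock:
            self._frame = frame

    def get(self):
        with self._lock:
            return None if self._frame is None else self._frame.copy()

In on_need_data, cap.get_queue().get() would then be replaced by a read from this holder, at the cost of clients possibly seeing the same frame twice under load.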
I'm trying to stream the depth and RGB video of a depth camera (an Intel D455) over the web.
I'm re-using a script from here: https://pyshine.com/Live-streaming-multiple-videos-on-a-webpage/
My problem is the following: when I start the script with both threads, both ports (9000 and 9001) display the stream from the last-started thread, but if I start only one of them, that port displays the correct video (and of course the other doesn't work).
Do you have an idea where I've made a mistake? (Maybe in the pipeline?)
Thanks
Here is the code:
import cv2
import pyshine as ps
from multiprocessing import Process
import pyrealsense2 as rs
import numpy as np
import threading

HTML = """
<html>
<head>
<title>PyShine Live Streaming</title>
</head>
<body>
<center><h1> PyShine Live Streaming Multiple videos </h1></center>
<center><img src="10.112.33.161:9000/stream.mjpg" width='360' height='240' autoplay playsinline></center>
</body>
</html>
"""

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()

# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
class ImgCapture():
    def __init__(self):
        pass

    def read(self):
        # Wait for a coherent pair of frames: depth and color
        self.frames = pipeline.wait_for_frames()
        depth_frame = self.frames.get_depth_frame()
        color_frame = self.frames.get_color_frame()
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_BONE)
        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape
        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
                                     interpolation=cv2.INTER_AREA)
        return (color_image, depth_colormap)

    def isOpened(self):
        # The RealSense pipeline is opened globally, so always report open.
        return True


class ImgDepth():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        return (depth_colormap is not None, depth_colormap)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None


class ImgColor():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        return (color_image is not None, color_image)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
def color():
    StreamProps = ps.StreamProps
    StreamProps.set_Page(StreamProps, HTML)
    address = ('10.112.33.161', 9001)  # Enter your IP address
    try:
        StreamProps.set_Mode(StreamProps, 'cv2')
        capture0 = ImgCapture()
        capture1 = ImgColor(capture0)
        StreamProps.set_Capture(StreamProps, capture1)
        StreamProps.set_Quality(StreamProps, 90)
        server = ps.Streamer(address, StreamProps)
        print('Server started at', 'http://' + address[0] + ':' + str(address[1]))
        server.serve_forever()
        print('done')
    except KeyboardInterrupt:
        pipeline.stop()
        server.socket.close()


def depth():
    StreamProps = ps.StreamProps
    StreamProps.set_Page(StreamProps, HTML)
    address = ('10.112.33.161', 9000)  # Enter your IP address
    try:
        StreamProps.set_Mode(StreamProps, 'cv2')
        capture0 = ImgCapture()
        capture2 = ImgDepth(capture0)
        StreamProps.set_Capture(StreamProps, capture2)
        StreamProps.set_Quality(StreamProps, 90)
        server = ps.Streamer(address, StreamProps)
        print('Server started at', 'http://' + address[0] + ':' + str(address[1]))
        server.serve_forever()
    except KeyboardInterrupt:
        pipeline.stop()
        server.socket.close()


if __name__ == '__main__':
    # Start streaming
    pipeline.start(config)
    t1 = threading.Thread(target=depth)
    t2 = threading.Thread(target=color)
    t1.start()
    t2.start()
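One plausible cause (an observation about the code above, not verified against pyshine internals): both threads configure the very same ps.StreamProps class object via set_Page/set_Mode/set_Capture, so whichever thread runs last overwrites the capture for both servers, which matches the "both ports show the last thread" symptom. A hedged workaround is to give each server its own subclass:

# Sketch: a per-server StreamProps subclass so the two servers do not share
# class-level state (assumes ps.Streamer reads capture/page/quality from
# the class object it is given).
def make_props(capture, page):
    class Props(ps.StreamProps):
        pass
    Props.set_Page(Props, page)
    Props.set_Mode(Props, 'cv2')
    Props.set_Capture(Props, capture)
    Props.set_Quality(Props, 90)
    return Props

# usage inside color()/depth():
#   server = ps.Streamer(address, make_props(ImgColor(capture0), HTML))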
If someone needs to stream a depth camera from Intel, here is a working Flask version:
import cv2
import pyrealsense2 as rs
import numpy as np
from flask import Flask, render_template, Response

app = Flask('hello')

HTML = """
<html>
<head>
<title>PyShine Live Streaming</title>
</head>
<body>
<center><h1> PyShine Live Streaming Multiple videos </h1></center>
<center><img src="youradresse:port/stream.mjpg" width='360' height='240' autoplay playsinline></center>
</body>
</html>
"""

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()

# Get device product line for setting a supporting resolution
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))

found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
if device_product_line == 'L500':
    config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
else:
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
class ImgCapture():
    def __init__(self):
        pass

    def read(self):
        # Wait for a coherent pair of frames: depth and color
        self.frames = pipeline.wait_for_frames()
        depth_frame = self.frames.get_depth_frame()
        color_frame = self.frames.get_color_frame()
        # Convert images to numpy arrays
        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())
        # Scale the 16-bit depth image down to 8 bits per pixel
        depth_colormap = cv2.convertScaleAbs(depth_image, alpha=0.03)
        depth_colormap_dim = depth_colormap.shape
        color_colormap_dim = color_image.shape
        # If depth and color resolutions are different, resize color image to match depth image for display
        if depth_colormap_dim != color_colormap_dim:
            color_image = cv2.resize(color_image, dsize=(depth_colormap_dim[1], depth_colormap_dim[0]),
                                     interpolation=cv2.INTER_AREA)
        return (color_image, depth_colormap)

    def isOpened(self):
        # The RealSense pipeline is opened globally, so always report open.
        return True


class ImgDepth():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        return (depth_colormap is not None, depth_colormap)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None


class ImgColor():
    def __init__(self, cap):
        self.capture = cap

    def read(self):
        color_image, depth_colormap = self.capture.read()
        return (color_image is not None, color_image)

    def isOpened(self):
        color_image, depth_colormap = self.capture.read()
        return color_image is not None
def gen_frames_depth():
    while True:
        success, DEPTH = capture_depth.read()
        if not success:
            break
        _, buffer_DEPTH = cv2.imencode('.jpg', DEPTH)
        frame_depth = buffer_DEPTH.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n'
               b'Content-Length: ' + f"{len(frame_depth)}".encode() + b'\r\n'
               b'\r\n' + frame_depth + b'\r\n')

@app.route('/video_feed_depth')
def video_feed_depth():
    # boundary must match the part delimiter without the leading dashes
    return Response(gen_frames_depth(), mimetype='multipart/x-mixed-replace; boundary=frame')

def gen_frames_color():
    while True:
        success, RGB = capture_color.read()
        if not success:
            break
        _, buffer_RGB = cv2.imencode('.jpg', RGB)
        frame_RGB = buffer_RGB.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n'
               b'Content-Length: ' + f"{len(frame_RGB)}".encode() + b'\r\n'
               b'\r\n' + frame_RGB + b'\r\n')

@app.route('/video_feed_color')
def video_feed_color():
    return Response(gen_frames_color(), mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/')
def index():
    return """
    <body>
    <div class="container">
        <div class="row">
            <div class="col-lg-8 offset-lg-2">
                <h3 class="mt-5">Live Streaming</h3>
                <img src="/video_feed_depth" width="50%">
                <img src="/video_feed_color" width="50%">
            </div>
        </div>
    </div>
    </body>
    """

if __name__ == '__main__':
    # Start streaming
    pipeline.start(config)
    capture0 = ImgCapture()
    capture_depth = ImgDepth(capture0)
    capture_color = ImgColor(capture0)
    app.run(host="0.0.0.0")
I have a barcode reader which I implemented using the Dynamsoft Barcode Reader SDK and OpenCV.
Now I need to integrate it with Django and display it on my website in React.
I have no idea how to do that; I tried passing the code to my views.py but don't know what I should do next.
Here is my code for barcode reading:
import cv2
from dbr import *
import time

reader = BarcodeReader()

def text_results_callback_func(frame_id, t_results, user_data):
    print(frame_id)
    for result in t_results:
        text_result = TextResult(result)
        print("Barcode Format : ")
        print(text_result.barcode_format_string)
        print("Barcode Text : ")
        print(text_result.barcode_text)
        print("Exception : ")
        print(text_result.exception)
        print("-------------")

def get_time():
    localtime = time.localtime()
    capturetime = time.strftime("%Y%m%d%H%M%S", localtime)
    return capturetime

def read_barcode():
    video_width = 0
    video_height = 0
    vc = cv2.VideoCapture(0)
    video_width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)
    video_height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)
    vc.set(3, video_width)
    vc.set(4, video_height)
    stride = 0
    if vc.isOpened():
        rval, frame = vc.read()
        stride = frame.strides[0]
    else:
        return
    windowName = "Barcode Reader"
    parameters = reader.init_frame_decoding_parameters()
    parameters.max_queue_length = 30
    parameters.max_result_queue_length = 30
    parameters.width = video_width
    parameters.height = video_height
    parameters.stride = stride
    parameters.image_pixel_format = EnumImagePixelFormat.IPF_RGB_888
    parameters.region_top = 0
    parameters.region_bottom = 100
    parameters.region_left = 0
    parameters.region_right = 100
    parameters.region_measured_by_percentage = 1
    parameters.threshold = 0.01
    parameters.fps = 0
    parameters.auto_filter = 1
    reader.start_video_mode(parameters, text_results_callback_func)
    while True:
        cv2.imshow(windowName, frame)
        rval, frame = vc.read()
        if rval == False:
            break
        try:
            ret = reader.append_video_frame(frame)
        except:
            pass
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
    reader.stop_video_mode()
    cv2.destroyWindow(windowName)

print("-------------------start------------------------")
reader.init_license("***************************")
read_barcode()
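One minimal way to wire this into Django (a sketch; barcode_results, latest_results, and the /api/barcodes/ URL are hypothetical names, not part of the code above) is to have the callback store results in memory and expose them as JSON for the React front end to poll:

# views.py -- illustration only: a variant of the callback that stores
# results instead of printing them, plus a JSON endpoint for React.
from django.http import JsonResponse

latest_results = []  # filled by the Dynamsoft callback below

def text_results_callback_func(frame_id, t_results, user_data):
    for result in t_results:
        text_result = TextResult(result)
        latest_results.append(text_result.barcode_text)

def barcode_results(request):
    # React side: fetch('/api/barcodes/').then(r => r.json())
    return JsonResponse({"results": latest_results})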
I think you need to use the JavaScript version of Dynamsoft Barcode Reader to scan barcodes with the camera in a webpage: https://www.dynamsoft.com/barcode-reader/sdk-javascript/
I want to display the selected video using Flask.
I select a video using HTML:
<input id="video_re" name="video_select" accept="video/*" type="file">
<input type="submit" value="testing" id="button_click">
and then get the file name:
@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    video_name = f.filename
    return video_name
and display
def video_gray(selected):
    camera = cv2.VideoCapture(selected)
    while True:
        success, frame = camera.read()
        if not success:
            break
        grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert the frame to grayscale
        ret, buffer = cv2.imencode('.jpg', grayFrame)
        grayFrame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + grayFrame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(video_gray(), mimetype='multipart/x-mixed-replace; boundary=frame')
HTML:
<img src="{{url_for('video_feed')}}">
The selected video's name is in video_name. How can I send this value to video_gray(selected)? Or is there another way to display the selected video?
You can use a global variable: declare video_name as global and then use it anywhere you like.
Or try one of the following; maybe it will help.
Just send the POST data directly to the video_gray function:
@app.route('/testing', methods=['POST'])
def video_gray():
    f = request.files['video_select']
    selected = f.filename
    camera = cv2.VideoCapture(selected) ...
Or you can just call your function from the test() function:
@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    video_name = f.filename
    video_gray(video_name)
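A third option (a sketch: it assumes an uploads/ directory exists on the server, since cv2.VideoCapture needs a real server-side path and f.filename alone is only the client's file name) is to save the upload and hand the name over as a query parameter:

import os
from flask import request, Response, url_for

@app.route('/testing', methods=['POST'])
def test():
    f = request.files['video_select']
    path = os.path.join('uploads', f.filename)
    f.save(path)  # persist the upload so OpenCV can open it
    return '<img src="' + url_for('video_feed', name=f.filename) + '">'

@app.route('/video_feed')
def video_feed():
    selected = os.path.join('uploads', request.args['name'])
    return Response(video_gray(selected),
                    mimetype='multipart/x-mixed-replace; boundary=frame')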
I want to stream the video and audio (and some real-time data that I will get from processing every frame) from a surveillance camera to a Django website. I found this code that helps me send frames to the client:
from django.shortcuts import render
from django.http import HttpResponse, StreamingHttpResponse
import cv2
import time
from django.views.decorators import gzip

class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture('./streaming/video.mp4')

    def __del__(self):
        self.video.release()

    def get_frame(self):
        ret, image = self.video.read()
        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()

def gen(camera):
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

@gzip.gzip_page
def index(request):
    try:
        return StreamingHttpResponse(gen(VideoCamera()), content_type="multipart/x-mixed-replace;boundary=frame")
    except Exception:
        print("aborted")
But I don't know how to handle the audio, the extra data, and the synchronization. I want to know which technology I have to use, and whether there are tutorials or ideas about it. I really don't know what to read or how to start (I'm using Django).
For more details, see the AVrecordeR project on GitHub: https://github.com/JRodrigoF/AVrecordeR
import threading
import wave
import pyaudio

class AudioRecorder():
    # Audio class based on PyAudio and Wave
    def __init__(self):
        self.open = True
        self.rate = 44100
        self.frames_per_buffer = 1024
        self.channels = 2
        self.format = pyaudio.paInt16
        self.audio_filename = "temp_audio.wav"
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=self.format,
                                      channels=self.channels,
                                      rate=self.rate,
                                      input=True,
                                      frames_per_buffer=self.frames_per_buffer)
        self.audio_frames = []

    # Audio starts being recorded
    def record(self):
        self.stream.start_stream()
        while self.open:
            data = self.stream.read(self.frames_per_buffer)
            self.audio_frames.append(data)

    # Finishes the audio recording, and therefore the recording thread too
    def stop(self):
        if self.open:
            self.open = False
            self.stream.stop_stream()
            self.stream.close()
            self.audio.terminate()
            waveFile = wave.open(self.audio_filename, 'wb')
            waveFile.setnchannels(self.channels)
            waveFile.setsampwidth(self.audio.get_sample_size(self.format))
            waveFile.setframerate(self.rate)
            waveFile.writeframes(b''.join(self.audio_frames))
            waveFile.close()

    # Launches the audio recording function using a thread
    def start(self):
        audio_thread = threading.Thread(target=self.record)
        audio_thread.start()
Actually, I don't have much experience with OpenCV, but I don't think it can record video and audio simultaneously. You have to capture the audio and the video each in its own thread.
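The AVrecordeR repo linked above follows exactly that pattern: audio and video are recorded to separate temporary files on their own threads and merged afterwards. A sketch of the final muxing step (assumptions: a temp_video.avi was written by a parallel video-capture thread, and ffmpeg is on the PATH):

import subprocess

audio = AudioRecorder()
audio.start()                      # records on its own thread
# ... run the OpenCV video-capture loop here, writing temp_video.avi ...
audio.stop()

# Merge the two files: copy the video stream as-is, encode audio to AAC.
subprocess.call(["ffmpeg", "-y",
                 "-i", "temp_video.avi", "-i", "temp_audio.wav",
                 "-c:v", "copy", "-c:a", "aac", "output.mp4"])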