How to do real-time video/audio streaming in Django (Python)

I want to stream the video and audio (plus some real-time data that I will get from processing every frame) from a surveillance camera into a Django website. I found this code that helps me send frames to the client:
from django.shortcuts import render
from django.http import HttpResponse, StreamingHttpResponse, HttpResponseServerError
import cv2
import time
from django.views.decorators import gzip
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture('./streaming/video.mp4')
def __del__(self):
self.video.release()
def get_frame(self):
ret, image = self.video.read()
ret, jpeg = cv2.imencode('.jpg', image)
return jpeg.tobytes()
def gen(camera):
while True:
frame = camera.get_frame()
yield(b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
# @gzip.gzip_page
def index(request):
try:
return StreamingHttpResponse(gen(VideoCamera()), content_type="multipart/x-mixed-replace;boundary=frame")
except HttpResponseServerError as e:
print("aborted")
But I don't know how to handle the audio, the data, and the synchronization. I want to know what technology I have to use, and whether there are any tutorials or ideas about it. I really don't know what to read or how to start (I'm using Django).

For more details, see the GitHub repository: https://github.com/JRodrigoF/AVrecordeR
class AudioRecorder():
# Audio class based on pyAudio and Wave
def __init__(self):
self.open = True
self.rate = 44100
self.frames_per_buffer = 1024
self.channels = 2
self.format = pyaudio.paInt16
self.audio_filename = "temp_audio.wav"
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
# Audio starts being recorded
def record(self):
self.stream.start_stream()
while(self.open == True):
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
if self.open==False:
break
# Finishes the audio recording therefore the thread too
def stop(self):
if self.open==True:
self.open = False
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
pass
# Launches the audio recording function using a thread
def start(self):
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
Actually, I don't have much experience with OpenCV, but I don't think you can record video together with audio in a single capture call. You have to capture audio and video in separate threads, as in the sketch below.
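For illustration, a minimal sketch of running the two recorders side by side; AudioRecorder is the class shown above, and VideoRecorder is assumed to be the matching class from the linked repository:
import time

audio = AudioRecorder()
video = VideoRecorder()
audio.start()   # each start() spawns its own capture thread
video.start()
time.sleep(10)  # record for ten seconds
audio.stop()    # flip the flags; each thread finishes and its file is written
video.stop()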

Related

Why are exceptions not working? (Capturing images from IP cameras)

This is my code. Everything works fine and I can get the image from the IP camera, but my problem is when the camera disconnects for some reason: I want the code to wait for the connection to come back, but as it is the code neither exits nor continues.
Edit:
import csv
import cv2
import time
import queue, threading
import requests.exceptions
from time import sleep
import requests
with open('bin/Load/option_1.csv',encoding="utf-8") as option1 :
reader = csv.reader(option1)
option1 = list(reader)
ipcam1=option1[0][0]
camUser1=option1[1][0]
camPass1=option1[2][0]
class VideoCapture:
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.cap.open(name)
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)
def read(self):
return self.q.get()
url1='rtsp://'+camUser1+':'+camPass1+'@'+ipcam1+':554/Streaming/channels/1/'
cap1= VideoCapture(url1)
while True:
try:
time.sleep(0) # simulate time between events
sleep(0.18)
frame = cap1.read()
with open("tools/croping_image_1.csv",newline="",encoding="utf-8") as cropi1:
reader = csv.reader(cropi1)
cropi1 = list(reader)
frame = frame[int(cropi1[0][0]):int(cropi1[1][0]), int(cropi1[2][0]):int(cropi1[3][0])]
cv2.imwrite("screen1.jpg", frame)
except requests.Timeout:
time.sleep(4)
print("one camera timeouterr")
except ConnectionError:
time.sleep(4)
print("one camera not ConnectionError")

How can I make my audio loop with pyaudio?

First of all, I'm pretty new to this library and I don't really understand everything. With the help of the internet I managed to get this code snippet working. This code basically plays an audio file (.wav, to be specific). The problem is that it only plays once; I want the audio file to loop until I set the is_looping variable to False.
import pyaudio
import wave
class AudioFile:
chunk = 1024
def __init__(self, file_dir):
""" Init audio stream """
self.wf = wave.open(file_dir, 'rb')
self.p = pyaudio.PyAudio()
self.stream = self.p.open(
format=self.p.get_format_from_width(self.wf.getsampwidth()),
channels=self.wf.getnchannels(),
rate=self.wf.getframerate(),
output=True
)
def play(self):
""" Play entire file """
data = self.wf.readframes(self.chunk)
while data != '':
self.stream.write(data)
data = self.wf.readframes(self.chunk)
def close(self):
""" Graceful shutdown """
self.stream.close()
self.p.terminate()
is_looping = True
audio = AudioFile("___.wav")
audio.play()
audio.close()
I tried doing something like this, but it still didn't work:
is_looping = True
audio = AudioFile("___.wav")
while is_looping:
audio.play()
audio.close()
I couldn't find a way to loop the audio using my code, but I found code on the internet that does exactly what I wanted. Here's the link: https://gist.github.com/THeK3nger/3624478
And here is the code from that link:
import os
import wave
import threading
import sys
# PyAudio Library
import pyaudio
class WavePlayerLoop(threading.Thread):
CHUNK = 1024
def __init__(self, filepath, loop=True):
"""
Initialize `WavePlayerLoop` class.
PARAM:
-- filepath (String) : File Path to wave file.
-- loop (boolean) : True if you want loop playback.
False otherwise.
"""
super(WavePlayerLoop, self).__init__()
self.filepath = os.path.abspath(filepath)
self.loop = loop
def run(self):
# Open Wave File and start play!
wf = wave.open(self.filepath, 'rb')
player = pyaudio.PyAudio()
# Open Output Stream (based on PyAudio tutorial)
stream = player.open(format=player.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
# PLAYBACK LOOP
data = wf.readframes(self.CHUNK)
while self.loop:
stream.write(data)
data = wf.readframes(self.CHUNK)
if data == b'': # If file is over then rewind.
wf.rewind()
data = wf.readframes(self.CHUNK)
stream.close()
player.terminate()
def play(self):
"""
Just another name for self.start()
"""
self.start()
def stop(self):
"""
Stop playback.
"""
self.loop = False
You just need to add something like this outside the class and it should work:
player = WavePlayerLoop("sounds/1.wav")
player.play()
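A usage sketch for stopping it later (the sleep length is arbitrary):
import time

player = WavePlayerLoop("sounds/1.wav")
player.play()    # starts the playback thread
time.sleep(10)   # ...do other work while the file keeps looping...
player.stop()    # loop flag goes False; the thread closes the stream and exits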

Save an h264 video input from a drone camera as a video file on the Computer

I have a Ryze Tello drone with a camera onboard. I am connecting to the drone in order to receive the video over Wi-Fi. My system is Windows 10 and I am using Python 2.7.
I am receiving an H.264 byte stream and I use the libh264 decoder from Tello to get the frames of the video, which I also display in my UI.
What I need to do is save this video as a file on my computer, and I am having problems with this. I am able to take a snapshot with OpenCV and save it as an image; that isn't a problem. But making a video isn't working for some reason.
I have read many posts here, like this one, but they don't work out for me. I either get an error or a very small video file which doesn't open.
My frames are lists of lists with RGB values, like:
[[255,200,100], [55,200,100], [25,20,100]]
Here is my code, for a better understanding:
This is the UI part (I copy only the needed code here):
def videoLoop(self):
try:
# start the thread that get GUI image and draw skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.drone.read()
if self.frame is None or self.frame.size == 0:
continue
# transfer the format from frame to image
image = Image.fromarray(self.frame)
# we found a compatibility problem between Tkinter, PIL and macOS, and it will
# sometimes result in a very long period for the "ImageTk.PhotoImage" function,
# so for macOS we start a new thread to execute the _updateGUIImage function.
if system == "Windows" or system == "Linux":
self.refreshUI(image)
else:
thread_tmp = threading.Thread(target=self.refreshUI, args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def refreshUI(self, image):
image = ImageTk.PhotoImage(image)
# if the imagePanel none ,we need to initial it
if self.imagePanel is None:
self.imagePanel = tki.Label(image=image)
self.imagePanel.image = image
self.imagePanel.pack(side="left", fill="both",
expand="yes", padx=10, pady=10)
# otherwise, simply update the imagePanel
else:
self.imagePanel.configure(image=image)
self.imagePanel.image = image
def takeSnapshot(self):
# grab the current timestamp and use it to construct the filename
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%d-%m-%Y_%H-%M-%S"))
p = os.path.sep.join((self.screenShotPath, filename))
# save the file
cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
print("[INFO] saved {}".format(filename))
Below you can find the Tello code (again, only the needed part):
def read(self):
"""Return the last frame from camera."""
if self.is_freeze:
return self.last_frame
else:
return self.frame
def video_freeze(self, is_freeze=True):
"""Pause video output -- set is_freeze to True"""
self.is_freeze = is_freeze
if is_freeze:
self.last_frame = self.frame
def _receive_video_thread(self):
"""
Listens for video streaming (raw h264) from the Tello.
Runs as a thread, sets self.frame to the most recent frame Tello captured.
"""
packet_data = b''
while True:
try:
res_string, ip = self.socket_video.recvfrom(2048)
packet_data += res_string
# end of frame
if len(res_string) != 1460:
for frame in self._h264_decod(packet_data):
self.frame = frame
packet_data = b''
except socket.error as exc:
print(("Caught exception sock.error : %s" % exc))
def _h264_decod(self, packet_data):
"""
decode raw h264 format data from Tello
:param packet_data: raw h264 data array
:return: a list of decoded frame
"""
res_frame_list = []
frames = self.decoder.decode(packet_data)
for framedata in frames:
(frame, w, h, ls) = framedata
if frame is not None:
# print ('frame size %i bytes, w %i, h %i, linesize %i' % (len(frame), w, h, ls))
frame = np.frombuffer(frame, dtype=np.ubyte, count=len(frame))
frame = (frame.reshape((h, ls // 3, 3)))
frame = frame[:, :w, :]
res_frame_list.append(frame)
return res_frame_list
It would be very kind of you if someone could help me write a method like this pseudocode:
def saveVideo(self, frame_or_whatever_i_need_here):
out = cv2.VideoWriter('output.avi_or_other_format', -1, 20.0, (640,480))
out.write(frame_or_whatever_i_need_here)
out.release()
Edit 1:
I found an option for making a video out of my snapshots; this means I could take snapshots in a thread and later save them as a video. That would be an option, but the problem is that it would consume too much disk space. The workaround link is here
I found the solution, so I will post it here in case someone needs the same thing. I used the following blog post and modified the code to do my work; you can find the post here
self.frame = None
self.frame_array = []
def videoLoop(self):
try:
# start the thread that get GUI image and draw skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.drone.read()
self.frame_array.append(self.frame)
if self.frame is None or self.frame.size == 0:
continue
# transfer the format from frame to image
image = Image.fromarray(self.frame)
if system == "Windows" or system == "Linux":
self.refreshUI(image)
else:
thread_tmp = threading.Thread(target=self.refreshUI, args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def convert_frames_to_video(self, pathOut, fps):
size = (self.videoWidth, self.videoHeight)
out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
for i in range(len(self.frame_array)):
# writing to a image array
out.write(self.frame_array[i])
out.release()
def onClose(self):
print("[INFO] closing...")
self.convert_frames_to_video('video.avi', 25.0)
self.stopEvent.set()
del self.drone
self.root.quit()

How to read from video url or from usb camera using videoflow?

In the Videoflow Python library there are examples of how to read from a video file, but how can I read video from a USB device on the computer?
This was just added to the library. You can do something like the following; just be sure to set the device id of your camera properly in the code.
import videoflow
import videoflow.core.flow as flow
from videoflow.core.constants import REALTIME
from videoflow.producers import VideoDeviceReader
from videoflow.consumers import VideofileWriter
class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
def __init__(self):
super(FrameIndexSplitter, self).__init__()
def process(self, data):
index, frame = data
return frame
def main():
output_file = "output.avi"
device_id = 0
reader = VideoDeviceReader(device_id)
frame = FrameIndexSplitter()(reader)
writer = VideofileWriter(output_file, fps = 30)(frame)
fl = flow.Flow([reader], [writer], flow_type = REALTIME)
fl.run()
fl.join()
if __name__ == "__main__":
main()

How to capture a video (AND audio) in python, from a camera (or webcam)

I'm looking for a solution, either on Linux or on Windows, that allows me to:
record video (+ audio) from my webcam and microphone, simultaneously,
save it as a file (.AVI, .mpg, or whatever),
display the video on the screen while recording it.
Compression is NOT an issue in my case, and I actually prefer to capture RAW and compress it later.
So far I've done it with an ActiveX component in VB which took care of everything, and I'd like to move to Python (the VB solution is unstable and unreliable).
So far I've only seen code that captures video only, or individual frames...
I've looked so far at:
OpenCV - couldn't find audio capture there
PyGame - no simultaneous audio capture (AFAIK)
VideoCapture - provides only single frames
SimpleCV - no audio
VLC - binding the VideoLAN program into wxPython - hopefully it will do (still investigating this option)
Kivy - just heard about it, didn't manage to get it working under Windows so far.
The question: is there a video & audio capture library for Python?
Or, what are the other options, if any?
Answer: No. There is no single library/solution in Python for doing video and audio recording simultaneously. You have to implement both separately and merge the audio and video signals in a smart way to end up with a single video/audio file; the sketch below illustrates that merge step.
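In practice the merge is usually a mux with ffmpeg once both files exist; a minimal sketch (the file names are placeholders, and ffmpeg must be on the PATH):
import subprocess

# Mux an already-recorded WAV and AVI into one file without re-encoding.
subprocess.call([
    "ffmpeg", "-y",
    "-i", "temp_video.avi",
    "-i", "temp_audio.wav",
    "-c", "copy",
    "merged.avi",
])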
I got a solution for the problem you present. My code addresses your three issues:
Records video + audio from webcam and microphone simultaneously.
It saves the final video/audio file as .AVI
Uncommenting lines 76, 77 and 78 will make the video be displayed on screen while recording.
My solution uses pyaudio for audio recording, opencv for video recording, and ffmpeg for muxing the two signals. To be able to record both simultaneously, I use multithreading: one thread records the video, and a second one records the audio. I have uploaded my code to GitHub and have also included all the essential parts here.
https://github.com/JRodrigoF/AVrecordeR
Note: OpenCV is not able to control the fps at which the webcam records. It can only specify the desired final fps in the encoding of the file, but the webcam usually behaves differently depending on its specifications and the light conditions (I found). So the fps has to be controlled at the level of the code, e.g. as in the pacing sketch below.
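For example, one way to hold a target fps is to pace the capture loop from timestamps rather than a fixed sleep (a sketch only, not part of the code that follows; target_fps and duration_s are assumed parameters):
import time

def capture_at_fps(cap, writer, target_fps, duration_s):
    # Write frames so that, on average, they are spaced 1/target_fps apart.
    interval = 1.0 / target_fps
    next_t = time.time()
    end_t = next_t + duration_s
    while time.time() < end_t:
        ret, frame = cap.read()
        if not ret:
            break
        writer.write(frame)
        next_t += interval
        delay = next_t - time.time()
        if delay > 0:
            time.sleep(delay)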
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
class VideoRecorder():
# Video class based on openCV
def __init__(self):
self.open = True
self.device_index = 0
self.fps = 6 # fps should be the minimum constant rate at which the camera can
self.fourcc = "MJPG" # capture images (with no decrease in speed over time; testing is required)
self.frameSize = (640,480) # video formats and sizes also depend and vary according to the camera used
self.video_filename = "temp_video.avi"
self.video_cap = cv2.VideoCapture(self.device_index)
self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
self.frame_counts = 1
self.start_time = time.time()
# Video starts being recorded
def record(self):
# counter = 1
timer_start = time.time()
timer_current = 0
while(self.open==True):
ret, video_frame = self.video_cap.read()
if (ret==True):
self.video_out.write(video_frame)
# print str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current)
self.frame_counts += 1
# counter += 1
# timer_current = time.time() - timer_start
time.sleep(0.16)
# gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('video_frame', gray)
# cv2.waitKey(1)
else:
break
# 0.16 delay -> 6 fps
#
# Finishes the video recording therefore the thread too
def stop(self):
if self.open==True:
self.open=False
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
else:
pass
# Launches the video recording function using a thread
def start(self):
video_thread = threading.Thread(target=self.record)
video_thread.start()
class AudioRecorder():
# Audio class based on pyAudio and Wave
def __init__(self):
self.open = True
self.rate = 44100
self.frames_per_buffer = 1024
self.channels = 2
self.format = pyaudio.paInt16
self.audio_filename = "temp_audio.wav"
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
# Audio starts being recorded
def record(self):
self.stream.start_stream()
while(self.open == True):
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
if self.open==False:
break
# Finishes the audio recording therefore the thread too
def stop(self):
if self.open==True:
self.open = False
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
pass
# Launches the audio recording function using a thread
def start(self):
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
def start_AVrecording(filename):
global video_thread
global audio_thread
video_thread = VideoRecorder()
audio_thread = AudioRecorder()
audio_thread.start()
video_thread.start()
return filename
def start_video_recording(filename):
global video_thread
video_thread = VideoRecorder()
video_thread.start()
return filename
def start_audio_recording(filename):
global audio_thread
audio_thread = AudioRecorder()
audio_thread.start()
return filename
def stop_AVrecording(filename):
audio_thread.stop()
frame_counts = video_thread.frame_counts
elapsed_time = time.time() - video_thread.start_time
recorded_fps = frame_counts / elapsed_time
print "total frames " + str(frame_counts)
print "elapsed time " + str(elapsed_time)
print "recorded fps " + str(recorded_fps)
video_thread.stop()
# Makes sure the threads have finished
while threading.active_count() > 1:
time.sleep(1)
# Merging audio and video signal
if abs(recorded_fps - 6) >= 0.01: # If the fps rate was higher/lower than expected, re-encode it to the expected
print "Re-encoding"
cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
subprocess.call(cmd, shell=True)
print "Muxing"
cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
else:
print "Normal recording\nMuxing"
cmd = "ffmpeg -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
print ".."
# Required and wanted processing of final files
def file_manager(filename):
local_path = os.getcwd()
if os.path.exists(str(local_path) + "/temp_audio.wav"):
os.remove(str(local_path) + "/temp_audio.wav")
if os.path.exists(str(local_path) + "/temp_video.avi"):
os.remove(str(local_path) + "/temp_video.avi")
if os.path.exists(str(local_path) + "/temp_video2.avi"):
os.remove(str(local_path) + "/temp_video2.avi")
if os.path.exists(str(local_path) + "/" + filename + ".avi"):
os.remove(str(local_path) + "/" + filename + ".avi")
To the questions asked above: yes, the code should also work under Python 3. I adjusted it a little and it now works for Python 2 and Python 3 (tested on Windows 7 with 2.7 and 3.6), though you need to have ffmpeg installed, or at least the ffmpeg.exe executable in the same directory; you can get it here: https://www.ffmpeg.org/download.html. Of course you also need all the other libraries (cv2, numpy, pyaudio) installed, for example with:
pip install opencv-python numpy pyaudio
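A small sketch (Python 3 only) for checking that ffmpeg is reachable before recording, either on the PATH or as ffmpeg.exe next to the script:
import os
import shutil

def ffmpeg_available():
    # True if ffmpeg is on the PATH or ffmpeg.exe sits beside this script.
    here = os.path.dirname(os.path.abspath(__file__))
    return shutil.which("ffmpeg") is not None or os.path.exists(os.path.join(here, "ffmpeg.exe"))

if not ffmpeg_available():
    raise SystemExit("ffmpeg not found: install it or place ffmpeg.exe next to this script")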
You can now run the code directly:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# VideoRecorder.py
from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
class VideoRecorder():
"Video class based on openCV"
def __init__(self, name="temp_video.avi", fourcc="MJPG", sizex=640, sizey=480, camindex=0, fps=30):
self.open = True
self.device_index = camindex
self.fps = fps # fps should be the minimum constant rate at which the camera can
self.fourcc = fourcc # capture images (with no decrease in speed over time; testing is required)
self.frameSize = (sizex, sizey) # video formats and sizes also depend and vary according to the camera used
self.video_filename = name
self.video_cap = cv2.VideoCapture(self.device_index)
self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
self.frame_counts = 1
self.start_time = time.time()
def record(self):
"Video starts being recorded"
# counter = 1
timer_start = time.time()
timer_current = 0
while self.open:
ret, video_frame = self.video_cap.read()
if ret:
self.video_out.write(video_frame)
# print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
self.frame_counts += 1
# counter += 1
# timer_current = time.time() - timer_start
time.sleep(1/self.fps)
# gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('video_frame', gray)
# cv2.waitKey(1)
else:
break
def stop(self):
"Finishes the video recording therefore the thread too"
if self.open:
self.open=False
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
def start(self):
"Launches the video recording function using a thread"
video_thread = threading.Thread(target=self.record)
video_thread.start()
class AudioRecorder():
"Audio class based on pyAudio and Wave"
def __init__(self, filename="temp_audio.wav", rate=44100, fpb=1024, channels=2):
self.open = True
self.rate = rate
self.frames_per_buffer = fpb
self.channels = channels
self.format = pyaudio.paInt16
self.audio_filename = filename
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
def record(self):
"Audio starts being recorded"
self.stream.start_stream()
while self.open:
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
if not self.open:
break
def stop(self):
"Finishes the audio recording therefore the thread too"
if self.open:
self.open = False
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
def start(self):
"Launches the audio recording function using a thread"
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
def start_AVrecording(filename="test"):
global video_thread
global audio_thread
video_thread = VideoRecorder()
audio_thread = AudioRecorder()
audio_thread.start()
video_thread.start()
return filename
def start_video_recording(filename="test"):
global video_thread
video_thread = VideoRecorder()
video_thread.start()
return filename
def start_audio_recording(filename="test"):
global audio_thread
audio_thread = AudioRecorder()
audio_thread.start()
return filename
def stop_AVrecording(filename="test"):
audio_thread.stop()
frame_counts = video_thread.frame_counts
elapsed_time = time.time() - video_thread.start_time
recorded_fps = frame_counts / elapsed_time
print("total frames " + str(frame_counts))
print("elapsed time " + str(elapsed_time))
print("recorded fps " + str(recorded_fps))
video_thread.stop()
# Makes sure the threads have finished
while threading.active_count() > 1:
time.sleep(1)
# Merging audio and video signal
if abs(recorded_fps - 6) >= 0.01: # If the fps rate was higher/lower than expected, re-encode it to the expected
print("Re-encoding")
cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
subprocess.call(cmd, shell=True)
print("Muxing")
cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
else:
print("Normal recording\nMuxing")
cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
print("..")
def file_manager(filename="test"):
"Required and wanted processing of final files"
local_path = os.getcwd()
if os.path.exists(str(local_path) + "/temp_audio.wav"):
os.remove(str(local_path) + "/temp_audio.wav")
if os.path.exists(str(local_path) + "/temp_video.avi"):
os.remove(str(local_path) + "/temp_video.avi")
if os.path.exists(str(local_path) + "/temp_video2.avi"):
os.remove(str(local_path) + "/temp_video2.avi")
# if os.path.exists(str(local_path) + "/" + filename + ".avi"):
# os.remove(str(local_path) + "/" + filename + ".avi")
if __name__ == '__main__':
start_AVrecording()
time.sleep(5)
stop_AVrecording()
file_manager()
I would recommend ffmpeg. There is a Python wrapper:
http://code.google.com/p/pyffmpeg/
I've been looking around for a good answer to this, and I think it is GStreamer...
The documentation for the Python bindings is extremely light, and most of it seemed centered around the old 0.10 version of GStreamer instead of the new 1.x versions, but GStreamer is an extremely powerful, cross-platform multimedia framework that can stream, mux, transcode, and display just about anything. A minimal capture pipeline is sketched below.
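A rough sketch of what a GStreamer 1.x capture pipeline looks like from Python (assumes PyGObject and the usual plugin sets are installed; v4l2src and autoaudiosrc are Linux-oriented element names and may need swapping on other platforms):
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)
# Webcam + microphone, H.264 + Vorbis, muxed into a Matroska file.
pipeline = Gst.parse_launch(
    "v4l2src ! queue ! videoconvert ! x264enc tune=zerolatency ! queue ! matroskamux name=mux "
    "! filesink location=capture.mkv "
    "autoaudiosrc ! queue ! audioconvert ! vorbisenc ! queue ! mux."
)
pipeline.set_state(Gst.State.PLAYING)
try:
    input("Recording... press Enter to stop\n")
finally:
    pipeline.send_event(Gst.Event.new_eos())   # finalize the file cleanly
    bus = pipeline.get_bus()
    bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS)
    pipeline.set_state(Gst.State.NULL)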
I used JRodrigoF's script for a while on a project. However, I noticed that sometimes the threads would hang, which would cause the program to crash. Another issue is that OpenCV does not capture video frames at a reliable rate, and ffmpeg would distort my video when re-encoding.
I came up with a new solution that records much more reliably and with much higher quality for my application. It presently only works on Windows because it uses pywinauto and the built-in Windows Camera app. The last bit of the script does some error checking to confirm the video recorded successfully, by checking the timestamp in the name of the video file.
https://gist.github.com/mjdargen/956cc968864f38bfc4e20c9798c7d670
import pywinauto
import time
import subprocess
import os
import datetime
def win_record(duration):
subprocess.run('start microsoft.windows.camera:', shell=True) # open camera app
# focus window by getting handle using title and class name
# subprocess call opens camera and gets focus, but this provides alternate way
# t, c = 'Camera', 'ApplicationFrameWindow'
# handle = pywinauto.findwindows.find_windows(title=t, class_name=c)[0]
# # get app and window
# app = pywinauto.application.Application().connect(handle=handle)
# window = app.window(handle=handle)
# window.set_focus() # set focus
time.sleep(2) # have to sleep
# take control of camera window to take video
desktop = pywinauto.Desktop(backend="uia")
cam = desktop['Camera']
# cam.print_control_identifiers()
# make sure in video mode
if cam.child_window(title="Switch to Video mode", auto_id="CaptureButton_1", control_type="Button").exists():
cam.child_window(title="Switch to Video mode", auto_id="CaptureButton_1", control_type="Button").click()
time.sleep(1)
# start then stop video
cam.child_window(title="Take Video", auto_id="CaptureButton_1", control_type="Button").click()
time.sleep(duration+2)
cam.child_window(title="Stop taking Video", auto_id="CaptureButton_1", control_type="Button").click()
# retrieve vids from camera roll and sort
dir = 'C:/Users/m/Pictures/Camera Roll'
all_contents = list(os.listdir(dir))
vids = [f for f in all_contents if "_Pro.mp4" in f]
vids.sort()
vid = vids[-1] # get last vid
# compute time difference
vid_time = vid.replace('WIN_', '').replace('_Pro.mp4', '')
vid_time = datetime.datetime.strptime(vid_time, '%Y%m%d_%H_%M_%S')
now = datetime.datetime.now()
diff = now - vid_time
# time different greater than 2 minutes, assume something wrong & quit
if diff.seconds > 120:
quit()
subprocess.run('Taskkill /IM WindowsCamera.exe /F', shell=True) # close camera app
print('Recorded successfully!')
win_record(2)
If you notice misalignment between video and audio with the code above, please see my solution below.
I think the top-rated answer above does a great job. However, it did not work perfectly when I was using it, especially at a low fps rate (say 10). The main issue is with video recording: in order to properly synchronize video and audio with ffmpeg, one has to make sure that cv2.VideoCapture() and cv2.VideoWriter() share exactly the same FPS, because the recorded video's length is determined solely by the fps rate and the number of frames.
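To illustrate the point, something along these lines keeps the two in step (a sketch only; some drivers report 0 for CAP_PROP_FPS, hence the assumed fallback value):
import cv2

cap = cv2.VideoCapture(0)
fps = cap.get(cv2.CAP_PROP_FPS) or 30.0   # fall back if the driver reports 0
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
out = cv2.VideoWriter("synced.avi", fourcc, fps, (640, 480))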
Following is my suggested update:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# VideoRecorder.py
from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
import ffmpeg
class VideoRecorder():
"Video class based on openCV"
def __init__(self, name="temp_video.avi", fourcc="MJPG", sizex=640, sizey=480, camindex=0, fps=30):
self.open = True
self.device_index = camindex
self.fps = fps # fps should be the minimum constant rate at which the camera can
self.fourcc = fourcc # capture images (with no decrease in speed over time; testing is required)
self.frameSize = (sizex, sizey) # video formats and sizes also depend and vary according to the camera used
self.video_filename = name
self.video_cap = cv2.VideoCapture(self.device_index)
self.video_cap.set(cv2.CAP_PROP_FPS, self.fps)
self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
self.frame_counts = 1
self.start_time = time.time()
def record(self):
"Video starts being recorded"
# counter = 1
timer_start = time.time()
timer_current = 0
while self.open:
ret, video_frame = self.video_cap.read()
if ret:
self.video_out.write(video_frame)
# print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
self.frame_counts += 1
# print(self.frame_counts)
# counter += 1
# timer_current = time.time() - timer_start
# time.sleep(1/self.fps)
# gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('video_frame', gray)
# cv2.waitKey(1)
else:
break
def stop(self):
"Finishes the video recording therefore the thread too"
if self.open:
self.open=False
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
def start(self):
"Launches the video recording function using a thread"
video_thread = threading.Thread(target=self.record)
video_thread.start()
class AudioRecorder():
"Audio class based on pyAudio and Wave"
def __init__(self, filename="temp_audio.wav", rate=44100, fpb=1024, channels=2):
self.open = True
self.rate = rate
self.frames_per_buffer = fpb
self.channels = channels
self.format = pyaudio.paInt16
self.audio_filename = filename
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
def record(self):
"Audio starts being recorded"
self.stream.start_stream()
while self.open:
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
if not self.open:
break
def stop(self):
"Finishes the audio recording therefore the thread too"
if self.open:
self.open = False
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
def start(self):
"Launches the audio recording function using a thread"
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
def start_AVrecording(filename="test"):
global video_thread
global audio_thread
video_thread = VideoRecorder()
audio_thread = AudioRecorder()
audio_thread.start()
video_thread.start()
return filename
def start_video_recording(filename="test"):
global video_thread
video_thread = VideoRecorder()
video_thread.start()
return filename
def start_audio_recording(filename="test"):
global audio_thread
audio_thread = AudioRecorder()
audio_thread.start()
return filename
def stop_AVrecording(filename="test"):
audio_thread.stop()
frame_counts = video_thread.frame_counts
elapsed_time = time.time() - video_thread.start_time
recorded_fps = frame_counts / elapsed_time
print("total frames " + str(frame_counts))
print("elapsed time " + str(elapsed_time))
print("recorded fps " + str(recorded_fps))
video_thread.stop()
# Makes sure the threads have finished
while threading.active_count() > 1:
time.sleep(1)
video_stream = ffmpeg.input(video_thread.video_filename)
audio_stream = ffmpeg.input(audio_thread.audio_filename)
ffmpeg.output(audio_stream, video_stream, 'out.mp4').run(overwrite_output=True)
# # Merging audio and video signal
# if abs(recorded_fps - 6) >= 0.01: # If the fps rate was higher/lower than expected, re-encode it to the expected
# print("Re-encoding")
# cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
# subprocess.call(cmd, shell=True)
# print("Muxing")
# cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
# subprocess.call(cmd, shell=True)
# else:
# print("Normal recording\nMuxing")
# cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
# subprocess.call(cmd, shell=True)
# print("..")
def file_manager(filename="test"):
"Required and wanted processing of final files"
local_path = os.getcwd()
if os.path.exists(str(local_path) + "/temp_audio.wav"):
os.remove(str(local_path) + "/temp_audio.wav")
if os.path.exists(str(local_path) + "/temp_video.avi"):
os.remove(str(local_path) + "/temp_video.avi")
if os.path.exists(str(local_path) + "/temp_video2.avi"):
os.remove(str(local_path) + "/temp_video2.avi")
# if os.path.exists(str(local_path) + "/" + filename + ".avi"):
# os.remove(str(local_path) + "/" + filename + ".avi")
if __name__ == '__main__':
start_AVrecording()
# try:
# while True:
# pass
# except KeyboardInterrupt:
# stop_AVrecording()
time.sleep(10)
stop_AVrecording()
print("finishing recording")
file_manager()
Using everyone's contributions and following the suggestion of Paul, I was able to come up with the following code:
Recorder.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# VideoRecorder.py
from __future__ import print_function, division
import numpy as np
import sys
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
import ffmpeg
REC_FOLDER = "recordings/"
class Recorder():
def __init__(self, filename):
self.filename = filename
self.video_thread = self.VideoRecorder(self, REC_FOLDER + filename)
self.audio_thread = self.AudioRecorder(self, REC_FOLDER + filename)
def startRecording(self):
self.video_thread.start()
self.audio_thread.start()
def stopRecording(self):
self.video_thread.stop()
self.audio_thread.stop()
def saveRecording(self):
#Save audio / Show video resume
self.audio_thread.saveAudio()
self.video_thread.showFramesResume()
#Merges both streams and writes
video_stream = ffmpeg.input(self.video_thread.video_filename)
audio_stream = ffmpeg.input(self.audio_thread.audio_filename)
while (not os.path.exists(self.audio_thread.audio_filename)):
print("waiting for audio file to exit...")
stream = ffmpeg.output(video_stream, audio_stream, REC_FOLDER + self.filename +".mp4")
try:
ffmpeg.run(stream, capture_stdout=True, capture_stderr=True, overwrite_output=True)
except ffmpeg.Error as e:
print(e.stdout, file=sys.stderr)
print(e.stderr, file=sys.stderr)
class VideoRecorder():
"Video class based on openCV"
def __init__(self, recorder, name, fourcc="MJPG", frameSize=(640,480), camindex=0, fps=15):
self.recorder = recorder
self.open = True
self.duration = 0
self.device_index = camindex
self.fps = fps # fps should be the minimum constant rate at which the camera can
self.fourcc = fourcc # capture images (with no decrease in speed over time; testing is required)
self.video_filename = name + ".avi" # video formats and sizes also depend and vary according to the camera used
self.video_cap = cv2.VideoCapture(self.device_index, cv2.CAP_DSHOW)
self.video_writer = cv2.VideoWriter_fourcc(*fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, frameSize)
self.frame_counts = 1
self.start_time = time.time()
def record(self):
"Video starts being recorded"
counter = 1
while self.open:
ret, video_frame = self.video_cap.read()
if ret:
self.video_out.write(video_frame)
self.frame_counts += 1
counter += 1
self.duration += 1/self.fps
if (video_frame is None): print("I WAS NONEEEEEEEEEEEEEEEEEEEEEE")
gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('video_frame', gray)
cv2.waitKey(1)
while(self.duration - self.recorder.audio_thread.duration >= 0.2 and self.recorder.audio_thread.open):
time.sleep(0.2)
else:
break
#Release Video
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
self.video_out = None
def stop(self):
"Finishes the video recording therefore the thread too"
self.open=False
def start(self):
"Launches the video recording function using a thread"
self.thread = threading.Thread(target=self.record)
self.thread.start()
def showFramesResume(self):
#Only stop of video has all frames
frame_counts = self.frame_counts
elapsed_time = time.time() - self.start_time
recorded_fps = self.frame_counts / elapsed_time
print("total frames " + str(frame_counts))
print("elapsed time " + str(elapsed_time))
print("recorded fps " + str(recorded_fps))
class AudioRecorder():
"Audio class based on pyAudio and Wave"
def __init__(self, recorder, filename, rate=44100, fpb=1024, channels=1, audio_index=0):
self.recorder = recorder
self.open = True
self.rate = rate
self.duration = 0
self.frames_per_buffer = fpb
self.channels = channels
self.format = pyaudio.paInt16
self.audio_filename = filename + ".wav"
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
input_device_index=audio_index,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
def record(self):
"Audio starts being recorded"
self.stream.start_stream()
t_start = time.time_ns()
while self.open:
try:
self.duration += self.frames_per_buffer / self.rate
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
except Exception as e:
print('\n' + '*'*80)
print('PyAudio read exception at %.1fms\n' % ((time.time_ns() - t_start)/10**6))
print(e)
print('*'*80 + '\n')
while(self.duration - self.recorder.video_thread.duration >= 0.5):
time.sleep(0.5)
#Closes audio stream
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
def stop(self):
"Finishes the audio recording therefore the thread too"
self.open = False
def start(self):
"Launches the audio recording function using a thread"
self.thread = threading.Thread(target=self.record)
self.thread.start()
def saveAudio(self):
#Save Audio File
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
Main.py
from recorder import Recorder
import time
recorder = Recorder("test1")
recorder.startRecording()
time.sleep(240)
recorder.stopRecording()
recorder.saveRecording()
With this solution, the camera and the audio will wait for each other.
I also tried the FFmpeg re-encoding and muxing approach, and even though it was able to synchronize the audio with the video, the video had a massive quality drop.
You can also write an offline HTML/JS page that records video with audio, and open that page with the Python library pywebview; it should work fine. A minimal sketch is below.
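A minimal pywebview sketch (recorder.html is a hypothetical local page that would use getUserMedia / MediaRecorder in JavaScript to capture webcam video with audio):
import webview

window = webview.create_window("Recorder", "recorder.html")
webview.start()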
I was randomly getting "[Errno -9999] Unanticipated host error" while using JRodrigoF's solution and found that it was due to a race condition where the audio stream can be closed just before being read for the last time inside record() of the AudioRecorder class.
I modified it slightly so that all the closing procedures are done after the while loop, and added a function list_audio_devices() that shows the list of audio devices to select from. I also added an audio device index as a parameter for choosing the device. A short usage sketch follows, then the full script.
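Usage sketch (assuming the script below is saved as VideoRecorder.py and that some input device's name contains "USB"; adjust the filter to your hardware):
import time
from VideoRecorder import (list_audio_devices, start_AVrecording,
                           stop_AVrecording, file_manager)

device_index, sample_rate = list_audio_devices(name_filter="USB")
start_AVrecording("meeting", audio_index=device_index, sample_rate=sample_rate)
time.sleep(5)
stop_AVrecording("meeting")
file_manager("meeting")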
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# VideoRecorder.py
from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
class VideoRecorder():
"Video class based on openCV"
def __init__(self, name="temp_video.avi", fourcc="MJPG", sizex=640, sizey=480, camindex=0, fps=30):
self.open = True
self.device_index = camindex
self.fps = fps # fps should be the minimum constant rate at which the camera can
self.fourcc = fourcc # capture images (with no decrease in speed over time; testing is required)
self.frameSize = (sizex, sizey) # video formats and sizes also depend and vary according to the camera used
self.video_filename = name
self.video_cap = cv2.VideoCapture(self.device_index)
self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
self.frame_counts = 1
self.start_time = time.time()
def record(self):
"Video starts being recorded"
# counter = 1
timer_start = time.time()
timer_current = 0
while self.open:
ret, video_frame = self.video_cap.read()
if ret:
self.video_out.write(video_frame)
# print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
self.frame_counts += 1
# counter += 1
# timer_current = time.time() - timer_start
time.sleep(1/self.fps)
# gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('video_frame', gray)
# cv2.waitKey(1)
else:
break
def stop(self):
"Finishes the video recording therefore the thread too"
if self.open:
self.open=False
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
def start(self):
"Launches the video recording function using a thread"
video_thread = threading.Thread(target=self.record)
video_thread.start()
class AudioRecorder():
"Audio class based on pyAudio and Wave"
def __init__(self, filename="temp_audio.wav", rate=44100, fpb=2**12, channels=1, audio_index=0):
self.open = True
self.rate = rate
self.frames_per_buffer = fpb
self.channels = channels
self.format = pyaudio.paInt16
self.audio_filename = filename
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
input_device_index=audio_index,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
def record(self):
"Audio starts being recorded"
self.stream.start_stream()
t_start = time.time_ns()
while self.open:
try:
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
except Exception as e:
print('\n' + '*'*80)
print('PyAudio read exception at %.1fms\n' % ((time.time_ns() - t_start)/10**6))
print(e)
print('*'*80 + '\n')
time.sleep(0.01)
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
def stop(self):
"Finishes the audio recording therefore the thread too"
if self.open:
self.open = False
def start(self):
"Launches the audio recording function using a thread"
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
def start_AVrecording(filename="test", audio_index=0, sample_rate=44100):
global video_thread
global audio_thread
video_thread = VideoRecorder()
audio_thread = AudioRecorder(audio_index=audio_index, rate=sample_rate)
audio_thread.start()
video_thread.start()
return filename
def start_video_recording(filename="test"):
global video_thread
video_thread = VideoRecorder()
video_thread.start()
return filename
def start_audio_recording(filename="test", audio_index=0, sample_rate=44100):
global audio_thread
audio_thread = AudioRecorder(audio_index=audio_index, rate=sample_rate)
audio_thread.start()
return filename
def stop_AVrecording(filename="test"):
audio_thread.stop()
frame_counts = video_thread.frame_counts
elapsed_time = time.time() - video_thread.start_time
recorded_fps = frame_counts / elapsed_time
print("total frames " + str(frame_counts))
print("elapsed time " + str(elapsed_time))
print("recorded fps " + str(recorded_fps))
video_thread.stop()
# Makes sure the threads have finished
while threading.active_count() > 1:
time.sleep(1)
# Merging audio and video signal
if abs(recorded_fps - 6) >= 0.01: # If the fps rate was higher/lower than expected, re-encode it to the expected
print("Re-encoding")
cmd = "ffmpeg -r " + str(recorded_fps) + " -i temp_video.avi -pix_fmt yuv420p -r 6 temp_video2.avi"
subprocess.call(cmd, shell=True)
print("Muxing")
cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video2.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
else:
print("Normal recording\nMuxing")
cmd = "ffmpeg -y -ac 2 -channel_layout stereo -i temp_audio.wav -i temp_video.avi -pix_fmt yuv420p " + filename + ".avi"
subprocess.call(cmd, shell=True)
print("..")
def file_manager(filename="test"):
"Required and wanted processing of final files"
local_path = os.getcwd()
if os.path.exists(str(local_path) + "/temp_audio.wav"):
os.remove(str(local_path) + "/temp_audio.wav")
if os.path.exists(str(local_path) + "/temp_video.avi"):
os.remove(str(local_path) + "/temp_video.avi")
if os.path.exists(str(local_path) + "/temp_video2.avi"):
os.remove(str(local_path) + "/temp_video2.avi")
# if os.path.exists(str(local_path) + "/" + filename + ".avi"):
# os.remove(str(local_path) + "/" + filename + ".avi")
def list_audio_devices(name_filter=None):
pa = pyaudio.PyAudio()
device_index = None
sample_rate = None
for x in range(pa.get_device_count()):
info = pa.get_device_info_by_index(x)
print(pa.get_device_info_by_index(x))
if name_filter is not None and name_filter in info['name']:
device_index = info['index']
sample_rate = int(info['defaultSampleRate'])
break
return device_index, sample_rate
if __name__ == '__main__':
start_AVrecording()
time.sleep(5)
stop_AVrecording()
file_manager()
