Capture Sound from Microphone and Speakers - Python

We are using Amazon Transcribe for speech-to-text, but we need to capture sound from both the microphone and the speakers. Do you think this can be done with sounddevice, or should we use something else?
amazon-transcribe-streaming-sdk
Mic Function
import asyncio
import sounddevice

async def mic_stream():
    # This function wraps the raw input stream from the microphone, forwarding
    # the blocks to an asyncio.Queue.
    loop = asyncio.get_event_loop()
    input_queue = asyncio.Queue()

    def callback(indata, outdata, frame_count, time_info, status):
        loop.call_soon_threadsafe(input_queue.put_nowait, (bytes(indata), status))

    # Be sure to use the correct parameters for the audio stream that match
    # the audio formats described for the source language you'll be using:
    # https://docs.aws.amazon.com/transcribe/latest/dg/streaming.html
    stream = sounddevice.RawStream(
        channels=1,
        samplerate=16000,
        callback=callback,
        blocksize=1024 * 2,
        dtype="int16",
    )
    # Initiate the audio stream and asynchronously yield the audio chunks
    # as they become available.
    with stream:
        while True:
            indata, status = await input_queue.get()
            yield indata, status
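sounddevice itself does not expose a portable way to record what the speakers are playing, so capturing "both" usually means adding a loopback capture. One option is the third-party soundcard library, which can open output devices as loopback microphones. A minimal sketch, assuming soundcard's documented get_microphone/recorder API (loopback availability depends on the OS and audio backend, and this is untested here):

import soundcard as sc

def speaker_chunks(samplerate=16000, blocksize=2048):
    # include_loopback=True lists output devices as recordable "microphones",
    # so the default speaker can be captured like an input device.
    loopback = sc.get_microphone(sc.default_speaker().name, include_loopback=True)
    with loopback.recorder(samplerate=samplerate) as rec:
        while True:
            # record() returns a float32 numpy array of shape (numframes, channels)
            yield rec.record(numframes=blocksize)

Each yielded block would still need to be converted to int16 bytes before being fed into the same asyncio.Queue pattern as mic_stream above.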

Related

Azure Text-To-Speech to PyAudio Stream

I am trying to stream the output of an Azure text-to-speech instance to my speaker with PyAudio, using Microsoft's sample code.
I tried to write to PyAudio's stream inside Azure's callback function def write, but it gives me this error:
my_stream.write(audio_buffer)
  File "/opt/homebrew/lib/python3.10/site-packages/pyaudio.py", line 589, in write
    pa.write_stream(self._stream, frames, num_frames,
TypeError: argument 2 must be read-only bytes-like object, not memoryview
How do I handle Azure's output so that the PyAudio stream accepts it as audio data?
Full code:
import azure.cognitiveservices.speech as speechsdk
import os, sys, pyaudio

pa = pyaudio.PyAudio()
my_text = "My emotional experiences are varied, but mostly involve trying to find a balance between understanding others’ feelings and managing my own. I also explore the intersection of emotion and technology through affective computing and related research."

voc_data = {
    'channels': 1 if sys.platform == 'darwin' else 2,
    'rate': 44100,
    'width': pa.get_sample_size(pyaudio.paInt16),
    'format': pyaudio.paInt16,
    'frames': []
}

my_stream = pa.open(format=voc_data['format'],
                    channels=voc_data['channels'],
                    rate=voc_data['rate'],
                    output=True)

speech_key = os.getenv('SPEECH_KEY')
service_region = os.getenv('SPEECH_REGION')

def speech_synthesis_to_push_audio_output_stream():
    """Performs speech synthesis and pushes audio output to a stream."""
    class PushAudioOutputStreamSampleCallback(speechsdk.audio.PushAudioOutputStreamCallback):
        """
        Example class that implements the PushAudioOutputStreamCallback, which is used to show
        how to push output audio to a stream.
        """
        def __init__(self) -> None:
            super().__init__()
            self._audio_data = bytes(0)
            self._closed = False

        def write(self, audio_buffer: memoryview) -> int:
            """
            The callback function which is invoked when the synthesizer has an output audio chunk
            to write out.
            """
            self._audio_data += audio_buffer
            my_stream.write(audio_buffer)
            print("{} bytes received.".format(audio_buffer.nbytes))
            return audio_buffer.nbytes

        def close(self) -> None:
            """
            The callback function which is invoked when the synthesizer is about to close the
            stream.
            """
            self._closed = True
            print("Push audio output stream closed.")

        def get_audio_data(self) -> bytes:
            return self._audio_data

        def get_audio_size(self) -> int:
            return len(self._audio_data)

    # Creates an instance of a speech config with the specified subscription key and service region.
    speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
    # Creates a customized instance of PushAudioOutputStreamCallback
    stream_callback = PushAudioOutputStreamSampleCallback()
    # Creates an audio output stream from the callback
    push_stream = speechsdk.audio.PushAudioOutputStream(stream_callback)
    # Creates a speech synthesizer using the push stream as audio output.
    stream_config = speechsdk.audio.AudioOutputConfig(stream=push_stream)
    speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=stream_config)

    # Receives text from console input and synthesizes it to stream output.
    while True:
        # print("Enter some text that you want to synthesize, Ctrl-Z to exit")
        # try:
        #     text = input()
        # except EOFError:
        #     break
        result = speech_synthesizer.speak_text_async(my_text).get()
        # Check result
        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            print("Speech synthesized for text [{}], and the audio was written to output stream.".format(my_text))
        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            print("Speech synthesis canceled: {}".format(cancellation_details.reason))
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                print("Error details: {}".format(cancellation_details.error_details))
        # Destroys result, which is necessary for destroying the speech synthesizer
        del result

    # Destroys the synthesizer in order to close the output stream.
    del speech_synthesizer
    print("Totally {} bytes received.".format(stream_callback.get_audio_size()))

speech_synthesis_to_push_audio_output_stream()
Here I have a workaround: instead of using the stream, store the audio in a file, then simply read the file back and play it using PyAudio.
# Dependencies
import os
import azure.cognitiveservices.speech as speechsdk
import pyaudio
import wave

speech_config = speechsdk.SpeechConfig(subscription="<Key>", region="<Region>")
# Audio Config
audio_config = speechsdk.audio.AudioOutputConfig(filename="background.wav")
speech_config.speech_synthesis_voice_name = 'en-US-JennyNeural'
speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)

print("Enter the Text:- ")
text = input()
speech_synthesis_result = speech_synthesizer.speak_text_async(text).get()
print("Conversion is Complete")

filename = 'background.wav'  # Same as in the audio config
chunk = 1024
file = wave.open(filename, 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(file.getsampwidth()),
                channels=file.getnchannels(),
                rate=file.getframerate(),
                output=True)
data = file.readframes(chunk)
print("Starting Audio")
# readframes() returns bytes, so compare against b'' (comparing to '' would never end)
while data != b'':
    stream.write(data)
    data = file.readframes(chunk)
stream.stop_stream()
stream.close()
p.terminate()
The drawback is that this takes more disk space and more time, since the audio is written to a file and read back.
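If you want to stay with the streaming approach instead, the TypeError in the question already points at a likely direct fix: PyAudio's Stream.write() expects a read-only bytes-like object, while Azure hands the callback a memoryview. Converting it first should be enough; a minimal sketch of the changed write callback (untested):

def write(self, audio_buffer: memoryview) -> int:
    self._audio_data += audio_buffer
    # Convert the memoryview to bytes before handing it to PyAudio
    my_stream.write(audio_buffer.tobytes())
    print("{} bytes received.".format(audio_buffer.nbytes))
    return audio_buffer.nbytes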

How can I make my audio loop with pyaudio?

First of all, I'm pretty new to this library and I don't really understand everything. With the help of the internet I managed to get this code snippet working. It basically plays an audio file (.wav, to be specific). The problem is that it only plays once; I want the audio file to loop until I set the is_looping variable to False.
import pyaudio
import wave

class AudioFile:
    chunk = 1024

    def __init__(self, file_dir):
        """ Init audio stream """
        self.wf = wave.open(file_dir, 'rb')
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format=self.p.get_format_from_width(self.wf.getsampwidth()),
            channels=self.wf.getnchannels(),
            rate=self.wf.getframerate(),
            output=True
        )

    def play(self):
        """ Play entire file """
        data = self.wf.readframes(self.chunk)
        while data != b'':
            self.stream.write(data)
            data = self.wf.readframes(self.chunk)

    def close(self):
        """ Graceful shutdown """
        self.stream.close()
        self.p.terminate()

is_looping = True
audio = AudioFile("___.wav")
audio.play()
audio.close()
I tried doing something like this, but it still didn't work:
is_looping = True
audio = AudioFile("___.wav")
while is_looping:
    audio.play()
audio.close()
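A likely reason the retry fails: after the first play(), the wave file's read position is at end-of-file, so every later readframes() call returns b'' immediately and nothing more is written. A minimal sketch of a fix, using the standard wave rewind() call:

def play(self):
    """ Play the entire file once; rewind first so repeated calls work """
    self.wf.rewind()  # reset the read position to the start of the file
    data = self.wf.readframes(self.chunk)
    while data != b'':
        self.stream.write(data)
        data = self.wf.readframes(self.chunk)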
I couldn't find a way to loop the audio using my own code, but I found code on the internet that does exactly what I wanted it to do. Here's the link: https://gist.github.com/THeK3nger/3624478
And here is the code from that link:
import os
import wave
import threading
import sys

# PyAudio Library
import pyaudio

class WavePlayerLoop(threading.Thread):
    CHUNK = 1024

    def __init__(self, filepath, loop=True):
        """
        Initialize `WavePlayerLoop` class.
        PARAM:
            -- filepath (String) : File Path to wave file.
            -- loop (boolean)    : True if you want loop playback. False otherwise.
        """
        super(WavePlayerLoop, self).__init__()
        self.filepath = os.path.abspath(filepath)
        self.loop = loop

    def run(self):
        # Open the wave file and start playing!
        wf = wave.open(self.filepath, 'rb')
        player = pyaudio.PyAudio()

        # Open an output stream (based on the PyAudio tutorial)
        stream = player.open(format=player.get_format_from_width(wf.getsampwidth()),
                             channels=wf.getnchannels(),
                             rate=wf.getframerate(),
                             output=True)

        # PLAYBACK LOOP
        data = wf.readframes(self.CHUNK)
        while self.loop:
            stream.write(data)
            data = wf.readframes(self.CHUNK)
            if data == b'':  # If the file is over, rewind.
                wf.rewind()
                data = wf.readframes(self.CHUNK)

        stream.close()
        player.terminate()

    def play(self):
        """
        Just another name for self.start()
        """
        self.start()

    def stop(self):
        """
        Stop playback.
        """
        self.loop = False
You just need to add something like this outside the class and it should work:
player = WavePlayerLoop("sounds/1.wav")
player.play()
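To end playback later, call player.stop(): it sets self.loop to False, so the playback loop in run() exits and the stream and PyAudio instance are closed.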

How to pass a live audio URL to the Google Speech-to-Text API

I have a URL to a live audio recording that I'm trying to transcribe using the Google Speech-to-Text API. I am using example code from the Cloud Speech-to-Text API. However, the problem is that when I pass the live URL I do not receive any output. Below is the relevant portion of my code. Any help would be greatly appreciated!
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import io
import os
import time
import requests
import numpy as np
from urllib.request import urlopen
from datetime import datetime
from datetime import timedelta

os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "app_creds.json"

def get_stream():
    stream = urlopen('streamurl')
    duration = 60
    begin = datetime.now()
    duration = timedelta(seconds=duration)
    while datetime.now() - begin < duration:
        data = stream.read(8000)
        return data

def transcribe_streaming():
    """Streams transcription of the given audio file."""
    client = speech.SpeechClient()
    content = get_stream()
    # In practice, stream should be a generator yielding chunks of audio data.
    stream = [content]
    requests = (types.StreamingRecognizeRequest(audio_content=chunk)
                for chunk in stream)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US')
    streaming_config = types.StreamingRecognitionConfig(config=config)
    # streaming_recognize returns a generator.
    responses = client.streaming_recognize(streaming_config, requests)
    for response in responses:
        # Once the transcription has settled, the first result will contain the
        # is_final result. The other results will be for subsequent portions of
        # the audio.
        for result in response.results:
            print('Finished: {}'.format(result.is_final))
            print('Stability: {}'.format(result.stability))
            alternatives = result.alternatives
            # The alternatives are ordered from most likely to least.
            for alternative in alternatives:
                print('Confidence: {}'.format(alternative.confidence))
                print(u'Transcript: {}'.format(alternative.transcript))
When sending audio to the Google Speech service, make sure that the service object setup matches the audio encoding. In your particular case,

config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=16000,
    language_code='en-US')

corresponds to single-channel, 16 kHz, linear 16-bit PCM encoding. See the list of other supported encodings if you need to transcribe audio in different formats.
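Also note the comment already in the question's code: streaming_recognize expects a generator that keeps yielding chunks, whereas get_stream() returns after the first read(), so at most one 8000-byte chunk is ever sent. A hedged sketch of a chunk generator over the live URL ('streamurl' kept as the placeholder from the question):

from urllib.request import urlopen
from datetime import datetime, timedelta

def stream_chunks(url='streamurl', chunk_size=8000, duration_s=60):
    # Yield raw byte chunks from the live stream for up to duration_s seconds,
    # so one StreamingRecognizeRequest can be built per chunk.
    stream = urlopen(url)
    deadline = datetime.now() + timedelta(seconds=duration_s)
    while datetime.now() < deadline:
        data = stream.read(chunk_size)
        if not data:
            break
        yield data

requests = (types.StreamingRecognizeRequest(audio_content=chunk)
            for chunk in stream_chunks())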
Here is a part of my code that I used a while back; I don't know if it may help:
from google.api_core.exceptions import (GoogleAPICallError, OutOfRange,
                                        ResourceExhausted, ServiceUnavailable)

def live_recognize_loop(self):
    client = self.client

    def is_running():
        return self.recording

    while self.recording:
        with MicrophoneStream(RATE, CHUNK) as stream:
            audio_generator = stream.generator(is_running)
            requests = (types.StreamingRecognizeRequest(audio_content=content) for content in audio_generator)
            responses = client.streaming_recognize(client.custom_streaming_config, requests)
            responses_iterator = iter(responses)
            while self.recording:
                try:
                    response = next(responses_iterator)
                except StopIteration:
                    break
                except OutOfRange:
                    # Exception 400 - Exceeded maximum allowed stream duration of 65 seconds.
                    self.user_display(self.intermediateFrame.GetMessageText())
                    break  # Start over
                except ServiceUnavailable as e:
                    # Exception 503 - Getting metadata from plugin failed
                    self.log("{0} - NOT RECOGNIZED - {1}\n".format(self.getDate(), e))
                    break
                except ResourceExhausted as e:
                    break
                except GoogleAPICallError as e:
                    break
                if response.results:
                    result = response.results[0]
                    if result.alternatives:
                        transcript = result.alternatives[0].transcript
                        self.intermediateFrame.SetMessageText(transcript)
                        if not result.is_final:
                            self.intermediateFrame.Display()
                            # print(transcript)
                        else:
                            self.user_display(transcript)
                            self.intermediateFrame.Display(False)
                            self.intermediateFrame.SetMessageText("")
                            # print("\t\t FINAL: %s" % transcript)
                            break  # Start over
MicrophoneStream class
from __future__ import division

import pyaudio
from six.moves import queue

class MicrophoneStream(object):
    """Opens a recording stream as a generator yielding the audio chunks."""

    def __init__(self, rate, chunk):
        self._rate = rate
        self._chunk = chunk
        # Create a thread-safe buffer of audio data
        self._buff = queue.Queue()
        self.closed = True

    def __enter__(self):
        self._audio_interface = pyaudio.PyAudio()
        self._audio_stream = self._audio_interface.open(
            format=pyaudio.paInt16,
            # The API currently only supports 1-channel (mono) audio
            channels=1, rate=self._rate,
            input=True, frames_per_buffer=self._chunk,
            # Run the audio stream asynchronously to fill the buffer object.
            # This is necessary so that the input device's buffer doesn't
            # overflow while the calling thread makes network requests, etc.
            stream_callback=self._fill_buffer,
        )
        self.closed = False
        return self

    def __exit__(self, type, value, traceback):
        self._audio_stream.stop_stream()
        self._audio_stream.close()
        self.closed = True
        # Signal the generator to terminate so that the client's
        # streaming_recognize method will not block the process termination.
        self._buff.put(None)
        self._audio_interface.terminate()

    def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
        """Continuously collect data from the audio stream, into the buffer."""
        self._buff.put(in_data)
        return None, pyaudio.paContinue

    def generator(self, is_running=None):
        while not self.closed:
            # Use a blocking get() to ensure there's at least one chunk of
            # data, and stop iteration if the chunk is None, indicating the
            # end of the audio stream.
            chunk = self._buff.get()
            if callable(is_running) and not is_running():
                return
            if chunk is None:
                return
            data = [chunk]
            # Now consume whatever other data's still buffered.
            while True:
                try:
                    chunk = self._buff.get(block=False)
                    if chunk is None:
                        return
                    data.append(chunk)
                except queue.Empty:
                    break
            yield b''.join(data)
Try using:

import urllib
urllib.urlretrieve("http://www.example.com/songs/mp3.mp3", "mp3.mp3")

(for Python 3+, use import urllib.request and urllib.request.urlretrieve)

I can't run simple code using PyAudio - [Errno -9996] Invalid output device (no default output device)

(I'm new to Python.)
I'm trying to run some simple PyAudio code that I just copied and pasted from the PyAudio website.
I get this error:
OSError                                   Traceback (most recent call last)
<ipython-input-7-3fc52ceecbf3> in <module>()
     15                 channels=wf.getnchannels(),
     16                 rate=wf.getframerate(),
---> 17                 output=True)
     18
     19 # read data

/home/gustavolg/anaconda3/lib/python3.5/site-packages/pyaudio.py in open(self, *args, **kwargs)
    748         """
    749
--> 750         stream = Stream(self, *args, **kwargs)
    751         self._streams.add(stream)
    752         return stream

/home/gustavolg/anaconda3/lib/python3.5/site-packages/pyaudio.py in __init__(self, PA_manager, rate, channels, format, input, output, input_device_index, output_device_index, frames_per_buffer, start, input_host_api_specific_stream_info, output_host_api_specific_stream_info, stream_callback)
    439
    440         # calling pa.open returns a stream object
--> 441         self._stream = pa.open(**arguments)
    442
    443         self._input_latency = self._stream.inputLatency

OSError: [Errno -9996] Invalid output device (no default output device)
I cannot figure out how to solve this error. I don't know if it has something to do with the audio driver, or if the code needs an output declaration; that is, whether I have to select an output.
The code:

import pyaudio
import wave
import sys

CHUNK = 1024

wf = wave.open("/home/gustavolg/anaconda3/aPython/file.wav", 'rb')

# instantiate PyAudio (1)
p = pyaudio.PyAudio()

# open stream (2)
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)

# read data
data = wf.readframes(CHUNK)

# play stream (3)
while len(data) > 0:
    stream.write(data)
    data = wf.readframes(CHUNK)

# stop stream (4)
stream.stop_stream()
stream.close()

# close PyAudio (5)
p.terminate()
I'm using Python 3 in a Jupyter notebook.
Check the following steps:

>>> import pyaudio
>>> pa = pyaudio.PyAudio()
>>> pa.get_default_input_device_info()
{'defaultLowOutputLatency': 0.008707482993197279,
 'maxOutputChannels': 32,
 'hostApi': 0,
 'defaultSampleRate': 44100.0,
 'defaultHighOutputLatency': 0.034829931972789115,
 'name': 'default',
 'index': 15,
 'maxInputChannels': 32,
 'defaultHighInputLatency': 0.034829931972789115,
 'defaultLowInputLatency': 0.008707482993197279,
 'structVersion': 2}
>>> pyaudio.pa.__file__
'/root/.virtualenvs/py3k/lib/python3.4/site-packages/_portaudio.cpython-34m.so'

Make sure you have a default input device. I hope this is useful for you!
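If there is no usable default device, one common approach (a sketch using standard PyAudio calls; the chosen index is yours to fill in) is to enumerate the devices PortAudio sees and pass an explicit output_device_index when opening the stream:

import pyaudio

p = pyaudio.PyAudio()
# List every device with its output channel count to find a usable one.
for i in range(p.get_device_count()):
    info = p.get_device_info_by_index(i)
    print(i, info['name'], 'outputs:', info['maxOutputChannels'])

# Then open the stream against a specific device instead of the default, e.g.:
# stream = p.open(format=..., channels=..., rate=..., output=True,
#                 output_device_index=<index chosen above>)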
This may not be the issue that the OP ran into, but I found a cause and fix for the [Errno -9996] exception, and I thought I'd share it here since Google results lead here. Attempting to play multiple sounds with PyAudio at the same time can cause such a crash, for example with something like this:
def playSound(self, soundName):
    audioFilePath = self._soundBank.get(soundName)
    if not audioFilePath:
        audioFilePath = os.path.join(globalData.paths['audioFolder'], soundName + ".wav")
        if not os.path.exists(audioFilePath):
            print('Invalid or missing sound file for', soundName)
            return
        self._soundBank[soundName] = audioFilePath
    # Play the audio clip in a separate thread so that it's non-blocking
    audioThread = Thread(target=self._playSoundHelper, args=(audioFilePath,))
    audioThread.start()

def _playSoundHelper(self, soundFilePath):
    """ Helper (thread-target) function for playSound(). Runs in a separate
        thread to prevent audio playback from blocking anything else. """
    p = None
    wf = None
    stream = None
    try:
        # Instantiate PyAudio and open the target audio file
        p = pyaudio.PyAudio()
        wf = wave.open(soundFilePath, 'rb')
        # Open an audio data stream
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        # Continuously read/write data from the file to the stream until there is no data left
        data = wf.readframes(1024)
        while len(data) > 0:
            stream.write(data)
            data = wf.readframes(1024)
    except AttributeError:
        pass  # Program probably closed while playing audio
    except Exception as err:
        soundFileName = os.path.basename(soundFilePath)
        print('Unable to play "{}" sound.'.format(soundFileName))
        print(err)
    # Stop the stream
    if stream:
        stream.stop_stream()
        stream.close()
    # Close PyAudio
    if p:
        p.terminate()
    # Close the wav file
    if wf:
        wf.close()
Above, if playSound() is called too soon after another playSound(), the program will crash. However, this can be resolved by using an Event() object to mediate initialization of PyAudio and the stream, so that only one may be initialized at a time, while still allowing the sounds to play back mostly simultaneously or 'on top' of each other (i.e. the work in the data read/write loop portion of the thread). I added this like so:
def __init__(self):
    # requires: from threading import Event
    self.audioGate = Event()
    self.audioGate.set()

def _playSoundHelper(self, soundFilePath):
    """ Helper (thread-target) function for playSound(). Runs in a separate
        thread to prevent audio playback from blocking anything else. """
    p = None
    wf = None
    stream = None
    try:
        # Prevent race conditions from multiple sounds playing at once (which can
        # cause a crash); only allow one file to begin playing at a time
        self.audioGate.wait()  # Blocks until the event is re-set
        self.audioGate.clear()
        # Instantiate PyAudio and open the target audio file
        p = pyaudio.PyAudio()
        wf = wave.open(soundFilePath, 'rb')
        # Open an audio data stream
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True)
        self.audioGate.set()  # Allow a new sound to be opened/initialized
        # Continuously read/write data from the file to the stream until there is no data left
        data = wf.readframes(1024)
        while len(data) > 0:
            stream.write(data)
            data = wf.readframes(1024)
    except AttributeError:
        pass  # Program probably closed while playing audio
    except Exception as err:
        soundFileName = os.path.basename(soundFilePath)
        print('Unable to play "{}" sound.'.format(soundFileName))
        print(err)
    # Stop the stream
    if stream:
        stream.stop_stream()
        stream.close()
    # Close PyAudio
    if p:
        p.terminate()
    # Close the wav file
    if wf:
        wf.close()
You could similarly use the Event to instead cancel any existing audio playback (by checking it in the loop) before starting the latest one.

PyAudio Responsive Recording

I've seen the recording tutorial on the PyAudio website for making a fixed-length recording, but I was wondering how I could do the same with a non-fixed length? Basically, I want to create buttons to start and end the recording, but I haven't found anything on the matter. Any ideas? I am not looking for an alternative library.
Best is to use the non-blocking way of recording, i.e. you provide a callback function that gets called from the moment you start the stream, and keeps getting called for every block/buffer that gets processed, until you stop the stream.
In that callback function you check a boolean, for example: when it is true, you write the incoming buffer to a data structure; when it is false, you ignore the incoming buffer. This boolean can be set from clicking a button, for example; a minimal sketch of the idea follows below.
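As a minimal sketch of that idea (hedged: the recording flag, queue, and stream parameters below are illustrative, not from the original answer):

import queue
import pyaudio

recording = False        # toggled by your start/stop buttons
frames = queue.Queue()   # thread-safe store for the captured buffers

def callback(in_data, frame_count, time_info, status):
    # Keep the incoming buffer only while recording is switched on.
    if recording:
        frames.put(in_data)
    return None, pyaudio.paContinue

p = pyaudio.PyAudio()
# The stream starts immediately; the callback fires for every captured block.
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000,
                input=True, frames_per_buffer=1024,
                stream_callback=callback)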
EDIT: look at the example of wire audio: http://people.csail.mit.edu/hubert/pyaudio/#wire-callback-example
The stream is opened with an argument

stream_callback=my_callback

where my_callback is a regular function declared as

def my_callback(in_data, frame_count, time_info, status)

This function will be called every time a new buffer is available. in_data contains the input, which you want to record. In the wire example, in_data just gets returned in a tuple together with pyaudio.paContinue, which means that the incoming buffer from the input device is put/copied back into the output buffer sent to the output device (it's the same device, so it's actually routing input to output, aka a wire). See the API docs for a bit more explanation: http://people.csail.mit.edu/hubert/pyaudio/docs/#pyaudio.PyAudio.open
So in this function you can do something like the following (this is an extract from some code I've written, which is not complete: I use some functions not depicted. Also, I play a sine wave on one channel and noise on the other, in 24-bit format):
import queue

import numpy as np
import pyaudio

# (excerpt: mysine, gain, the channel indices, and the helper functions are defined elsewhere)
record_on = False
playback_on = False
recorded_frames = queue.Queue()

def callback_play_sine(in_data, frame_count, time_info, status):
    if record_on:
        global recorded_frames
        recorded_frames.put(in_data)

    if playback_on:
        left_channel_data = mysine.next_block(frame_count) * MAX_INT24 * gain
        right_channel_data = ((np.random.rand(frame_count) * 2) - 1) * MAX_INT24 * gain
        data = interleave_channels(max_nr_of_channels,
                                   (left_output_channel, left_channel_data),
                                   (right_output_channel, right_channel_data))
        data = convert_int32_to_24bit_bytestream(data)
    else:
        data = np.zeros(frame_count * max_nr_of_channels).tostring()

    if stop_callback:
        callback_flag = pyaudio.paComplete
    else:
        callback_flag = pyaudio.paContinue

    return data, callback_flag
You can then set record_on and playback_on to True or False from another part of your code while the stream is open/running, causing recording and playback to start or stop independently without interrupting the stream.
I copy in_data into a (thread-safe) queue, which is used by another thread to write it to disk; otherwise the queue will get big after a while.
BTW: PyAudio is based on PortAudio, which has much more documentation and helpful tips. For example (http://portaudio.com/docs/v19-doxydocs/writing_a_callback.html): the callback function has to finish before a new buffer is presented, or buffers will be lost, so writing to a file inside the callback function is usually not a good idea. (Though file writes are buffered, and I don't know whether they block when the data is eventually flushed to disk.)
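A hedged sketch of that writer thread (the queue and WAV parameters are illustrative, not from the original answer):

import queue
import threading
import wave

def disk_writer(frames, path, channels, sample_width, rate, stop_event):
    # Drain buffers that the audio callback pushed into the queue and
    # append them to a WAV file, off the real-time audio thread.
    wf = wave.open(path, 'wb')
    wf.setnchannels(channels)
    wf.setsampwidth(sample_width)
    wf.setframerate(rate)
    while not stop_event.is_set() or not frames.empty():
        try:
            wf.writeframes(frames.get(timeout=0.1))
        except queue.Empty:
            continue
    wf.close()

# Usage sketch:
# stop_event = threading.Event()
# threading.Thread(target=disk_writer,
#                  args=(recorded_frames, 'out.wav', 1, 2, 16000, stop_event),
#                  daemon=True).start()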
import pyaudio
import wave
import pygame, sys
from pygame.locals import *

pygame.init()
scr = pygame.display.set_mode((640, 480))

recording = True

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)

print("* recording")
frames = []

while True:
    if recording:
        data = stream.read(CHUNK)
        frames.append(data)
    for event in pygame.event.get():
        if event.type == KEYDOWN and recording:
            print("* done recording")
            stream.stop_stream()
            stream.close()
            p.terminate()
            wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
            wf.setnchannels(CHANNELS)
            wf.setsampwidth(p.get_sample_size(FORMAT))
            wf.setframerate(RATE)
            wf.writeframes(b''.join(frames))
            wf.close()
            recording = False
        if event.type == QUIT:
            pygame.quit(); sys.exit()
This is what I came up with when compiling it to an exe, to pass arguments to the exe:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-t', dest='time', action='store')
args = parser.parse_args()
time = int(args.time)
