Real-time audio signal processing using Python

I have been trying to do real-time audio signal processing using the 'pyAudio' module in Python. What I did was a simple case of reading audio data from the microphone and playing it back via headphones. I tried the following code (both Python and Cython versions). It works, but unfortunately it stalls and is not smooth enough. How can I improve the code so that it runs smoothly? My PC is an i7 with 8 GB RAM.
Python Version
import pyaudio
import numpy as np

RATE = 16000   # sample rate in Hz
CHUNK = 256    # frames per buffer (small buffer -> lower latency)

p = pyaudio.PyAudio()
# Output stream (headphones/speakers).
player = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, output=True,
                frames_per_buffer=CHUNK)
# Input stream (microphone).
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True,
                frames_per_buffer=CHUNK)

for i in range(int(20 * RATE / CHUNK)):  # run for 20 seconds
    # np.frombuffer replaces np.fromstring, which is removed in modern NumPy;
    # it gives a zero-copy int16 view of the captured bytes, ready for DSP.
    data = np.frombuffer(stream.read(CHUNK), dtype=np.int16)
    player.write(data.tobytes())

stream.stop_stream()
stream.close()
# Close the output stream too so PortAudio resources are released.
player.stop_stream()
player.close()
p.terminate()
Cython Version
import pyaudio
import numpy as np

# Typed C locals; everything else runs as ordinary Python through Cython.
cdef int RATE = 16000
cdef int CHUNK = 1024
cdef int i

p = pyaudio.PyAudio()
player = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, output=True,
                frames_per_buffer=CHUNK)
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True,
                frames_per_buffer=CHUNK)

# NOTE: 500 buffers of 1024 frames at 16 kHz is ~32 seconds, not 10 as the
# original comment claimed; use int(seconds * RATE / CHUNK) for a fixed length.
for i in range(500):
    # np.frombuffer replaces the removed np.fromstring.
    player.write(np.frombuffer(stream.read(CHUNK), dtype=np.int16).tobytes())

stream.stop_stream()
stream.close()
p.terminate()

I believe you are missing CHUNK as second argument to player.write call.
player.write(np.fromstring(stream.read(CHUNK),dtype=np.int16),CHUNK)
Also, not sure if its formatting error. But player.write needs to be tabbed into for loop
Also, per the pyaudio examples the loop count should be int(RATE / CHUNK * RECORD_SECONDS). Note that * and / have equal precedence in Python and evaluate left to right, so this is mathematically the same as RECORD_SECONDS * RATE / CHUNK — the real discrepancy in your code is that it loops for 20 seconds while the comment says 10.
for i in range(int(20 * RATE / CHUNK)):  # run for 20 seconds
    # write() also accepts the number of frames as an optional second argument;
    # np.frombuffer replaces the removed np.fromstring.
    player.write(np.frombuffer(stream.read(CHUNK), dtype=np.int16), CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
Finally, you may want to increase RATE to 44100, CHUNK to 1024, and CHANNELS to 2 for better fidelity.

The code below will take the default input device, and output what's recorded into the default output device.
import time

import pyaudio  # the module name is lowercase; "import PyAudio" raises ImportError
import numpy as np

p = pyaudio.PyAudio()

CHANNELS = 2
RATE = 44100

def callback(in_data, frame_count, time_info, flag):
    """Duplex callback: echo each captured buffer straight to the output.

    To process the signal, convert the raw bytes first, e.g.
    np.frombuffer(in_data, dtype=np.float32).
    """
    # audio_data = np.frombuffer(in_data, dtype=np.float32)
    return in_data, pyaudio.paContinue

stream = p.open(format=pyaudio.paFloat32,
                channels=CHANNELS,
                rate=RATE,
                output=True,
                input=True,
                stream_callback=callback)

stream.start_stream()
while stream.is_active():
    # Let the callback run for 20 seconds, then stop the stream so the
    # is_active() loop exits.
    time.sleep(20)
    stream.stop_stream()
print("Stream is stopped")
stream.close()
p.terminate()
This will run for 20 seconds and stop. The callback method is where you can process the signal:
audio_data = np.fromstring(in_data, dtype=np.float32)
return in_data is where you send back post-processed data to the output device.
Note chunk has a default argument of 1024 as noted in the PyAudio docs:
http://people.csail.mit.edu/hubert/pyaudio/docs/#pyaudio.PyAudio.open

I am working on a similar project. I modified your code and the stalls now are gone. The bigger the chunk the bigger the delay. That is why I kept it low.
import pyaudio
import numpy as np

CHUNK = 2 ** 5   # 32-frame buffer: the smaller the chunk, the lower the delay
RATE = 44100
LEN = 10         # duration in seconds

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True,
                frames_per_buffer=CHUNK)
player = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, output=True,
                frames_per_buffer=CHUNK)

for i in range(int(LEN * RATE / CHUNK)):  # go for LEN seconds
    # np.frombuffer replaces the removed np.fromstring.
    data = np.frombuffer(stream.read(CHUNK), dtype=np.int16)
    player.write(data, CHUNK)

stream.stop_stream()
stream.close()
p.terminate()

Related

Python PyAudio is Processing all data into one channel

I am using pyaudio to record data from a two channel soundcard using this function
def record(self):
    '''
    Record from the configured soundcard into an in-memory frame list.

    Uses a PortAudio callback so recording can be stopped mid-stream via a
    queue message.  After the stream ends, writes the frames to a wav file.
    '''
    wf = wave.open('audiooutput.wav', 'wb')
    wf.setnchannels(2)
    wf.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
    wf.setframerate(44100)

    p = pyaudio.PyAudio()
    frames = []

    # Sub-function: checks the queue for a message to stop recording.
    def check_for_recordstop():
        try:
            message = self.my_queue.get(timeout=0.1)
        except Exception:
            # Queue was empty within the timeout; nothing to do.
            return
        if message == None:
            pass
        elif message.payload == "Stop":
            self.confirm_message_recieved(message)
            stream.stop_stream()

    # Sub-function: PortAudio callback, runs on PortAudio's own thread.
    def callback(in_data, frame_count, time_info, status):
        if stream.is_active():
            frames.append(in_data)
            return (in_data, pyaudio.paContinue)
        else:
            # Keep the final buffer, then tell PortAudio we are done.
            frames.append(in_data)
            return (in_data, pyaudio.paComplete)

    # BUG FIX: the stream was originally opened with
    # format=pyaudio.get_sample_size(pyaudio.paInt16), which passes a byte
    # width (2) where a sample-format constant is expected and mangles the
    # two channels into one.  Use the format constant itself (equivalently:
    # pyaudio.get_format_from_width(wf.getsampwidth())).
    stream = p.open(format=pyaudio.paInt16,
                    channels=2,
                    rate=44100,
                    input=True,
                    frames_per_buffer=1024,
                    input_device_index=1,
                    stream_callback=callback)

    self.logger.info("Recording")
    stream.start_stream()  # callback runs on a new thread when start_stream() is triggered
    while stream.is_active():  # keep this thread alive while the callback is running
        time.sleep(0.1)
        check_for_recordstop()
    print("done")

    stream.close()
    wf.writeframes(b''.join(frames))
    wf.close()
    p.terminate()
However when viewing this data in audacity I do not end up with 2 channel stream it looks like this
But When using a Function like this
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5

p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                input_device_index=1,
                frames_per_buffer=CHUNK)

print("* recording")
frames = []
# Blocking reads: RATE / CHUNK buffers per second for RECORD_SECONDS seconds.
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)
print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

# Write the captured interleaved int16 frames to a stereo wav file.
wf = wave.open("pyaudoutput.wav", 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
the audio data comes out as expected see here
I cannot seem to get the first function to produce the desired results from the second!
I need to use the callback capability of pyaudio for my usecase but cannot get the two channels to separate. Any Advice would be great!
I was opening the stream using the sample size not the sample width
format=pyaudio.get_sample_size(pyaudio.paInt16),
should be
format=pyaudio.get_format_from_width(wf.getsampwidth()),
Using the sample width from the file I am writing to solved the issue.

Python - How to record system audio(The output from the speaker)?

I have been searching for this since last week. Tried pyaudio also and when i used its another fork the system audio was mixed with microphone audio. I was not able to find any other module for this and thus finally asked the question.
Edit:
import pyaudio
import wave

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "output.wav"

p = pyaudio.PyAudio()

SPEAKERS = p.get_default_output_device_info()["hostApi"]  # The modified part

# NOTE: as_loopback is only available in the intxcc/pyaudio_portaudio fork,
# not in upstream PyAudio.
stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK,
                input_host_api_specific_stream_info=SPEAKERS,
                as_loopback=True)  # The part I have modified

print("* recording")
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS) + 1):
    data = stream.read(CHUNK)
    frames.append(data)
print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
This code was taken from stack overflow. It records the speaker output but the output is mixed with the microphone Input.
Also the pyaudio module used was from the fork : https://github.com/intxcc/pyaudio_portaudio.
using https://github.com/intxcc/pyaudio_portaudio
This only records the audio of the device specified by "device_id"
import pyaudio
import wave

chunk = 1024                      # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16   # 16 bits per sample
channels = 2
fs = 44100                        # Record at 44100 samples per second
seconds = 3
filename = "output.wav"

p = pyaudio.PyAudio()             # Create an interface to PortAudio

# Select Device: list every device so the user can pick the loopback one.
print("Available devices:\n")
for i in range(0, p.get_device_count()):
    info = p.get_device_info_by_index(i)
    print(str(info["index"]) + ": \t %s \n \t %s \n"
          % (info["name"], p.get_host_api_info_by_index(info["hostApi"])["name"]))

# ToDo change to your device ID
device_id = 7
device_info = p.get_device_info_by_index(device_id)
# Use whichever channel count (input or output) is larger for this device.
channels = device_info["maxInputChannels"] if (device_info["maxOutputChannels"] < device_info["maxInputChannels"]) else device_info["maxOutputChannels"]

# https://people.csail.mit.edu/hubert/pyaudio/docs/#pyaudio.Stream.__init__
# NOTE: as_loopback requires the intxcc/pyaudio_portaudio fork.
stream = p.open(format=sample_format,
                channels=channels,
                rate=int(device_info["defaultSampleRate"]),
                input=True,
                frames_per_buffer=chunk,
                input_device_index=device_info["index"],
                as_loopback=True
                )

frames = []  # Initialize array to store frames
print('\nRecording', device_id, '...\n')

# Store data in chunks for 3 seconds
for i in range(0, int(fs / chunk * seconds)):
    data = stream.read(chunk)
    frames.append(data)

# Stop and close the stream
stream.stop_stream()
stream.close()
# Terminate the PortAudio interface
p.terminate()
print('Finished recording')

# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
P.S. check out https://github.com/intxcc/pyaudio_portaudio/tree/master/example
This can be done with soundcard. You will have to figure out which device index to use for your loopback. This code prints out the ones you will have to choose from. I found the correct one by looping over all of them and seeing which produced non zeros when speakers were playing.
pip install soundcard
import soundcard as sc
import time

# get a list of all speakers:
speakers = sc.all_speakers()
# get the current default speaker on your system:
default_speaker = sc.default_speaker()
# get a list of all microphones (loopback devices included):
mics = sc.all_microphones(include_loopback=True)

# Print the candidates so you can find your speaker's loopback index.
for i in range(len(mics)):
    try:
        print(f"{i}: {mics[i].name}")
    except Exception as e:
        print(e)

# TODO: replace 0 with the index of your speaker loopback from the list above.
# (The original snippet had the non-Python placeholder
# "mics[index of your speaker loopback here]".)
LOOPBACK_INDEX = 0
default_mic = mics[LOOPBACK_INDEX]

with default_mic.recorder(samplerate=148000) as mic, \
        default_speaker.player(samplerate=148000) as sp:
    print("Recording...")
    data = mic.record(numframes=1000000)
    print("Done...Stop your sound so you can hear playback")
    time.sleep(5)
    sp.play(data)
I install a virtul soundcard(blackhole) on mac to record the system audio, and is worked.
I only record system audio without microphone audio, as I don't need it
On Ubuntu, you can use 'pavucontrol' to change the recording source. An example of recording audio directly from the speakers (without using a microphone):
First you run a script like the one below:
import pyaudio

mic = pyaudio.PyAudio()
# Full-duplex stream; on Ubuntu, switch the recording source in pavucontrol
# to the "Monitor of ..." device to capture the speaker output directly.
stream = mic.open(format=pyaudio.paInt16, channels=1, rate=44100,
                  input=True, output=True, frames_per_buffer=2048)
stream.start_stream()

if __name__ == '__main__':
    while True:
        # NOTE(review): reads 1024 frames from a stream buffered at 2048 —
        # presumably intentional; confirm the chunk sizes suit your use case.
        data = stream.read(1024)
        # Do something with sound
With this approach, you can analyze the sound from the speakers during the video call.

How to ignore background noise while recording audio using pyaudio in python?

I am using the following code to record audio in python, it works perfectly. But when I set the microphone input volume to high, the audio thus recorded using this code has a lot of noise. How to get rid of this noise??
import pyaudio
import wave
def record(filename):
    """Record 15 seconds of 16-bit mono audio at 16 kHz and save it to *filename*."""
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    CHUNK = 1024
    RECORD_SECONDS = 15
    WAVE_OUTPUT_FILENAME = filename

    audio = pyaudio.PyAudio()
    # start Recording
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        frames_per_buffer=CHUNK)
    print("recording...")
    print('---------------------------------')
    print(int(RATE / CHUNK * RECORD_SECONDS))
    print('*********************************')

    frames = []
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("Recording finished. . .")

    # stop Recording
    stream.stop_stream()
    stream.close()
    audio.terminate()

    waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    waveFile.setnchannels(CHANNELS)
    # BUG FIX: the original hard-coded setsampwidth(2) and then evaluated
    # audio.get_sample_size(FORMAT) on its own line, discarding the result.
    # Derive the width from the format so the two always agree.
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(RATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()

PyAudio - How mix wave file into a continuous stream

I want to write a very basic application that passes audio from microphone to speakers. This is very simple with pyaudio as described on https://people.csail.mit.edu/hubert/pyaudio/ .
def passthrough():
    """Open one full-duplex stream and echo microphone input to the speakers."""
    WIDTH = 2
    CHANNELS = 1
    RATE = 44100

    p = pyaudio.PyAudio()

    def callback(in_data, frame_count, time_info, status):
        # Return the captured buffer unchanged -> direct passthrough.
        return (in_data, pyaudio.paContinue)

    stream = p.open(format=p.get_format_from_width(WIDTH),
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    output=True,
                    stream_callback=callback)

    stream.start_stream()
    # The callback runs on PortAudio's thread; just idle until it stops.
    while stream.is_active():
        time.sleep(0.1)

    stream.stop_stream()
    stream.close()
    p.terminate()
But now I try to mix a wave file into this stream, when an event occurs. And that's where I am stuck right now. Playing a wave file seems to be easy, too.
def play_wave(wav_file):
    """Play *wav_file* to the default output device via a callback stream."""
    wf = wave.open(wav_file, 'rb')
    sample_width = wf.getsampwidth()
    channels = wf.getnchannels()
    rate = wf.getframerate()
    # (removed unused local: sample_width * channels * rate was computed
    # into a variable that was never read)

    def callback(in_data, frame_count, time_info, status):
        # Feed the next frame_count frames from the file; a short read at
        # EOF makes PortAudio finish the stream.
        data = wf.readframes(frame_count)
        return (data, pyaudio.paContinue)

    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(sample_width),
                    channels=channels,
                    rate=int(rate),
                    output=True,
                    stream_callback=callback)

    stream.start_stream()
    while stream.is_active():
        time.sleep(0.1)

    stream.stop_stream()
    stream.close()
    wf.close()
    p.terminate()
At this time, I have two problems.
How do I mix the wave output into the continuous stream
How can I trigger 1. on an event basis
Hope someone can light up the dark basement I am in right now.
EDIT: Assume the wave file to have same number of channels and same rate, so no conversion necessary.
After moving the passthrough() function into a thread it works as desired. When I tried this yesterday, I had simply made a mistake starting the thread (I called passthrough from __init__ instead of from the run() method).
So here the complete, working code.
import pyaudio
import wave
import threading
import time
class AudioPass(threading.Thread):
    """Run the mic->speaker passthrough on a background thread."""

    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        # Thread entry point: block here until the stream stops.
        self.passthrough()

    def passthrough(self):
        """Open one full-duplex stream and echo microphone input to the speakers."""
        WIDTH = 2
        CHANNELS = 1
        RATE = 44100

        p = pyaudio.PyAudio()

        def callback(in_data, frame_count, time_info, status):
            # Return the captured buffer unchanged -> direct passthrough.
            return (in_data, pyaudio.paContinue)

        stream = p.open(format=p.get_format_from_width(WIDTH),
                        channels=CHANNELS,
                        rate=RATE,
                        input=True,
                        output=True,
                        stream_callback=callback)

        stream.start_stream()
        while stream.is_active():
            time.sleep(0.1)

        stream.stop_stream()
        stream.close()
        p.terminate()
def play_wave(wav_file):
    """Play *wav_file* to the default output device via a callback stream."""
    wf = wave.open(wav_file, 'rb')
    sample_width = wf.getsampwidth()
    channels = wf.getnchannels()
    rate = wf.getframerate()
    # (removed unused local: sample_width * channels * rate was computed
    # into a variable that was never read)

    def callback(in_data, frame_count, time_info, status):
        data = wf.readframes(frame_count)
        return (data, pyaudio.paContinue)

    p = pyaudio.PyAudio()
    stream = p.open(format=p.get_format_from_width(sample_width),
                    channels=channels,
                    rate=int(rate),
                    output=True,
                    stream_callback=callback)

    stream.start_stream()
    while stream.is_active():
        time.sleep(0.1)

    stream.stop_stream()
    stream.close()
    wf.close()
    p.terminate()
# Start the mic->speaker passthrough on its own thread so play_wave() can
# open a second output stream concurrently; PortAudio mixes the two outputs.
thread = AudioPass()
thread.start()
play_wave('C:/bell.wav')
Later I will also try another way a colleauge suggested today and if it does well too, I will put it here as an alternative, too. Using the threaded way is nice because I can use different rates for the stream and the wav file.
A colleague provided the below solution, which is a very raw approach, but it works and is good for understanding how this pyaudio stuff works.
import time
import pyaudio
import numpy

WIDTH = 2
CHANNELS = 1
RATE = 44100

p = pyaudio.PyAudio()

SINE_WAVE_FREQUENCY = 440.0  # In Hz
SINE_WAVE_DURATION = 5.0     # In seconds
SINE_WAVE_VOLUME = 0.5
# Pre-computed float32 sine buffer, played on demand by playsine().
SINE_WAVE = (numpy.sin(2 * numpy.pi * numpy.arange(RATE * SINE_WAVE_DURATION)
                       * SINE_WAVE_FREQUENCY / RATE)).astype(numpy.float32) * SINE_WAVE_VOLUME

def loopback(in_data, frame_count, time_info, status):
    # Echo mic input straight to the output.
    return (in_data, pyaudio.paContinue)

stream = p.open(format=p.get_format_from_width(WIDTH), channels=CHANNELS,
                rate=RATE, input=True, output=True, stream_callback=loopback)
stream.start_stream()

def playsine():
    # Blocking write on a second output stream; PortAudio mixes it with the
    # already-running loopback stream.
    sinestream = p.open(format=pyaudio.paFloat32, channels=1, rate=RATE, output=True)
    sinestream.write(SINE_WAVE)
    sinestream.stop_stream()
    sinestream.close()

while True:
    input("Press enter to play a sine wave")
    playsine()

pyaudio change the number of samples of calling the callback function

I want to do a simple realtime processing to audio every 4096 samples. But this code calls the callback function every 1024 samples. I just want to change the frame_count to 4096.
import pyaudio
import time

WIDTH = 2
CHANNELS = 1
RATE = 44100

p = pyaudio.PyAudio()

def callback(in_data, frame_count, time_info, status):
    # do_something is the user's processing hook (defined elsewhere).
    out = do_something(in_data)
    print(frame_count)  # 1024 — the default frames_per_buffer
    return (out, pyaudio.paContinue)

stream = p.open(format=p.get_format_from_width(WIDTH),
                channels=CHANNELS,
                rate=RATE,
                input=True,
                output=True,
                stream_callback=callback)

stream.start_stream()
while stream.is_active():
    time.sleep(0.1)

stream.stop_stream()
stream.close()
p.terminate()
I haven't tested it, but from the documentation it seems that if you change the stream open line to:
# frames_per_buffer controls how many frames each callback invocation
# receives (it defaults to 1024); setting it to 4096 makes the callback
# fire once per 4096 samples.
stream = p.open(format=p.get_format_from_width(WIDTH),
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=4096,
stream_callback=callback)
you should get your required number of samples per block. frames_per_buffer defaults to 1024, so that's probably why you're getting this value in your test.
Good luck!

Categories