How to change audio speed without changing pitch? - python

I need to apply audio to a video at a certain time with a certain duration, but some audio clips are longer (or shorter) than needed. How can I change the speed of the audio without changing its pitch? I tried changing the fps (multiplying it by the ratio of the needed duration to the audio duration), but it does not work the way I want.
original = VideoFileClip("orig.mp4")
clips = [original.audio.volumex(0.3)]
subs = []  # some array of subtitle-like objects with .start and .end
i = 0
for sub in subs:
    clip = AudioFileClip("\\temp{}.mp3".format(i))
    mult = clip.duration / (sub.end - sub.start) + 0.00001
    clip = AudioArrayClip(clip.to_soundarray(buffersize=500, fps=24000 / mult), fps=24000).set_start(sub.start).set_end(sub.end)
    clips.append(clip)
    i += 1
final = CompositeAudioClip(clips)
final.write_audiofile("final.mp3")

You can use the librosa module (its time_stretch changes the speed while preserving the pitch):
from scipy.io import wavfile
import librosa
import numpy as np

song, fs = librosa.load("song.wav")
song_2_times_faster = librosa.effects.time_stretch(song, rate=2)
wavfile.write("song_2_times_faster.wav", fs, song_2_times_faster)  # save the song
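To fit a clip to a specific duration, as in the original question, the stretch rate is the ratio of the original duration to the target duration. A minimal sketch of that idea (target_duration is a hypothetical value):
import librosa

song, fs = librosa.load("song.wav")
target_duration = 3.0  # hypothetical target length in seconds
rate = librosa.get_duration(y=song, sr=fs) / target_duration
stretched = librosa.effects.time_stretch(song, rate=rate)  # rate > 1 speeds up, pitch is preserved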

Using wave: change the sampling rate (note that this speeds the audio up but raises the pitch as well)
import wave

CHANNELS = 1
swidth = 2
Change_RATE = 2  # playback speed multiplier

spf = wave.open('VOZ.wav', 'rb')
RATE = spf.getframerate()
signal = spf.readframes(-1)  # read all frames
spf.close()

wf = wave.open('changed.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(swidth)
wf.setframerate(RATE * Change_RATE)  # doubling the frame rate halves the duration
wf.writeframes(signal)
wf.close()

Related

Librosa Split .wav file into 15s intervals

I'm new to working with audio files. I have several 60 second long files that I want to split into 15 second files (or any length). I'm able to split files into 1 second long files (so 60 files) but can't seem to get 15 second intervals to work. How can I create the intervals I'm looking for?
import os
import numpy as np
import librosa
import librosa.display

audio_dir = r'data\acoustics\recordings'
out_dir = r'data\acoustics\splits'
os.makedirs(out_dir, exist_ok=True)
audio_file = os.path.join(audio_dir, 'rec_20220729T160547Z.wav')
wave, sr = librosa.load(audio_file, sr=None)

num_sections = int(np.ceil(len(wave) / sr))
split = []
for i in range(num_sections):
    t = wave[i * sr : i * sr + sr]
    split.append(t)

for i in range(num_sections):
    recording_name = os.path.basename(audio_file[:-4])
    out_file = f"{recording_name}_{str(i)}.wav"
    sf.write(os.path.join(out_dir, out_file), split[i], sr)
What you have done is mostly correct; it just needs minor changes.
First is getting the data which you have done correctly.
import os
import numpy as np
import librosa
import librosa.display
import soundfile as sf # Missing import
audio_dir = r'data\acoustics\recordings'
out_dir = r'data\acoustics\splits'
os.makedirs(out_dir, exist_ok=True)
audio_file = os.path.join(audio_dir, 'rec_20220729T160547Z.wav')
wave, sr = librosa.load(audio_file, sr=None)
Calculate the length of segment:
segment_dur_secs = 15
segment_length = sr * segment_dur_secs
Breaking up the data and saving to file:
num_sections = int(np.ceil(len(wave) / segment_length))
split = []
for i in range(num_sections):
    t = wave[i * segment_length : (i + 1) * segment_length]
    split.append(t)

for i in range(num_sections):
    recording_name = os.path.basename(audio_file[:-4])
    out_file = f"{recording_name}_{str(i)}.wav"
    sf.write(os.path.join(out_dir, out_file), split[i], sr)
Alternatively:
split = []
for s in range(0, len(wave), segment_length):
    t = wave[s : s + segment_length]
    split.append(t)

recording_name = os.path.basename(audio_file[:-4])
for i, segment in enumerate(split):
    out_file = f"{recording_name}_{i}.wav"
    sf.write(os.path.join(out_dir, out_file), segment, sr)
Edit: There is an issue with the code here because sf is not defined. (Fixed the import)

Volume control in Pyaudio

I have pilfered some of the following code from previous Stack Exchange posts about creating simple sine wave tones with pyaudio. The code loops through the notes of the chromatic scale over two octaves; each note's frequency is selected from the array named "notes". My questions: Why is the volume changing with each note? How can I keep the volume the same for each note? Please know that I am a pyaudio beginner.
import pyaudio
import numpy as np
import time
from numpy import zeros

p = pyaudio.PyAudio()

SR = 44100  # sampling rate
duration = 2.0  # seconds
durationPause = 2.0

# Frequencies of the pitches
notes = [220.00, 233.08, 246.94, 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, 369.99, 392.00, 415.30, 440.00, 466.16, 493.88, 523.25, 554.37, 587.33, 622.25, 659.25, 698.46, 739.99, 783.99, 830.61, 880.00]

stream = p.open(format=pyaudio.paFloat32,
                channels=1,
                rate=SR,
                output=True)

count = 0
while count < 23:
    count += 1
    f = notes[count]
    # create sine wave of frequency f
    samples = (np.sin(2.0 * np.pi * np.arange(int(SR * duration)) * (f / SR))).astype(np.float32)
    # create pause
    pause = zeros(int(SR * durationPause)).astype(np.float32)
    # concatenate pitch and pause
    samples = np.concatenate((samples, pause))
    # Play sound.
    stream.write(samples.tobytes())

stream.stop_stream()
stream.close()
p.terminate()
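An aside on the two questions, offered as an assumption rather than a verified diagnosis: at equal amplitude, perceived loudness varies with frequency, and a sine wave cut off abruptly mid-cycle produces a click at the note boundary that can read as a volume jump. A minimal sketch of a common remedy, scaling every note by the same constant amplitude and applying short fade-in/fade-out ramps (the 10 ms ramp length is a hypothetical choice):
import numpy as np

SR = 44100
duration = 2.0
amplitude = 0.5  # constant gain applied to every note
ramp_len = int(SR * 0.01)  # 10 ms fade at each end

def make_note(f):
    t = np.arange(int(SR * duration))
    samples = amplitude * np.sin(2.0 * np.pi * t * (f / SR))
    # linear fade-in and fade-out to avoid clicks at the note boundaries
    envelope = np.ones_like(samples)
    envelope[:ramp_len] = np.linspace(0.0, 1.0, ramp_len)
    envelope[-ramp_len:] = np.linspace(1.0, 0.0, ramp_len)
    return (samples * envelope).astype(np.float32)
With every note generated through the same envelope and gain, any remaining loudness differences would come from frequency-dependent perception rather than the signal amplitude.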

How to convert a numpy array to a mp3 file

I am using the soundcard library to record my microphone input, it records in a NumPy array and I want to grab that audio and save it as an mp3 file.
Code:
import soundcard as sc
import numpy
import threading

speakers = sc.all_speakers()  # Gets a list of the system's speakers
default_speaker = sc.default_speaker()  # Gets the default speaker
mics = sc.all_microphones()  # Gets a list of all the microphones
default_mic = sc.get_microphone('Headset Microphone (Arctis 7 Chat)')  # Gets the default microphone

# Records the default microphone
def record_mic():
    print('Recording...')
    with default_mic.recorder(samplerate=48000) as mic, default_speaker.player(samplerate=48000) as sp:
        for _ in range(1000000000000):
            data = mic.record(numframes=None)  # 'None' creates zero latency
            sp.play(data)
            # Save the mp3 file here

recordThread = threading.Thread(target=record_mic)
recordThread.start()
With SciPy (to a wav file)
You can easily convert to wav and then separately convert the wav to mp3. More details here.
from scipy.io.wavfile import write
import numpy as np  # missing import in the original snippet

samplerate = 44100; fs = 100
t = np.linspace(0., 1., samplerate)
amplitude = np.iinfo(np.int16).max
data = amplitude * np.sin(2. * np.pi * fs * t)
write("example.wav", samplerate, data.astype(np.int16))
With pydub (to mp3)
Try this function from this excellent thread:
import pydub
import numpy as np

def write(f, sr, x, normalized=False):
    """numpy array to MP3"""
    channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1
    if normalized:  # normalized array - each item should be a float in [-1, 1)
        y = np.int16(x * 2 ** 15)
    else:
        y = np.int16(x)
    song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)
    song.export(f, format="mp3", bitrate="320k")

# Example usage, where x is a stereo int16 array such as:
# [[-225  707]
#  [-234  782]
#  [-205  755]
#  ...,
#  [ 303   89]
#  [ 337   69]
#  [ 274   89]]
write('out2.mp3', sr, x)
Note: The output MP3 will of course be 16-bit, because MP3s are always 16-bit. However, you can set sample_width=3, as suggested by @Arty, for 24-bit input.
As of now, the accepted answer produces extremely distorted sound, at least in my case, so here is an improved version:
import numpy as np
import librosa
from pydub import AudioSegment

# librosa read
y, sr = librosa.load(dir + file, sr=None)
y = librosa.util.normalize(y)

# pydub read
sound = AudioSegment.from_file(dir + file)
channel_sounds = sound.split_to_mono()
samples = [s.get_array_of_samples() for s in channel_sounds]
fp_arr = np.array(samples).T.astype(np.float32)
fp_arr /= np.iinfo(samples[0].typecode).max
fp_arr = np.array([x[0] for x in fp_arr])  # keep only the first channel
# I normalize the pydub waveform with librosa for comparison purposes
fp_arr = librosa.util.normalize(fp_arr)
So you can read the audio file with any library to get a waveform, then export it to any pydub-supported codec with the code below; I also used a waveform read by librosa and it works perfectly.
import io
import scipy.io.wavfile
from pydub import AudioSegment

# waveform and sample_rate come from whichever library was used to read the audio
wav_io = io.BytesIO()
scipy.io.wavfile.write(wav_io, sample_rate, waveform)
wav_io.seek(0)
sound = AudioSegment.from_wav(wav_io)
with open("file_exported_by_pydub.mp3", 'wb') as af:
    sound.export(
        af,
        format='mp3',
        codec='mp3',
        bitrate='160000',
    )

How to add noise to a wav file in Python

I just started working with matplotlib and numpy. I need to add noise to an audio signal, in Python.
In order to do that, I need to receive the original audio signal and the noise amplitude, and then return the original audio signal with the noise added. I have to use the rand function from numpy.random; the amplitude is what is supposed to scale those random values.
So far this is what I have:
import scipy.io.wavfile as spiowf
import sounddevice as sd
import matplotlib.pyplot as plt
import numpy as np

def noise(data, samplerate):
    (ns, nc) = data.shape
    Ts = 1 / samplerate
    dur = ns / samplerate  # audio duration
    random = np.random.rand(0, 100)
    sd.play(data, samplerate, blocking=True)

def main():
    fName = "saxriff.wav"
    [samplerate, data] = spiowf.read(fName)

if __name__ == "__main__":
    main()
How can I do the rest?
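A minimal sketch of how the rest might look, assuming the intent is to scale zero-centered random values by the given amplitude and add them to the signal (the 0.05 amplitude is a hypothetical choice):
import numpy as np
import scipy.io.wavfile as spiowf
import sounddevice as sd

def noise(data, samplerate, amplitude):
    # work in float so that the noise amplitude is relative to full scale
    x = data / np.abs(data).max()
    # rand() gives values in [0, 1); shift to [-1, 1) and scale by the amplitude
    noisy = x + amplitude * (2 * np.random.rand(*x.shape) - 1)
    noisy = np.clip(noisy, -1.0, 1.0)
    sd.play(noisy, samplerate, blocking=True)
    return noisy

def main():
    samplerate, data = spiowf.read("saxriff.wav")
    noise(data, samplerate, 0.05)  # hypothetical noise amplitude

if __name__ == "__main__":
    main()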

PyAudio - multiple channel management \ Demixing

I want to extract the single-channel data from this code (in order to calculate the audio cross-correlation between channel 1 and channel 4):
import time
import numpy as np
import pyaudio
import scipy
from scipy import signal, fftpack

pyaud = pyaudio.PyAudio()

# open the stream
stream = pyaud.open(
    format=pyaudio.paInt16,
    channels=4,
    rate=16000,
    input_device_index=4,
    output=False,
    input=True,
    frames_per_buffer=2048,
)

while True:
    rawsamps = stream.read(2048)
    samps = np.frombuffer(rawsamps, dtype=np.int16)  # np.fromstring is deprecated
    frames_per_buffer_length = len(samps) / 4  # (channels)
    assert frames_per_buffer_length == int(frames_per_buffer_length)
    samps = np.reshape(samps, (int(frames_per_buffer_length), 4))  # 4 channels
Assuming that the raw data is interleaved.
This is the function I need to use:
signal.correlate(n1, n2, mode='full')
How can I create an array of data for each channel in order to use the correlate function? Are the last lines of the code correct?
Thank you
I found the answer: using print loudness(samps[:,0]), loudness(samps[:,3]), it prints "mic 1 loudness, mic 4 loudness" in the shell.
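For the cross-correlation itself, the reshape above already de-interleaves the frames, so each column of samps is one channel. A minimal sketch, assuming channel 1 maps to column 0 and channel 4 to column 3:
from scipy import signal

ch1 = samps[:, 0]  # channel 1
ch4 = samps[:, 3]  # channel 4
corr = signal.correlate(ch1, ch4, mode='full')
lag = corr.argmax() - (len(ch4) - 1)  # sample offset of the best alignment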
