I'm trying to extract data from a wav file for audio analysis of each frequency and its amplitude with respect to time. My aim is to feed this data to a machine learning algorithm for a college project. After a bit of googling, I found out that this can be done with Python's matplotlib library. I saw some sample code that ran a short-time Fourier transform and plotted a spectrogram of these wav files, but I wasn't able to understand how to use this library to extract the data (every frequency's amplitude at a given time in the audio file) and store it in a 3D array or a .mat file.
Here's the code I saw on some website:
#!/usr/bin/env python
""" This work is licensed under a Creative Commons Attribution 3.0 Unported License.
Frank Zalkow, 2012-2013 """

import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks

""" short-time Fourier transform of audio signal """
def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))

    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    samples = np.append(np.zeros(int(np.floor(frameSize/2.0))), sig)
    # cols for windowing (must be an integer to be used as a shape)
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1
    # zeros at end (thus samples can be fully covered by frames)
    samples = np.append(samples, np.zeros(frameSize))

    frames = stride_tricks.as_strided(samples, shape=(cols, frameSize),
                                      strides=(samples.strides[0]*hopSize, samples.strides[0])).copy()
    frames *= win

    return np.fft.rfft(frames)
""" scale frequency axis logarithmically """
def logscale_spec(spec, sr=44100, factor=20.):
timebins, freqbins = np.shape(spec)
scale = np.linspace(0, 1, freqbins) ** factor
scale *= (freqbins-1)/max(scale)
scale = np.unique(np.round(scale))
# create spectrogram with new freq bins
newspec = np.complex128(np.zeros([timebins, len(scale)]))
for i in range(0, len(scale)):
if i == len(scale)-1:
newspec[:,i] = np.sum(spec[:,scale[i]:], axis=1)
else:
newspec[:,i] = np.sum(spec[:,scale[i]:scale[i+1]], axis=1)
# list center freq of bins
allfreqs = np.abs(np.fft.fftfreq(freqbins*2, 1./sr)[:freqbins+1])
freqs = []
for i in range(0, len(scale)):
if i == len(scale)-1:
freqs += [np.mean(allfreqs[scale[i]:])]
else:
freqs += [np.mean(allfreqs[scale[i]:scale[i+1]])]
return newspec, freqs
""" plot spectrogram"""
def plotstft(audiopath, binsize=2**10, plotpath=None, colormap="jet"):
samplerate, samples = wav.read(audiopath)
s = stft(samples, binsize)
sshow, freq = logscale_spec(s, factor=1.0, sr=samplerate)
ims = 20.*np.log10(np.abs(sshow)/10e-6) # amplitude to decibel
timebins, freqbins = np.shape(ims)
plt.figure(figsize=(15, 7.5))
plt.imshow(np.transpose(ims), origin="lower", aspect="auto", cmap=colormap, interpolation="none")
plt.colorbar()
plt.xlabel("time (s)")
plt.ylabel("frequency (hz)")
plt.xlim([0, timebins-1])
plt.ylim([0, freqbins])
xlocs = np.float32(np.linspace(0, timebins-1, 5))
plt.xticks(xlocs, ["%.02f" % l for l in ((xlocs*len(samples)/timebins)+(0.5*binsize))/samplerate])
ylocs = np.int16(np.round(np.linspace(0, freqbins-1, 10)))
plt.yticks(ylocs, ["%.02f" % freq[i] for i in ylocs])
if plotpath:
plt.savefig(plotpath, bbox_inches="tight")
else:
plt.show()
plt.clf()
plotstft("abc.wav")
Please guide me in understanding how to extract this data. If it can't be done with matplotlib, please recommend another library that will help me achieve this.
First of all, this looks like my code, which is stated to be under a CC license. I don't take it too seriously, but you should not ignore those aspects (you omitted the statement of authorship in this case); others could be more miffed about such a thing.
To your question: In this code the stft isn't computed by matplotlib, but just by numpy. You can get it like this:
samplerate, samples = wav.read(audiopath)
s = stft(samples, 1024)
I am not sure why you want a 3D array; it is a 2D array, but it is complex-valued. If you want to save it in a .mat file:
from scipy.io import savemat
savemat("file.mat", {'arr': s})
You can see that once the wav audio file is read into the variable samples, it is passed to a function called stft:

samplerate, samples = wav.read(audiopath)
s = stft(samples, binsize)

Here you already have access to the audio samples in the variable samples, in the form of integers. Be aware that the bit depth will impact the number of bytes per sample as represented as a series of integers; also know your endianness (left to right or vice versa). However, in the function stft that array is further processed into an array of floats, in the variable frames, before it is passed to np.fft.rfft.
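For instance, a quick way to inspect the sample format and byte order (a small sketch, reusing the example file name from above):

import scipy.io.wavfile as wav

samplerate, samples = wav.read("abc.wav")
print(samples.dtype)            # e.g. int16 for 16-bit PCM
print(samples.dtype.byteorder)  # '=' native, '<' little-endian, '>' big-endian
print(samples.dtype.itemsize)   # bytes per sample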
Depending on your needs, those are your access choices without doing any of your own processing.
Related
I want to apply the following to an input audio file.
Input are mono audio files with 16 bit resolution at 44.1 kHz sampling rate. The audio is normalized and padded with 0.25 seconds of silence, to avoid onsets occurring immediately at the beginning of the audio file. First a logarithmic power spectrogram is calculated using a 2048 samples window size and a resulting frame rate of 100Hz. The frequency axis is then transformed to a logarithmic scale using twelve triangular filters per octave for a frequency range from 20 to 20,000 Hz. This results in a total number of 84 frequency bins.
But I was not able to implement this. I should be able to load a wav file and convert it to the expected output.
import numpy as np
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
#read wav file
rate, data = wav.read('C:/...')
#convert to mono
if data.ndim > 1:
    data = data.sum(axis=1) / 2
#normalize
data = data / np.max(np.abs(data))
#pad with 0.25 seconds of silence
data = np.append(data, np.zeros(int(0.25 * rate)))
#calculate spectrogram
spec = np.abs(np.fft.rfft(data, n=2048))**2
#convert to log scale (dB)
spec = 10 * np.log10(spec)
filters = np.zeros((12, 84))
#convert to chroma
chroma = np.dot(spec, np.transpose(filters))
#convert to onset strength
onset = np.diff(chroma, axis=0)
#plot
plt.figure(figsize=(12, 4))
plt.imshow(chroma.T, aspect='auto', origin='lower', interpolation='none')
plt.show()
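For reference, here is one way the quoted description might be implemented. This is only a sketch: the input path, the Hann window, the hop size of rate/100 samples (for the 100 Hz frame rate), and the construction of the log-spaced triangular filters are all assumptions, and the resulting bin count follows from those choices rather than necessarily matching the stated 84 bins.

import numpy as np
import scipy.io.wavfile as wav

rate, data = wav.read('input.wav')  # placeholder path
if data.ndim > 1:
    data = data.mean(axis=1)                                # mix down to mono
data = data / np.max(np.abs(data))                          # normalize
data = np.concatenate([np.zeros(int(0.25 * rate)), data])   # pad the start with 0.25 s of silence

win_size, hop = 2048, rate // 100                           # 2048-sample window, 100 frames per second
win = np.hanning(win_size)
n_frames = 1 + (len(data) - win_size) // hop
frames = np.stack([data[i*hop : i*hop + win_size] * win for i in range(n_frames)])
power = np.abs(np.fft.rfft(frames, axis=1)) ** 2            # power spectrogram, frames x bins

# Triangular filterbank: 12 filters per octave between 20 Hz and 20 kHz
fft_freqs = np.fft.rfftfreq(win_size, 1.0 / rate)
n_centers = int(np.log2(20000.0 / 20.0) * 12)
centers = 20.0 * 2.0 ** (np.arange(n_centers) / 12.0)
fbank = np.zeros((len(centers) - 2, len(fft_freqs)))
for k in range(1, len(centers) - 1):
    lo, c, hi = centers[k-1], centers[k], centers[k+1]
    rise = (fft_freqs - lo) / (c - lo)
    fall = (hi - fft_freqs) / (hi - c)
    fbank[k-1] = np.clip(np.minimum(rise, fall), 0.0, None)

log_spec = 10 * np.log10(np.dot(power, fbank.T) + 1e-10)    # logarithmic, log-frequency spectrogram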
I'm trying to plot the Amplitude (dBFS) vs. Time (s) plot of an audio (.wav) file using matplotlib. I managed to do that with the following code:
def convert_to_decibel(sample):
    ref = 32768  # Using a signed 16-bit PCM format wav file, so 2^15 is the maximum magnitude.
    if sample != 0:
        return 20 * np.log10(abs(sample) / ref)
    else:
        return 20 * np.log10(0.000001)
from scipy.io.wavfile import read as readWav
from scipy.fftpack import fft
import matplotlib.pyplot as gplot1
import matplotlib.pyplot as gplot2
import numpy as np
import struct
import gc
wavfile1 = '/home/user01/audio/speech.wav'
wavsamplerate1, wavdata1 = readWav(wavfile1)
wavdlen1 = wavdata1.size
wavdtype1 = wavdata1.dtype
gplot1.rcParams['figure.figsize'] = [15, 5]
pltaxis1 = gplot1.gca()
gplot1.axhline(y=0, c="black")
gplot1.xticks(np.arange(0, 10, 0.5))
gplot1.yticks(np.arange(-200, 200, 5))
gplot1.grid(linestyle = '--')
wavdata3 = np.array([convert_to_decibel(i) for i in wavdata1], dtype=np.int16)
yvals3 = wavdata3
t3 = wavdata3.size / wavsamplerate1
xvals3 = np.linspace(0, t3, wavdata3.size)
pltaxis1.set_xlim([0, t3 + 2])
pltaxis1.set_title('Amplitude (dBFS) vs Time(s)')
pltaxis1.plot(xvals3, yvals3, '-')
which gives the following output:
I had also plotted the Power Spectral Density (PSD, in dBm) using the code below:
from scipy.signal import welch as psd # Computes PSD using Welch's method.
fpsd, wPSD = psd(wavdata1, wavsamplerate1, nperseg=1024)
gplot2.rcParams['figure.figsize'] = [15, 5]
pltpsdm = gplot2.gca()
gplot2.axhline(y=0, c="black")
pltpsdm.plot(fpsd, 20*np.log10(wPSD))
gplot2.xticks(np.arange(0, 4000, 400))
gplot2.yticks(np.arange(-150, 160, 10))
pltpsdm.set_xlim([0, 4000])
pltpsdm.set_ylim([-150, 150])
gplot2.grid(linestyle = '--')
which gives the output as:
The second output above, using Welch's method, is more presentable. The dBFS plot, though informative, is not very presentable IMO. Is this because of:
the difference in the domains (time in case of 1st output vs frequency in the 2nd output)?
the way plot function is implemented in pyplot?
Also, is there a way I can plot my dBFS output as a peak-to-peak style of plot just like in my PSD (dBm) plot rather than a dense stem plot?
Any pointers, answers, or suggestions from experts here would be much appreciated, as I'm just a beginner with matplotlib and plotting in Python in general.
TL;DR

This has nothing to do with pyplot.

The frequency domain is different from the time domain, but that's not why you didn't get what you want.

The calculation of dBFS in your code is wrong.

You should frame your data, calculate the RMS or peak in every frame, and then convert that value to dBFS, instead of applying this transformation to every sample point.
When we talk about amplitude, we are talking about a periodic signal. And when we read in a series of data from a sound file, we read in a series of sample points of a signal (which may or may not be periodic). The value of every sample point represents, say, a voltage value or a sound pressure value sampled at a specific time.
We assume that, within a very short time interval, maybe 10ms for example, the signal is stationary. Every such interval is called a frame.
Usually a specific function is applied to each frame to reduce the sudden change at the edges of the frame; these functions are called window functions. If you do nothing to a frame, you have effectively applied a rectangular window to it.
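For example, a Hann window tapers a frame like this (a minimal sketch; the 441-sample frame length matches the 10 ms example that follows):

import numpy as np

frame = np.random.randn(441)        # one 10 ms frame at 44100 Hz (example data)
windowed = frame * np.hanning(441)  # Hann window tapers the frame edges to zero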
An example: when the sampling frequency of your sound is 44100 Hz, a 10 ms-long frame contains 44100*0.01 = 441 sample points. That's what the nperseg argument means in your psd function, but it has nothing to do with dBFS.
Given the knowledge above, now we can talk about the amplitude.
There are two methods to get the amplitude of every frame:

The most straightforward one is to take the maximum (peak) value in every frame.

Another one is to calculate the RMS (Root Mean Square) of every frame.

After that, the peak or RMS values can be converted to dBFS values.
Let's start coding:
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile

# Determine full scale (maximum possible amplitude) by bit depth
# (for signed 16-bit PCM the maximum magnitude is 2^15 = 32768)
bit_depth = 16
full_scale = 2 ** (bit_depth - 1)

# dBFS function
to_dbFS = lambda x: 20 * np.log10(x / full_scale)

# Read in the wave file
fname = "01.wav"
fs, data = wavfile.read(fname)

# Determine frame length (number of sample points in a frame) and total number of frames
# by window length (how long a frame is, in seconds)
window_length = 0.01
signal_length = data.shape[0]
frame_length = int(window_length * fs)
nframes = signal_length // frame_length

# Get frames by broadcasting. No overlaps are used.
idx = frame_length * np.arange(nframes)[:,None] + np.arange(frame_length)
frames = data[idx].astype("int64")  # Convert to int64 to avoid integer overflow

# Get RMS and peaks
rms = ((frames**2).sum(axis=1)/frame_length)**.5
peaks = np.abs(frames).max(axis=1)

# Convert them to dBFS
dbfs_rms = to_dbFS(rms)
dbfs_peak = to_dbFS(peaks)

# Let's start to plot

# Get time arrays for every sample point and every frame
frame_time = np.arange(nframes) * window_length
data_time = np.linspace(0, signal_length/fs, signal_length)

# Plot
f, ax = plt.subplots()
ax.plot(data_time, data, color="k", alpha=.3)

# Plot the dBFS values on a twin-x Axes since the y limits are not comparable
# between the raw data values and dBFS
tax = ax.twinx()
tax.plot(frame_time, dbfs_rms, label="RMS")
tax.plot(frame_time, dbfs_peak, label="Peak")
tax.legend()
f.tight_layout()

# Save several details
f.savefig("whole.png", dpi=300)
ax.set_xlim(1, 2)
f.savefig("1-2sec.png", dpi=300)
ax.set_xlim(1.295, 1.325)
f.savefig("1.2-1.3sec.png", dpi=300)
The whole time span looks like this (the unit of the right axis is dBFS):

And the voiced part looks like this:

You can see that the dBFS values increase as the amplitude grows at the start of the vowel:
I am trying to visualize a frequency spectrum using the Burg algorithm. The data that I am trying to visualize is the distance between heartbeats in milliseconds (e.g. [700, 650, 689, ..., 702]). The time distance is measured from the R peak of one heartbeat to the R peak of the next.

Now I would like to visualize the frequency band with Python's spectrum library (I'm a total noob). The minimum frequency that I am trying to display is 0.0033 Hz, so the time differences in my dataset sum to 5 minutes.

My approach was to first take the reciprocal of each value, then multiply by 1000, and then multiply by 60. This should give me the BPM for each heartbeat.
This is what it looks like: [67.11409396 64.72491909 ... 64.58557589]
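Equivalently, as a small sketch of the same conversion (assuming the intervals are in a NumPy array called rr_ms, a name introduced here for illustration):

import numpy as np

rr_ms = np.array([700, 650, 689, 702])  # R-R intervals in milliseconds (example values)
bpm = 60.0 * 1000.0 / rr_ms             # reciprocal * 1000 * 60 -> beats per minute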
Afterwards I use spectrum's burg algorithm to create the PSD. The "data" list contains my BpM for each heartbeat.
from pylab import plot, linspace, log10, pi
import pylab
from spectrum import arburg, arma2psd

AR, rho, ref = arburg(data.tolist(), 7)
PSD = arma2psd(AR, rho=rho, NFFT=1024)
PSD = PSD[len(PSD):len(PSD)//2:-1]
plot(linspace(0, 0.5, len(PSD)), 10*log10(abs(PSD)*2./(1.*pi)))
pylab.legend(['PSD estimate of x using Burg AR(7)'])
The graph that I get looks like this:
5 Minutes Spectrogram
This specific data already exists as a 3D-Spectrogram (Graph above is the equivalent to the last 5 Minutes of 3D-Spectrogram):
Long Time 3D-Spectrogram
My graph does not seem to match the 3D spectrogram; my frequencies are way off... What causes this and how can I fix it?

Also, I would like the y-axis in my graph not in [dB] but in absolute values. I tried:
plot(linspace(0, 0.5, len(PSD)), abs(PSD))
but that did not really seem to work. It just drew a hyperbola.
Thank you for your help!
The spectrum package comes with a pburg class that can generate a frequencies array; this is shown below. If you want a direct comparison between a spectrogram and AR PSDs, I would take the time definition used to compute the spectrogram and also use it to compute the AR PSD per window.
Also, your spectrogram example image looks focused on very low frequencies, so you may want to increase nfft to increase frequency resolution.
import matplotlib.pyplot as plt
from scipy.signal import spectrogram
import numpy as np
from spectrum import pburg
# Parameter settings
n_seconds = 10
fs = 1000 # sampling rate, in hz
freq = 10
nfft = 4096
nperseg = fs
order = 8
# Simulate 10 hz sine wave with white noise
x = np.sin(np.arange(0, n_seconds, 1/fs) * freq * 2 * np.pi)
x += np.random.rand(len(x)) / 10
# Compute spectrogram
freqs, times, powers = spectrogram(x, fs=fs, nfft=nfft, nperseg=nperseg)
# Get spectrogram time definition
times = (times * fs).astype(int)
window_times = np.array((times-times[0], times+times[0])).T
# Compute Burg's spectrum per window
powers_burg = np.array([pburg(x[t[0]:t[1]], order=order,
                              NFFT=nfft, sampling=fs).psd for t in window_times]).T
freqs_burg = np.array(pburg(x, order=order, NFFT=nfft, sampling=fs).frequencies())
# Plot
inds = np.where(freqs < 20)
inds_burg = np.where(freqs_burg < 20)
fig, axes = plt.subplots(ncols=2, figsize=(10, 5))
axes[0].pcolormesh(times/fs, freqs[inds], powers[inds], shading='gouraud')
axes[1].pcolormesh(times/fs, freqs_burg[inds_burg], powers_burg[inds_burg], shading='gouraud')
axes[0].set_title('Spectrogram')
axes[1].set_title('Burg\'s Spectrogram')
A research professor asked me to generate 2D spatial spectral density plots for a couple of videos. I have two problems:
How can I plot the PSD vs. x,y axis?
I know how to generate PSD for images, but uncertain how to do the same on videos. I thought about getting PSDs for every frame in the video and take the average, but I am having difficulties implementing it in python.
Below is the code I have
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt

curr_dir = os.getcwd()
img = cv2.imread(curr_dir+'/test.jpg', 0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
mag = 20*np.log(np.abs(fshift))
plt.subplot(121), plt.imshow(img, cmap='gray')
plt.subplot(122), plt.imshow(mag, cmap='gray')
plt.show()
This generates something like this:
I would like to get something like this:
Any help/advice is greatly appreciated!
Since you show two 1D spectra, it would seem that you are looking for something like the following.

We read in the image, Fourier transform along one axis, and then sum the power in each bin along the other axis. Since the input is real-valued, we use rfft() so that we do not have to shift the spectrum, and we use rfftfreq() to calculate the frequency for each bin. We graph the result omitting the sometimes large signal in the 0-frequency bin (which corresponds to the baseline) so that the useful part of the spectrum appears on a convenient scale.
#!/usr/bin/python3
import cv2
import os
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
curr_dir = os.getcwd()
img = cv2.imread(curr_dir+'/temp.png',0)
print( img.shape )
# Fourier Transform along the first axis
# Round up the size along this axis to an even number
n = int( math.ceil(img.shape[0] / 2.) * 2 )
# We use rfft since we are processing real values
a = np.fft.rfft(img,n, axis=0)
# Sum power along the second axis
a = a.real*a.real + a.imag*a.imag
a = a.sum(axis=1)/a.shape[1]
# Generate a list of frequencies
f = np.fft.rfftfreq(n)
# Graph it
plt.plot(f[1:],a[1:], label = 'sum of amplitudes over y vs f_x')
# Fourier Transform along the second axis
# Same steps as above
n = int( math.ceil(img.shape[1] / 2.) * 2 )
a = np.fft.rfft(img,n,axis=1)
a = a.real*a.real + a.imag*a.imag
a = a.sum(axis=0)/a.shape[0]
f = np.fft.rfftfreq(n)
plt.plot(f[1:],a[1:], label ='sum of amplitudes over x vs f_y')
plt.ylabel( 'amplitude' )
plt.xlabel( 'frequency' )
plt.yscale( 'log' )
plt.legend()
plt.savefig( 'test_rfft.png' )
#plt.show()
Applying this to the photograph posted in your question produces the following result:
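As for processing a video rather than a single image, one hedged sketch of the "average the per-frame PSDs" idea you mentioned could look like the following. The file name is a placeholder, and this assumes OpenCV can decode the video:

import cv2
import numpy as np

cap = cv2.VideoCapture('test_video.mp4')  # placeholder path
acc, count = None, 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    f = np.fft.fftshift(np.fft.fft2(gray))
    psd = np.abs(f) ** 2                   # 2D power spectrum of this frame
    acc = psd if acc is None else acc + psd
    count += 1
cap.release()
mean_psd = acc / count                     # average over all frames

The same per-axis summing shown above can then be applied to mean_psd instead of a single image's spectrum.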
So I recently successfully built a system which will record, plot, and play back an audio wav file entirely with Python. Now I'm trying to put some filtering and audio mixing in between when I record and when I start plotting and outputting the file to the speakers. However, I have no idea where to start. Right now I'm trying to read in the initial wav file, apply a low-pass filter, and then re-pack the newly filtered data into a new wav file. Here is the code I used to plot the initial data once I recorded it.
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys

spf = wave.open('wavfile.wav','r')

#Extract Raw Audio from Wav File
signal = spf.readframes(-1)
signal = np.frombuffer(signal, dtype=np.int16)  # np.fromstring is deprecated

plt.figure(1)
plt.title('Signal Wave...')
plt.plot(signal)
plt.show()
And here is some code I used to generate a test audio file of a single tone:

import numpy as np
import wave
import struct

freq = 440.0
data_size = 40000
fname = "High_A.wav"
frate = 11025.0
amp = 64000.0

sine_list_x = []
for x in range(data_size):
    sine_list_x.append(np.sin(2*np.pi*freq*(x/frate)))

wav_file = wave.open(fname, "w")

nchannels = 1
sampwidth = 2
framerate = int(frate)
nframes = data_size
comptype = "NONE"
compname = "not compressed"

wav_file.setparams((nchannels, sampwidth, framerate, nframes,
                    comptype, compname))

for s in sine_list_x:
    wav_file.writeframes(struct.pack('h', int(s*amp/2)))

wav_file.close()
I'm not really sure how to apply said audio filter and repack it, though. Any help and/or advice you could offer would be greatly appreciated.
First step: What kind of audio filter do you need?
Choose the filtered band
Low-pass Filter : remove highest frequency from your audio signal
High-pass Filter : remove lowest frequencies from your audio signal
Band-pass Filter : remove both highest and lowest frequencies from your audio signal
For the following steps, I assume you need a Low-pass Filter.
Choose your cutoff frequency
The Cutoff frequency is the frequency where your signal will be attenuated by -3dB.
Your example signal is 440 Hz, so let's choose a cutoff frequency of 400 Hz. Your 440 Hz signal is then attenuated (by more than 3 dB) by the 400 Hz low-pass filter.
Choose your filter type
According to this other stackoverflow answer
Filter design is beyond the scope of Stack Overflow - that's a DSP
problem, not a programming problem. Filter design is covered by any
DSP textbook - go to your library. I like Proakis and Manolakis'
Digital Signal Processing. (Ifeachor and Jervis' Digital Signal
Processing isn't bad either.)
To work through a simple example, I suggest using a moving average filter (as a simple low-pass filter).
See Moving average
Mathematically, a moving average is a type of convolution and so it can be viewed as an example of a low-pass filter used in signal processing
This Moving average Low-pass Filter is a basic filter, and it is quite easy to use and to understand.
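As a tiny illustration of the convolution view (a sketch with made-up numbers):

import numpy as np

x = np.array([1., 2., 3., 4., 5.])
window = np.ones(3) / 3.0                        # 3-point moving average kernel
smoothed = np.convolve(x, window, mode='valid')  # -> [2., 3., 4.]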
The parameter of the moving average is the window length.
The relationship between the moving average window length and the cutoff frequency requires a little bit of mathematics and is explained here.
The code will be
import math
sampleRate = 11025.0
cutOffFrequency = 400.0
freqRatio = cutOffFrequency / sampleRate
N = int(math.sqrt(0.196201 + freqRatio**2) / freqRatio)
So, in the example, the window length will be 12
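As a quick sanity check of that window length (a sketch; it estimates the attenuation of a 400 Hz tone by comparing standard deviations before and after filtering):

import numpy as np

fs, f0, N = 11025.0, 400.0, 12
t = np.arange(int(fs)) / fs                       # one second of signal
x = np.sin(2 * np.pi * f0 * t)                    # 400 Hz test tone
y = np.convolve(x, np.ones(N) / N, mode='valid')  # 12-point moving average
print(20 * np.log10(y.std() / x.std()))           # roughly -3 dB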
Second step: coding the filter
Hand-made moving average
see specific discussion on how to create a moving average in python
Solution from Alleo is
def running_mean(x, windowSize):
    cumsum = numpy.cumsum(numpy.insert(x, 0, 0))
    return (cumsum[windowSize:] - cumsum[:-windowSize]) / windowSize

filtered = running_mean(signal, N)
Using lfilter
Alternatively, as suggested by dpwilson, we can also use lfilter
import numpy
import scipy.signal

win = numpy.ones(N)
win *= 1.0/N
filtered = scipy.signal.lfilter(win, [1], signal).astype(channels.dtype)
Third step: Let's put it all together
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import math
import contextlib
fname = 'test.wav'
outname = 'filtered.wav'
cutOffFrequency = 400.0
# from http://stackoverflow.com/questions/13728392/moving-average-or-running-mean
def running_mean(x, windowSize):
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[windowSize:] - cumsum[:-windowSize]) / windowSize
# from http://stackoverflow.com/questions/2226853/interpreting-wav-data/2227174#2227174
def interpret_wav(raw_bytes, n_frames, n_channels, sample_width, interleaved = True):
    if sample_width == 1:
        dtype = np.uint8 # unsigned char
    elif sample_width == 2:
        dtype = np.int16 # signed 2-byte short
    else:
        raise ValueError("Only supports 8 and 16 bit audio formats.")

    channels = np.frombuffer(raw_bytes, dtype=dtype)  # np.fromstring is deprecated

    if interleaved:
        # channels are interleaved, i.e. sample N of channel M follows sample N of channel M-1 in raw data
        channels.shape = (n_frames, n_channels)
        channels = channels.T
    else:
        # channels are not interleaved. All samples from channel M occur before all samples from channel M-1
        channels.shape = (n_channels, n_frames)

    return channels
with contextlib.closing(wave.open(fname, 'rb')) as spf:
    sampleRate = spf.getframerate()
    ampWidth = spf.getsampwidth()
    nChannels = spf.getnchannels()
    nFrames = spf.getnframes()

    # Extract Raw Audio from multi-channel Wav File
    signal = spf.readframes(nFrames*nChannels)
    spf.close()
    channels = interpret_wav(signal, nFrames, nChannels, ampWidth, True)

    # get window size
    # from http://dsp.stackexchange.com/questions/9966/what-is-the-cut-off-frequency-of-a-moving-average-filter
    freqRatio = (cutOffFrequency/sampleRate)
    N = int(math.sqrt(0.196196 + freqRatio**2)/freqRatio)

    # Use moving average (only on first channel)
    filtered = running_mean(channels[0], N).astype(channels.dtype)

    wav_file = wave.open(outname, "w")
    wav_file.setparams((1, ampWidth, sampleRate, nFrames, spf.getcomptype(), spf.getcompname()))
    wav_file.writeframes(filtered.tobytes('C'))
    wav_file.close()
The sox library can be used for static noise removal.
I found this gist, which has some useful commands as examples.