Two dimensional FFT using python results in slightly shifted frequency

I know there have been several questions about using the Fast Fourier Transform (FFT) in Python, but unfortunately none of them could help me with my problem:
I want to use Python to calculate the FFT of a given two-dimensional signal f, i.e. f(x,y). Python's documentation helps a lot, solving a few of the issues the FFT brings with it, but I still end up with a slightly shifted frequency compared to the frequency I expect. Here is my Python code:
from scipy.fftpack import fft, fftfreq, fftshift
import matplotlib.pyplot as plt
import numpy as np
import math
fq = 3.0 # frequency of signal to be sampled
N = 100.0 # Number of sample points within interval, on which signal is considered
x = np.linspace(0, 2.0 * np.pi, N) # creating equally spaced vector from 0 to 2pi, with spacing 2pi/N
y = x
xx, yy = np.meshgrid(x, y) # create 2D meshgrid
fnc = np.sin(2 * np.pi * fq * xx) # create a signal, which is simply a sine function with frequency fq = 3.0, modulating the x(!) direction
ft = np.fft.fft2(fnc) # calculating the fft coefficients
dx = x[1] - x[0] # spacing in x (and also y) direction (real space)
sampleFrequency = 2.0 * np.pi / dx
nyquisitFrequency = sampleFrequency / 2.0
freq_x = np.fft.fftfreq(ft.shape[0], d = dx) # return the DFT sample frequencies
freq_y = np.fft.fftfreq(ft.shape[1], d = dx)
freq_x = np.fft.fftshift(freq_x) # order sample frequencies, such that 0-th frequency is at center of spectrum
freq_y = np.fft.fftshift(freq_y)
half = len(ft) / 2 + 1 # calculate half of spectrum length, in order to only show positive frequencies
plt.imshow(
    2 * abs(ft[:half, :half]) / half,
    aspect = 'auto',
    extent = (0, freq_x.max(), 0, freq_y.max()),
    origin = 'lower',
    interpolation = 'nearest',
)
plt.grid()
plt.colorbar()
plt.show()
And what I get out of this when running it is:
Now you see that the frequency in the x direction is not exactly at fq = 3, but slightly shifted to the left. Why is this?
I would assume that it has to do with the fact that the FFT is an algorithm using symmetry arguments, and that
half = len(ft) / 2 + 1
is used to show the frequencies at the proper place. But I don't quite understand what the exact problem is and how to fix it.
Edit: I have also tried using a higher sampling frequency (N = 10000.0), which did not solve the issue but instead shifted the frequency slightly too far to the right. So I am pretty sure that the problem is not the sampling frequency.
Note: I'm aware that the leakage effect leads to unphysical amplitudes here, but in this post I am primarily interested in the correct frequencies.

I found a number of issues:
You use 2 * np.pi twice; you should let either the linspace or the argument to the sine carry the radians if you want a nice integer number of cycles.
Additionally, np.linspace defaults to endpoint=True, so the last sample repeats the start of the next period (as if you had 101 points covering 100 steps); pass endpoint=False to get exactly one period's worth of samples.
fq = 3.0 # frequency of signal to be sampled
N = 100 # Number of sample points within interval, on which signal is considered
x = np.linspace(0, 1, N, endpoint=False) # equally spaced vector from 0 (inclusive) to 1 (exclusive), spacing 1/N
y = x
xx, yy = np.meshgrid(x, y) # create 2D meshgrid
fnc = np.sin(2 * np.pi * fq * xx) # create a signal, which is simply a sine function with frequency fq = 3.0, modulating the x(!) direction
you can check these issues:
len(x)
Out[228]: 100
plt.plot(fnc[0])
Fixing the linspace endpoint means you now have an even number of FFT bins, so you drop the + 1 in the half calculation.
matshow() appears to have better defaults; your extent = (0, freq_x.max(), 0, freq_y.max()) in imshow appears to mangle the FFT bin numbering.
from scipy.fftpack import fft, fftfreq, fftshift
import matplotlib.pyplot as plt
import numpy as np
import math
fq = 3.0 # frequency of signal to be sampled
N = 100 # Number of sample points within interval, on which signal is considered
x = np.linspace(0, 1, N, endpoint=False) # equally spaced vector from 0 (inclusive) to 1 (exclusive), spacing 1/N
y = x
xx, yy = np.meshgrid(x, y) # create 2D meshgrid
fnc = np.sin(2 * np.pi * fq * xx) # create a signal, which is simply a sine function with frequency fq = 3.0, modulating the x(!) direction
plt.plot(fnc[0])
ft = np.fft.fft2(fnc) # calculating the fft coefficients
#dx = x[1] - x[0] # spacing in x (and also y) direction (real space)
#sampleFrequency = 2.0 * np.pi / dx
#nyquisitFrequency = sampleFrequency / 2.0
#
#freq_x = np.fft.fftfreq(ft.shape[0], d=dx) # return the DFT sample frequencies
#freq_y = np.fft.fftfreq(ft.shape[1], d=dx)
#
#freq_x = np.fft.fftshift(freq_x) # order sample frequencies, such that 0-th frequency is at center of spectrum
#freq_y = np.fft.fftshift(freq_y)
half = len(ft) // 2 # calculate half of spectrum length, in order to only show positive frequencies
plt.matshow(
    2 * abs(ft[:half, :half]) / half,
    aspect='auto',
    origin='lower'
)
plt.grid()
plt.colorbar()
plt.show()
Here is a zoomed view of the resulting plot:
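If you also want the axes labelled in physical units rather than bin numbers, here is a small sketch (assuming the corrected fnc/ft above, where x spans [0, 1) so frequencies come out in cycles per unit length): read the peak bin and convert it with fftfreq instead of trusting imshow's extent.
freqs = np.fft.fftfreq(N, d=1.0/N)       # bin index -> frequency in cycles per unit length
kx = np.argmax(np.abs(ft[0, :half]))     # peak along the x-frequency axis (row ky = 0)
print(kx, freqs[kx])                     # expect 3 and 3.0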

Related

Fourier Transform - strange results

I'm trying to make some examples of FFTs. The idea here is to have three waveforms for three different musical notes (A, C, E), add them together (to form the A minor chord) and then do an FFT to retrieve the original frequencies.
import numpy as np
import matplotlib.pyplot as plt
import scipy.fft
def generate_sine_wave(freq, sample_rate, duration):
    x = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
    frequencies = x * freq
    # 2pi because np.sin takes radians
    y = np.sin(2 * np.pi * frequencies)
    return x, y

def main():
    # Frequencies of the notes in the A minor chord (A, C, E)
    # note_names = ('A', 'C', 'E')
    # fs = (440, 261.63, 329.63)
    fs = (27.50, 16.35, 20.60)
    # duration, in seconds.
    duration = .5
    # sample rate. determines how many data points the signal uses to represent
    # the sine wave per second. So if the signal had a sample rate of 10 Hz and
    # was a five-second sine wave, then it would have 10 * 5 = 50 data points.
    sample_rate = 1000
    fig, ax = plt.subplots(5)
    all_wavelengths = []
    # Create a linspace, with N samples from 0 to duration
    # x = np.linspace(0.0, T, N)
    for i, f in enumerate(fs):
        x, y = generate_sine_wave(f, sample_rate, duration)
        # y = np.sin(2 * np.pi * F * x)
        all_wavelengths.append(y)
        ax[i].plot(x, y)
    # sum of all notes
    aminor = np.sum(all_wavelengths, axis=0)
    ax[i].plot(x, aminor)
    yf = np.abs(scipy.fft.rfft(aminor))
    xf = scipy.fft.rfftfreq(int(sample_rate * duration), 1 / sample_rate)
    ax[i + 1].plot(xf, yf)
    ax[i + 1].vlines(fs, ymin=0, ymax=(max(yf)), color='purple')
    plt.show()

if __name__ == '__main__':
    main()
However, the FFT plot (last subplot) does not have the proper peak frequencies (highlighted through vertical purple lines). Why is that?
The FFT will only recover the contained frequencies exactly if the sampling window covers a multiple of the signal's period. Otherwise, if there is a "remainder", the frequency peaks will deviate from the exact values.
Since your A-minor signal contains three distinct frequencies, 27.50, 16.35, 20.60 Hz, you need a sampling duration which covers a multiple of the period for each of those components. In order to find that duration, you can compute the least common multiple of each of the fractional parts of the frequencies:
>>> import math
>>> math.lcm(50, 35, 60, 100)
2100
Note that we're including 100 here because the multiple also needs to satisfy the condition to sample a whole period. The above result implies that for a duration of 21 seconds, the frequencies will be recovered perfectly. Of course, any other multiple of 21 seconds will work as well. The following plot is obtained for a duration of 21 seconds:
I think that, within the margin of error, the results do in fact match your frequencies:
You can see in your frequency plot that the frequencies closest to your actual frequencies do indeed have the highest amplitude.
However, because this is a DFT algorithm, the frequencies being returned are discrete, so they don't exactly match the frequencies you used to construct your sample.
What you can try is making your sample size (i.e. the number of time points in your input data) longer and/or a multiple of your input periods, as in the sketch below. That should increase the frequency resolution and/or move the sampled output frequencies closer to the input frequencies.
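A minimal sketch of that idea (the parameters below are assumptions for illustration, not the original poster's exact setup): with duration D the FFT bins are spaced 1/D Hz apart, and a 20 s window happens to put all three tones exactly on bins, since 27.50*20, 16.35*20 and 20.60*20 are all integers.
import numpy as np
import scipy.fft
sample_rate, duration = 1000, 20.0      # assumed values; 20 s -> 0.05 Hz bin spacing
t = np.arange(int(sample_rate * duration)) / sample_rate
sig = sum(np.sin(2 * np.pi * f * t) for f in (27.50, 16.35, 20.60))
spec = np.abs(scipy.fft.rfft(sig))
freqs = scipy.fft.rfftfreq(len(sig), 1 / sample_rate)
print(sorted(freqs[np.argsort(spec)[-3:]]))   # the three strongest bins: ~[16.35, 20.60, 27.50]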

Output of fft.fft() for magnitude and phase (angle) not corresponding to the values set up

I set up a sine wave of a certain amplitude, frequency and phase, and tried recovering the amplitude and phase:
import numpy as np
import matplotlib.pyplot as plt
N = 1000 # Sample points
T = 1 / 800 # Spacing
t = np.linspace(0.0, N*T, N) # Time
frequency = np.fft.fftfreq(t.size, d=T) # Normalized Fourier frequencies in spectrum.
f0 = 25 # Frequency of the sampled wave
phi = np.pi/6 # Phase
A = 50 # Amplitude
s = A * np.sin(2 * np.pi * f0 * t - phi) # Signal
S = np.fft.fft(s) # Unnormalized FFT
fig, [ax1,ax2] = plt.subplots(nrows=2, ncols=1, figsize=(10, 5))
ax1.plot(t,s,'.-', label='time signal')
ax2.plot(frequency[0:N//2], 2/N * np.abs(S[0:N//2]), '.', label='amplitude spectrum')
plt.show()
index, = np.where(np.isclose(frequency, f0, atol=1/(T*N))) # Getting the normalized frequency close to f0 in Hz)
magnitude = np.abs(S[index[0]]) # Magnitude
phase = np.angle(S[index[0]]) # Phase
print(magnitude)
print(phase)
phi
#21785.02149316858
#-1.2093259641890741
#0.5235987755982988
Now the amplitude should be 50, instead of 21785, and the phase pi/6=0.524, instead of -1.2.
Am I misinterpreting the output, or the answer on the post referred to in the link above?
You need to normalize the fft by 1/N with one of the two following changes (I used the 2nd one):
S = np.fft.fft(s) --> S = 1/N*np.fft.fft(s)
magnitude = np.abs(S[index[0]]) --> magnitude = 1/N*np.abs(S[index[0]])
Don't use index, = np.where(np.isclose(frequency, f0, atol=1/(T*N))); the fft is not exact and the highest magnitude may not be at f0. Use np.argmax(np.abs(S)) instead, which will give you the peak of the signal, which will be very close to f0.
np.angle messes up (I think it's one of those pi, pi/2 arctan offset things); just do it manually with np.arctan(np.real(x)/np.imag(x)).
Use more points (I made N higher) and make T smaller for higher accuracy.
Since a DFT (discrete Fourier transform) is double sided and has peak signals in both the negative and positive frequencies, the peak on the positive side will only be half the actual magnitude. For an fft you need to multiply every frequency by two, except for f=0, to account for this. I multiplied by 2 in magnitude = np.abs(S[index])*2/N.
N = 10000
T = 1/5000
...
index = np.argmax(np.abs(S))
magnitude = np.abs(S[index])*2/N
freq_max = frequency[index]
phase = np.arctan(np.real(S[index])/np.imag(S[index]))
print(f"magnitude: {magnitude}, freq_max: {freq_max}, phase: {phase}") print(phi)
Output: magnitude: 49.996693276663564, freq_max: 25.0, phase: 0.5079341239733628
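As an aside (not part of the answer above), np.angle can also work if the tone sits exactly on a bin and you account for the sine convention: A*sin(w*t - phi) equals A*cos(w*t - phi - pi/2), so the FFT phase at +f0 is -(phi + pi/2). A minimal sketch with assumed parameters (duration exactly 1 s, so f0 = 25 lands on a bin):
import numpy as np
N, T = 1000, 1/1000                       # assumed: N*T = 1 s -> 1 Hz bin spacing
t = np.arange(N) * T
f0, A, phi = 25, 50, np.pi / 6
s = A * np.sin(2 * np.pi * f0 * t - phi)
S = np.fft.fft(s)
k = np.argmax(np.abs(S[:N // 2]))         # positive-frequency peak bin
print(2 / N * np.abs(S[k]))               # ~50
print(-np.angle(S[k]) - np.pi / 2)        # ~pi/6 ~ 0.5236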

Calculate non-integer frequency with NumPy FFT

I would like to calculate the frequency of a periodic time series using NumPy FFT. As an example, let's say my time series y is defined as follows:
import numpy as np
freq = 12.3
x = np.arange(10000)
y = np.cos(x * 2 * np.pi * freq / 10000)
If the frequency is an integer, I can calculate it using np.argmax(np.abs(np.fft.fft(y))). However, in case the frequency is not an integer, how do I calculate the frequency with more precision?
EDIT: To clarify, we are not supposed to know how the time series y is generated. The above code snippet is just an artificial example of how a non-integer frequency could come up. Obviously if we already know the function that generates the time series, we don't need FFT to determine the frequency.
You need to give your signal more resolution
import numpy as np
freq = 12.3
x = np.arange(100000) # 10 times more resolution
y = np.cos(x * 2 * np.pi * freq / 10000) # don't change this
print(np.argmax(np.abs(np.fft.fft(y))) / 10) # divide by 10
# 12.3
The number of data points in x needs to be 10 times larger than the number you divide by inside y. You could get the same effect like this:
x = np.arange(10000)
y = np.cos(x * 2 * np.pi * freq / 1000)
print(np.argmax(np.abs(np.fft.fft(y))) / 10)
# 12.3
If you want to find the frequency with two decimals the resolution needs to be 100 times more.
freq = 12.34
x = np.arange(10000)
y = np.cos(x * 2 * np.pi * freq / 100) # 100 times more resolution
print(np.argmax(np.abs(np.fft.fft(y))) / 100) # divide by 100
# 12.34
You can pad the data with zeros before computing the FFT.
For example, here's your original calculation. It finds the Fourier coefficient with the maximum magnitude at frequency 12.0:
In [84]: freq = 12.3
In [85]: x = np.arange(10000)
In [86]: y = np.cos(x * 2 * np.pi * freq / 10000)
In [87]: f = np.fft.fft(y)
In [88]: k = np.argmax(np.abs(f))
In [89]: np.fft.fftfreq(len(f), d=1/10000)[k]
Out[89]: 12.0
Now recompute the Fourier transform, but pad the input to have a length of six times the original length (you can adjust that factor as needed). With the padded signal the Fourier coefficient with maximum magnitude is associated with frequency 12.333:
In [90]: f = np.fft.fft(y, 6*len(y))
In [91]: k = np.argmax(np.abs(f))
In [92]: np.fft.fftfreq(len(f), d=1/10000)[k]
Out[92]: 12.333333333333332
Here's a plot that illustrates the effect of padding the signal. The signal is not the same as above; I used different values with a much shorter signal to make it easier to see the effect. The shapes of the lobes are not changed, but the number of points at which the frequency is sampled is increased.
The plot is generated by the following script:
import numpy as np
import matplotlib.pyplot as plt
fs = 10
T = 1.4
t = np.arange(T*fs)/fs
freq = 2.6
y = np.cos(2*np.pi*freq*t)
fy = np.fft.fft(y)
magfy = np.abs(fy)
freqs = np.fft.fftfreq(len(fy), d=1/fs)
plt.plot(freqs, magfy, 'd', label='no padding')
for (factor, markersize) in [(2, 9), (16, 4)]:
    fy_padded = np.fft.fft(y, factor*len(y))
    magfy_padded = np.abs(fy_padded)
    freqs_padded = np.fft.fftfreq(len(fy_padded), d=1/fs)
    plt.plot(freqs_padded, magfy_padded, '.', label='padding factor %d' % factor,
             alpha=0.5, markersize=markersize)
plt.xlabel('Frequency')
plt.ylabel('Magnitude of Fourier Coefficient')
plt.grid()
plt.legend(framealpha=1, shadow=True)
plt.show()
You can try using either interpolation or zero-padding (which is equivalent to entire vector interpolation) to potentially improve your frequency estimation, if the S/N allows. Sinc kernel interpolation is more accurate than parabolic interpolation.
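For reference, here is a hedged sketch of the parabolic-interpolation variant (a common refinement, not code from the answers above): fit a parabola through the log-magnitudes of the peak bin and its two neighbours to estimate the sub-bin offset.
import numpy as np
freq = 12.3
x = np.arange(10000)
y = np.cos(x * 2 * np.pi * freq / 10000)
mag = np.abs(np.fft.fft(y))
k = np.argmax(mag[:len(mag) // 2])                 # coarse peak bin
a, b, c = np.log(mag[k - 1]), np.log(mag[k]), np.log(mag[k + 1])
delta = 0.5 * (a - c) / (a - 2 * b + c)            # sub-bin offset in (-0.5, 0.5)
print(k + delta)                                   # ~12.3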

Smoothly concatenating sine waves from input

Last month, I posted this question about how to concatenate sine waves WHEN you are generating them, but now I've faced a different situation where I will generate a sine and make it continue from the end of another sine I did not generate.
My solution was based on the second answer to my previous question: compute the Hilbert transform, then calculate the angle with numpy.angle, normalize it by adding 90, and generate the next sine from there. It works, but only when the decimal part of my frequency is 0 or 5; otherwise the waves don't match and I have no clue why.
from scipy.signal import hilbert
import numpy as np
from matplotlib import pyplot as plt
N = 1024
t = np.linspace(0, 1, N)
freq = 5.0
c = np.sin(2 * np.pi * freq * t + 0.0)
c2 = np.angle(hilbert(c), True) # in degrees
plt.subplot(2, 1, 1)
plt.grid()
plt.plot(c)
plt.subplot(2, 1, 2)
phase = c2[-1] + 90
c3 = np.sin(2.0 * np.pi * freq * t + np.deg2rad(phase))
plt.grid()
plt.plot(c3)
plt.show()
(Output plots shown for Frequency: 5.0 and Frequency: 5.8.)
When the values at the beginning and the end of the time interval do not agree, boundary effects appear and distort the Hilbert transform. (Recall that the Fourier transform reacts poorly to discontinuities.) This can be seen by plotting the end of c2 with plt.plot(c2[-200:] + 90): notice the distortion toward the end; the curve is supposed to rise with constant slope.
You'll get better results by stepping back one period from the edge of the time window:
phase = c2[-1 - int(N//freq)] + 90
I tried with frequency 5.8: the beginning of second curve matches the end of the first.
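A quick end-to-end check of that fix, as a sketch reusing the question's setup (the match is approximate, not exact):
from scipy.signal import hilbert
import numpy as np
N = 1024
t = np.linspace(0, 1, N)
freq = 5.8
c = np.sin(2 * np.pi * freq * t)
c2 = np.angle(hilbert(c), True)              # instantaneous phase in degrees
phase = c2[-1 - int(N // freq)] + 90         # read the phase one period before the edge
c3 = np.sin(2 * np.pi * freq * t + np.deg2rad(phase))
print(c[-1], c3[0])                          # the two ends should roughly line up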
It is not clear what your exact problem scope is. In the previous question, in a comment which spawned this followup question, you said:
If I don't have the generation equation ( say, I've got a chunk from mic ) what would be the approach?
Does this mean the data is not necessarily a sine wave? Is it noisy? Is it of varying magnitude? You mention DSP: are you doing the processing in real time, or can the analysis take as long as needed?
If it is a clean sine wave of known magnitude, it is relatively easy to extract the phase from the end of the signal, to allow a smooth continuation.
The phase is sin⁻¹(y/mag). There are two angles whose sine equals y/mag: one where sin(angle) is increasing with increasing angle, and one where it is decreasing. By looking at the previous point, we can determine which one we need.
import math
import numpy as np

def ending_phase(c, mag):
    angle = math.asin(c[-1] / mag)
    if c[-2] > c[-1]:
        angle = np.pi - angle
    return angle
From the phase of the last point, and the phase of the second last point, we can extrapolate the phase for the next point.
def next_phase(c, mag):
    ph1 = ending_phase(c[:-1], mag)
    ph2 = ending_phase(c, mag)
    return 2 * ph2 - ph1
Passing the previous chunk to next_phase() computes the phase argument required to smoothly continue the chunk.
N = 1024
t = np.linspace(0, 1, N)
mag = 1.2
freq = 5.2
phase = 2.2
c1 = mag * np.sin(2 * np.pi * freq * t + phase)
plt.subplot(2,2,1)
plt.grid()
plt.plot(c1)
freq = 3.8
phase = next_phase(c1, mag)
c2 = mag * np.sin(2 * np.pi * freq * t + phase)
plt.subplot(2,2,2)
plt.grid()
plt.plot(c2)
c3 = np.concatenate((c1, c2))
plt.subplot(2,1,2)
plt.grid()
plt.plot(c3)
plt.show()

Find time shift of two signals using cross correlation

I have two signals which are related to each other and have been captured by two different measurement devices simultaneously.
Since the two measurements are not time synchronized there is a small time delay between them which I want to calculate. Additionally, I need to know which signal is the leading one.
The following can be assumed:
no or only very little noise present
speed of the algorithm is not an issue, only accuracy and robustness
signals are captured with a high sampling rate (>10 kHz) for several seconds
expected time delay is < 0.5s
I thought of using cross-correlation for that purpose.
Any suggestions how to implement that in Python are very appreciated.
Please let me know if I should provide more information in order to find the most suitable algorithm.
A popular approach: timeshift is the lag corresponding to the maximum cross-correlation coefficient. Here is how it works with an example:
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
def lag_finder(y1, y2, sr):
    n = len(y1)
    corr = signal.correlate(y2, y1, mode='same') / np.sqrt(signal.correlate(y1, y1, mode='same')[int(n/2)] * signal.correlate(y2, y2, mode='same')[int(n/2)])
    delay_arr = np.linspace(-0.5*n/sr, 0.5*n/sr, n)
    delay = delay_arr[np.argmax(corr)]
    print('y2 is ' + str(delay) + ' behind y1')

    plt.figure()
    plt.plot(delay_arr, corr)
    plt.title('Lag: ' + str(np.round(delay, 3)) + ' s')
    plt.xlabel('Lag')
    plt.ylabel('Correlation coeff')
    plt.show()
# Sine sample with some noise and copy to y1 and y2 with a 1-second lag
sr = 1024
y = np.linspace(0, 2*np.pi, sr)
y = np.tile(np.sin(y), 5)
y += np.random.normal(0, 5, y.shape)
y1 = y[sr:4*sr]
y2 = y[:3*sr]
lag_finder(y1, y2, sr)
In the case of noisy signals, it is common to apply band-pass filters first. In the case of harmonic noise, they can be removed by identifying and removing frequency spikes present in the frequency spectrum.
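If that preprocessing is needed, here is a minimal band-pass sketch (the cutoff frequencies are placeholders to adapt to the band your signals actually occupy):
from scipy import signal

def bandpass(y, sr, low_hz, high_hz, order=4):
    # zero-phase filtering, so the filter itself does not bias the lag estimate
    sos = signal.butter(order, [low_hz, high_hz], btype='bandpass', fs=sr, output='sos')
    return signal.sosfiltfilt(sos, y)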
NumPy has a function, correlate, which suits your needs: https://docs.scipy.org/doc/numpy/reference/generated/numpy.correlate.html
To complement Reveille's answer above (I reproduce his algorithm), I would like to point out some ideas for preprocessing the input signals.
Since there seems to be no one-size-fits-all approach (duration in periods, resolution, offset, noise, signal type, ...), you may have to play with the options.
In my example, applying a window function improves the detected phase shift (within the resolution of the discretization).
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
r2d = 180.0/np.pi # conversion factor RAD-to-DEG
delta_phi_true = 50.0/r2d
def detect_phase_shift(t, x, y):
    '''detect phase shift between two signals from cross correlation maximum'''
    N = len(t)
    L = t[-1] - t[0]

    cc = signal.correlate(x, y, mode="same")
    i_max = np.argmax(cc)
    phi_shift = np.linspace(-0.5*L, 0.5*L, N)
    delta_phi = phi_shift[i_max]

    print("true delta phi = {} DEG".format(delta_phi_true*r2d))
    print("detected delta phi = {} DEG".format(delta_phi*r2d))
    print("error = {} DEG resolution for comparison dphi = {} DEG".format((delta_phi-delta_phi_true)*r2d, dphi*r2d))
    print("ratio = {}".format(delta_phi/delta_phi_true))
    return delta_phi
L = np.pi*10+2 # interval length [RAD], for generality not multiple period
N = 1001 # interval division, odd number is better (center is integer)
noise_intensity = 0.0
X = 0.5 # amplitude of first signal..
Y = 2.0 # ..and second signal
phi = np.linspace(0, L, N)
dphi = phi[1] - phi[0]
'''generate signals'''
nx = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
ny = noise_intensity*np.random.randn(N)*np.sqrt(dphi)
x_raw = X*np.sin(phi) + nx
y_raw = Y*np.sin(phi+delta_phi_true) + ny
'''preprocessing signals'''
x = x_raw.copy()
y = y_raw.copy()
window = signal.windows.hann(N) # Hanning window
#x -= np.mean(x) # zero mean
#y -= np.mean(y) # zero mean
#x /= np.std(x) # scale
#y /= np.std(y) # scale
x *= window # reduce effect of finite length
y *= window # reduce effect of finite length
print(" -- using raw data -- ")
delta_phi_raw = detect_phase_shift(phi, x_raw, y_raw)
print(" -- using preprocessed data -- ")
delta_phi_preprocessed = detect_phase_shift(phi, x, y)
Without noise (to be deterministic) the output is
-- using raw data --
true delta phi = 50.0 DEG
detected delta phi = 47.864788975654 DEG
...
-- using preprocessed data --
true delta phi = 50.0 DEG
detected delta phi = 49.77938053468019 DEG
...
SciPy has a useful function for this, called correlation_lags, which uses the underlying correlate function mentioned by the other answers to find the time lag. The example displayed at the bottom of that page is useful:
import numpy as np
from scipy import signal
from numpy.random import default_rng
rng = default_rng()
x = rng.standard_normal(1000)
y = np.concatenate([rng.standard_normal(100), x])
correlation = signal.correlate(x, y, mode="full")
lags = signal.correlation_lags(x.size, y.size, mode="full")
lag = lags[np.argmax(correlation)]
Then lag would be -100
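To express that lag as a time offset, divide by the sampling rate (a one-line sketch; sr below is an assumed value, use your devices' actual rate):
sr = 10000                      # assumed sample rate in Hz
delay_seconds = lag / sr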
