I'm learning about cross-spectra and coherence. From what I understand, coherence is the analogue of correlation, in that you normalize the cross-spectrum by the product of the individual power spectra:
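In symbols, that is the magnitude-squared coherence, Cxy(f) = |Pxy(f)|^2 / (Pxx(f) * Pyy(f)).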
Here is my current Python implementation:
import numpy

def crossSpectrum(x, y):
    #-------------------Remove mean-------------------
    xp = x - numpy.mean(x)
    yp = y - numpy.mean(y)
    n = len(x)
    # Do FFT
    cfx = numpy.fft.fft(xp)/n
    cfy = numpy.fft.fft(yp)/n
    freq = numpy.fft.fftfreq(n)
    # Get cross spectrum
    cross = cfx.conj()*cfy
    return cross, freq
#-------------Main---------------------------------
if __name__ == '__main__':

    x = numpy.linspace(-250, 250, 500)
    noise = numpy.random.random(len(x))
    y = 10*numpy.sin(2*numpy.pi*x/10.) + 5*numpy.sin(2*numpy.pi*x/5.) + \
        2*numpy.sin(2*numpy.pi*x/20.) + 10
    y += noise*10
    y2 = 5*numpy.sin(2*numpy.pi*x/10.) + 5 + noise*50

    p11, freq = crossSpectrum(y, y)
    p22, freq = crossSpectrum(y2, y2)
    p12, freq = crossSpectrum(y, y2)

    # coherence
    coh = numpy.abs(p12)**2 / p11.real / p22.real
    print(coh)
And my computed coherence is an array of 1s. What am I doing wrong?
Also, sometimes the coherence plot has downward-pointing spikes (like the output of scipy.signal.coherence), while in other places they point upward (e.g. here). I'm a bit confused by the interpretation of coherence: shouldn't a larger coherence imply covariability between the two timeseries at that frequency?
Thanks in advance.
You should be using Welch's method, i.e. averaging the cross-spectra over several segments; without that averaging, the coherence of a single spectrum is identically 1. As an example, here is code similar to yours (with some simplifications) that gives the expected results.
import numpy
from matplotlib.pyplot import plot, show, figure, ylim, xlabel, ylabel

def crossSpectrum(x, y, nperseg=1000):
    # Average the cross-spectrum over segments of length nperseg (Welch's method)
    cross = numpy.zeros(nperseg, dtype='complex128')
    for ind in range(x.size // nperseg):
        xp = x[ind * nperseg: (ind + 1) * nperseg]
        yp = y[ind * nperseg: (ind + 1) * nperseg]
        # Remove mean
        xp = xp - numpy.mean(xp)
        yp = yp - numpy.mean(yp)
        # Do FFT
        cfx = numpy.fft.fft(xp)
        cfy = numpy.fft.fft(yp)
        # Accumulate cross spectrum
        cross += cfx.conj()*cfy
    freq = numpy.fft.fftfreq(nperseg)
    return cross, freq

#-------------Main---------------------------------
if __name__ == '__main__':

    x = numpy.linspace(-2500, 2500, 50000)
    noise = numpy.random.random(len(x))
    y = 10*numpy.sin(2*numpy.pi*x)
    y2 = 5*numpy.sin(2*numpy.pi*x) + 5 + noise*50

    p11, freq = crossSpectrum(y, y)
    p22, freq = crossSpectrum(y2, y2)
    p12, freq = crossSpectrum(y, y2)

    # coherence
    coh = numpy.abs(p12)**2 / p11.real / p22.real

    plot(freq[freq > 0], coh[freq > 0])
    xlabel('Normalized frequency')
    ylabel('Coherence')
    show()
and the resulting visualization:
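As a sketch of a cross-check, scipy's built-in Welch-averaged estimator should give a comparable curve for the same y and y2 (assuming the arrays from the example above and the same segment length):
from scipy import signal

# Welch-averaged magnitude-squared coherence; fs defaults to 1, so frequencies are normalized
f, Cxy = signal.coherence(y, y2, nperseg=1000)
plot(f, Cxy)
xlabel('Normalized frequency')
ylabel('Coherence')
show()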
I have tried Fourier transformation on two types of data:
Sample 1:
[5.22689618e-03 2.41534315e+00 0.00000000e+00 2.44972858e-01
4.25280086e-02 9.33815370e-01 3.03247287e-01 2.30718622e-01
3.67055310e-03 1.08563452e-01 1.65198349e+00 0.00000000e+00
0.00000000e+00 3.81988895e-01 5.76292398e-02 4.30815942e-01
1.26064346e+00 1.22264457e-01 9.20206465e-01 5.10205014e-02
1.69624045e-02 1.27946761e-04 9.85479035e-02 1.47322503e-03
5.88801835e-01 9.14815956e-04 1.58679916e-02 1.80580638e-04
1.33877873e-02 4.31785412e-01 4.29998728e+00 7.28365507e-02
2.93196111e-01 6.46923082e-01 1.64020382e+00 2.66995573e-01
1.73472541e+00 1.20450527e+00 2.93713233e-01 4.37012291e-02]
Sample 2:
[1.63397428e-05 5.06326672e-05 5.07890481e-05 3.27493146e-05
6.19592537e-04 2.14127001e-04 3.31584036e-04 3.67715480e-05
8.80083850e-05 2.70784860e-05 1.82332876e-05 1.13016894e-04
9.62697824e-06 6.82677776e-06 1.55218678e-05 1.17480350e-04
6.16809229e-05 7.51721123e-05 1.71455229e-04 3.89279781e-05
4.14875499e-04 2.91402507e-05 2.07895269e-04 3.54011492e-04
1.81316474e-06 2.28406092e-06 1.75001077e-04 3.21301398e-04
2.77558089e-05 4.87684894e-04 1.91564056e-05 5.09892912e-06
1.91527601e-04 4.01062880e-05 8.71198341e-06 2.87598374e-04
1.81759066e-04 4.45384356e-05 1.90984648e-05 5.30995670e-04
9.18908105e-05 6.19051866e-04 7.01213246e-06 1.97357403e-05
1.52822474e-06 4.44353810e-05 2.12599086e-04 1.10231829e-04
3.34317235e-05 1.10217651e-04 2.27115357e-05 2.43621107e-04
9.59827677e-05 1.99824695e-04 1.76506191e-04 2.83759047e-04
3.95528472e-05 1.04707169e-04 1.92821332e-05 1.69779942e-04
2.82886397e-04 1.42531512e-04 1.25545297e-05 6.64785458e-05
2.49983906e-04 7.36739683e-05 1.75280514e-04 6.63189677e-05
7.10295246e-06 1.45455223e-04 1.71038217e-04 1.80725103e-05
6.81610965e-05 5.38084605e-04 7.54303898e-05 1.74061569e-05
1.26723836e-04 8.94035463e-05 9.10723041e-05 1.54759188e-05
4.62195919e-05 4.98443430e-04 1.13484031e-04 6.36021767e-05
4.85053842e-05 6.72420582e-05 3.24306672e-04 1.92741144e-05
7.68752096e-04 3.27063864e-04 2.19143305e-05 7.59406815e-05
3.59140998e-05 5.66567689e-04 2.57783290e-04 1.20276603e-04
1.99017206e-05 3.27490253e-04 9.39513533e-05 1.01252991e-04]
I tried implementing a Fourier filter to extract the signal from the original data, but the two samples do not show the same behaviour: sample 1 does not yield any extracted function, while sample 2 does, as shown in the figure.
Partial code for the analysis is presented below.
# Positive sample
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
f = signal
t = np.arange(len(signal))
dt = 1
## Compute the Fast Fourier Transform (FFT)
n = len(t)
fhat = np.fft.fft(f,n) # Compute the FFT
PSD = fhat * np.conj(fhat) / n # Power spectrum (power per freq)
freq = (1/(dt*n)) * np.arange(n) # Create x-axis of frequencies in Hz
L = np.arange(1,np.floor(n/2),dtype='int') # Only plot the first half of freqs
#print('PSD value is', PSD, '\n\nfhat is', fhat, '\n\nfrequency is', freq)
plt.plot(freq, PSD)
# Extracting datasets ## Use the PSD to filter out noise
indices = PSD > 0.2 # Find all freqs with large power
low_indices = PSD < 0.2 # Find all freqs with small power
PSDnoise = PSD[low_indices] # store smaller freqs
PSDclean = PSD * indices # Zero out all others
fhat_noise = low_indices * fhat
fhat = indices * fhat # Zero out small Fourier coeffs. in Y
ffilt = np.fft.ifft(fhat) # Inverse FFT for filtered time signal
noise = np.fft.ifft(fhat_noise)
I repeated this process on the noise data multiple times, and the graph I obtained from the three steps is:
May I know why these two signals behave differently?
Thanks in advance!!
I am having issues fitting a Gaussian to my data. Currently the output of my code looks like
this, where orange is the data, blue is the Gaussian fit, and green is an in-built Gaussian fitter; I do not wish to use the latter, as it never quite begins at zero and I do not have access to its code. I would like my output to look something like this, where the curve drawn in red is the Gaussian fit.
I have tried reading the curve_fit documentation, but at best I get a fit that looks like this, which fits over all of the data. However, this is undesirable, as I am only interested in the central peak, and that is my main issue: I do not know how to get curve_fit to fit a Gaussian to the central peak, as in the second image.
I have considered using a weights function like np.random.choice(), or finding the data file's maximum value and then checking the second derivative on either side of the central peak for changes of inflection, but I am unsure how best to implement this.
How would I best go about this? I have done a lot of googling but can't quite get my head around adapting curve_fit to suit my needs.
Cheers for any pointers!
This is a data file.
https://drive.google.com/file/d/1qrAkD74U6L46GoGnvMiUHdPuLEToS6Pv/view?usp=sharing
This is my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from matplotlib.pyplot import figure

plt.close('all')

fpathB4 = 'E:\.1. Work - Current Projects + Old Projects\Current Projects\PF 4MHz Laser System\.8. 1050 SYSTEM\AC traces'
fpath = fpathB4.replace('\\','/') + ('/')
filename = '300'

with open(fpath+filename) as f:
    dataraw = f.readlines()

FWHM = dataraw[8].split(':')[1].split()[0]
FWHM = float(FWHM)
print("##### For AC file -", filename, "#####")
print("Auto-co guess -", FWHM, "ps")
pulseduration = FWHM/np.sqrt(2)
pulseduration = str(pulseduration)
dataraw = dataraw[15:]
print("Pulse duration -", pulseduration, "ps" + "\n")

time = np.array([])
acf1 = np.array([])  #### DATA
fit = np.array([])   #### Gaussian fit

for k in dataraw:
    data = k.split()
    time = np.append(time, float(data[0]))
    acf1 = np.append(acf1, float(data[1]))
    fit = np.append(fit, float(data[2]))

n = len(time)
y = acf1.copy()
x = time.copy()

mean = sum(x*y)/n
sigma = sum(y*(x-mean)**2)/n

def gaus(x, a, x0, sigma):
    return a*np.exp(-(x-x0)**2/(2*sigma**2))

popt, pcov = curve_fit(gaus, x, y, p0=[1, mean, sigma])

plt.plot(x, gaus(x, *popt)/np.max(gaus(x, *popt)))

figure(num=1, figsize=(8, 3), dpi=96, facecolor='w', edgecolor='k')  # figsize = (length, height)
plt.plot(time, acf1/np.max(acf1), label='Data - ' + filename, linewidth=1)
plt.plot(time, fit/np.max(fit), label='$FWHM_{{\Delta t}}$ (ps) = ' + pulseduration)
plt.autoscale(enable=True, axis='x', tight=True)
plt.title("Auto-Correlation Data")
plt.xlabel("Time (ps)")
plt.ylabel("Intensity (a.u.)")
plt.legend()
I think the problem might be that the data are not completely Gaussian-like; it seems you have some kind of Airy/sinc component due to the time resolution of your acquisition instrument. Still, if you are only interested in the centre, you can fit it using a single Gaussian:
import numpy as np
import pandas as pd
import fitwrap as fw

df = pd.read_csv('300', skip_blank_lines=True, skiprows=13, sep='\s+')

def gaussian_no_offset(x, x0=2, sigma=1, amp=300):
    return amp*np.exp(-(x-x0)**2/sigma**2)

fw.fit(gaussian_no_offset, df.time, df.acf1)
x0: 2.59158 +/- 0.00828 (0.3%) initial:2
sigma: 0.373 +/- 0.0117 (3.1%) initial:1
amp: 355.02 +/- 9.65 (2.7%) initial:300
If you want to be slightly more precise, I can think of a sinc-squared function for the peak plus a broad Gaussian offset. The fit seems nicer, but it really depends on what the data actually represent...
def sinc(x, x0=2.5, amp=300, width=1, amp_g=20, sigma=3):
    return amp*(np.sinc((x-x0)/width))**2 + amp_g*np.exp(-(x-x0)**2/sigma**2)

fw.fit(sinc, df.time, df.acf1)
x0: 2.58884 +/- 0.0021 (0.1%) initial:2.5
amp: 303.84 +/- 3.7 (1.2%) initial:300
width: 0.49211 +/- 0.00565 (1.1%) initial:1
amp_g: 81.32 +/- 2.11 (2.6%) initial:20
sigma: 1.512 +/- 0.0351 (2.3%) initial:3
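If fitwrap is not available, a rough equivalent can be sketched with plain scipy.optimize.curve_fit by masking the data to a window around the maximum before fitting (the 1 ps half-width and the df.time/df.acf1 columns from above are assumptions to be tuned to the actual trace):
import numpy as np
from scipy.optimize import curve_fit

t = df.time.values
a = df.acf1.values

# keep only the points near the peak so the wings do not pull the fit
center = t[np.argmax(a)]
mask = np.abs(t - center) < 1.0   # assumed half-width in ps

popt, pcov = curve_fit(gaussian_no_offset, t[mask], a[mask],
                       p0=[center, 0.5, a.max()])
print(popt)  # fitted x0, sigma, amp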
I'd add a constant offset to the Gaussian equation and limit its range via the bounds parameter of curve_fit, so that the fitted curve isn't raised too high.
So your equation would be:
def gaus(x, y0, a, x0, sigma):
    return y0 + a*np.exp(-(x-x0)**2/(2*sigma**2))
and the curve_fit bounds would be something like this:
curve_fit(..... ,bounds = [[0,a_min, x0_min, sigma_min],[0.1, a_max, x0_max, sigma_max]])
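A minimal, self-contained sketch of what that call could look like (the bound values are placeholders to be picked from your data; note that the independent variable must be the first argument of the model function for curve_fit, and here the data are normalised so that the 0.1 offset bound makes sense):
import numpy as np
from scipy.optimize import curve_fit

def gaus(x, y0, a, x0, sigma):
    return y0 + a*np.exp(-(x-x0)**2/(2*sigma**2))

# x, y are the time and acf1 arrays from the question
y_norm = y / np.max(y)

# bounds are ([lower...], [upper...]) in the order (y0, a, x0, sigma)
p0 = [0.05, 1.0, x[np.argmax(y_norm)], 1.0]
popt, pcov = curve_fit(gaus, x, y_norm, p0=p0,
                       bounds=([0, 0, x.min(), 0], [0.1, np.inf, x.max(), np.inf]))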
Given this simulated data:
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.tsa.statespace.structural import UnobservedComponents
np.random.seed(12345)
ar = np.r_[1, 0.9]
ma = np.array([1])
arma_process = ArmaProcess(ar, ma)
X = 100 + arma_process.generate_sample(nsample=100)
y = 1.2 * X + np.random.normal(size=100)
We build an UnobservedComponents model on the first 70 points in order to run inferences on the last 30 points, like so:
model = UnobservedComponents(y[:70], level='llevel', exog=X[:70])
f_model = model.fit()
forecaster = f_model.get_forecast(
    steps=30,
    exog=X[70:].reshape(-1, 1)
)
conf_int = forecaster.conf_int()
If we take the mean of the 95% confidence interval bounds, we get the following:
conf_int.mean(axis=0)
array([118.19789195, 122.14101161])
But when trying to get the same values through model simulations, we don't quite get the same results. Here's the script we run for the simulated boundaries:
sim_model = UnobservedComponents(np.zeros(30), level='llevel', exog=X[70:])
res = []
predicted_state = f_model.predicted_state[..., -1]
predicted_state_cov = f_model.predicted_state_cov[..., -1]

for i in range(1000):
    init_state = np.random.multivariate_normal(
        predicted_state,
        predicted_state_cov
    )
    sim = sim_model.simulate(
        f_model.params,
        30,
        initial_state=init_state)
    res.append(sim.mean())
Printing the lower 2.5 and upper 97.5 percentiles, we get:
np.percentile(res, [2.5, 97.5])
array([119.06735028, 121.26810407])
Since we use model simulations to distinguish signal from noise in the data, this difference ended up being big enough to lead to contradictory conclusions. If, for instance, we make:
y[70:] += 1
Then according to the first technique we conclude that the new y carries no signal, as its mean is lower than 122.14. But the same is not true with the second technique: as the upper boundary is 121.2, we conclude that there is a signal.
What we are trying to understand now is whether this is expected. Shouldn't the lower and upper 95% confidence interval of both techniques be equal?
I am reading from a dataset which looks like the following when plotted in matplotlib, with a best-fit curve obtained by linear regression.
A sample of the data looks like the following:
# ID X Y px py pz M R
1.04826492772e-05 1.04828050287e-05 1.048233088e-05 0.000107002791008 0.000106552433081 0.000108704469007 387.02 4.81947797625e+13
1.87380963036e-05 1.87370588085e-05 1.87372620448e-05 0.000121616280029 0.000151924707761 0.00012371156585 428.77 6.54636174067e+13
3.95579877816e-05 3.95603773653e-05 3.95610756809e-05 0.000163470663023 0.000265203868883 0.000228031803626 470.74 8.66961875758e+13
My code looks like the following:
# Regression Function
def regress(x, y):
    # Return a tuple of predicted y values and parameters for linear regression.
    p = sp.stats.linregress(x, y)
    b1, b0, r, p_val, stderr = p
    y_pred = sp.polyval([b1, b0], x)
    return y_pred, p
# plotting z
xz, yz = M, Y_z # data, non-transformed
y_pred, _ = regress(xz, np.log(yz)) # change here # transformed input
plt.semilogy(xz, yz, marker='o',color ='b', markersize=4,linestyle='None', label="l.o.s within R500")
plt.semilogy(xz, np.exp(y_pred), "b", label = 'best fit') # transformed output
However, I can see a lot of upward scatter in the data, and the best-fit curve is affected by it. So first I want to isolate the data points that are 2 and 3 sigma away from my mean and mark them with a circle around them.
Then I want to take the best-fit curve considering only the points that fall within 1 sigma of my mean.
Is there a good function in python which can do that for me?
In addition, may I also isolate those points from my actual dataset? For example, if the third row in the sample input represents a 2-sigma deviation, could I get that row as output too, to save and investigate further?
Your help is most appreciated.
Here's some code that goes through the data in a number of windows, calculates statistics in each window, and separates the data into well-behaved and misbehaved lists.
Hope this helps.
from scipy import stats
from scipy import polyval
import numpy as np
import matplotlib.pyplot as plt
num_data = 10000
fake_data_x = np.sort(12.8+np.random.random(num_data))
fake_data_y = np.exp(fake_data_x) + np.random.normal(0,scale=50000,size=num_data)
# Regression Function
def regress(x, y):
    # Return a tuple of predicted y values and parameters for linear regression.
    p = stats.linregress(x, y)
    b1, b0, r, p_val, stderr = p
    y_pred = polyval([b1, b0], x)
    return y_pred, p
# plotting z
xz, yz = fake_data_x, fake_data_y # data, non-transformed
y_pred, _ = regress(xz, np.log(yz)) # change here # transformed input
plt.figure()
plt.semilogy(xz, yz, marker='o',color ='b', markersize=4,linestyle='None', label="l.o.s within R500")
plt.semilogy(xz, np.exp(y_pred), "b", label = 'best fit') # transformed output
plt.show()
num_bin_intervals = 10  # approx number of points per averaging window
window_boundaries = np.linspace(min(fake_data_x), max(fake_data_x), int(len(fake_data_x)/num_bin_intervals))  # window boundaries
y_good = [] # list to collect the "well-behaved" y-axis data
x_good = [] # list to collect the "well-behaved" x-axis data
y_outlier = []
x_outlier = []
for i in range(len(window_boundaries)-1):
    # create a boolean mask to select the data within the averaging window
    window_indices = (fake_data_x <= window_boundaries[i+1]) & (fake_data_x > window_boundaries[i])
    # separate the pieces of data in the window
    fake_data_x_slice = fake_data_x[window_indices]
    fake_data_y_slice = fake_data_y[window_indices]
    # calculate the mean and std of the y values in the window
    y_mean = np.mean(fake_data_y_slice)
    y_std = np.std(fake_data_y_slice)
    # choose and select the outliers
    y_outliers = fake_data_y_slice[np.abs(fake_data_y_slice - y_mean) >= 2*y_std]
    x_outliers = fake_data_x_slice[np.abs(fake_data_y_slice - y_mean) >= 2*y_std]
    # choose and select the good ones
    y_goodies = fake_data_y_slice[np.abs(fake_data_y_slice - y_mean) < 2*y_std]
    x_goodies = fake_data_x_slice[np.abs(fake_data_y_slice - y_mean) < 2*y_std]
    # extend the lists with all the good and the bad
    y_good.extend(list(y_goodies))
    y_outlier.extend(list(y_outliers))
    x_good.extend(list(x_goodies))
    x_outlier.extend(list(x_outliers))
plt.figure()
plt.semilogy(x_good,y_good,'o')
plt.semilogy(x_outlier,y_outlier,'r*')
plt.show()
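The question also asks for a best-fit curve that uses only the points inside the chosen sigma band. A minimal follow-up sketch, re-using the regress helper and the lists built above (with the 2-sigma cut from this answer; change the threshold to 1*y_std to match the question exactly), could look like:
# refit the regression on the well-behaved points only
x_good_arr = np.asarray(x_good)
y_good_arr = np.asarray(y_good)
y_pred_good, _ = regress(x_good_arr, np.log(y_good_arr))

plt.figure()
plt.semilogy(x_good_arr, y_good_arr, 'o', markersize=3, label='within 2 sigma')
plt.semilogy(x_outlier, y_outlier, 'r*', label='outliers')
plt.semilogy(x_good_arr, np.exp(y_pred_good), 'k', label='refit without outliers')
plt.legend()
plt.show()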
I'm trying to understand scipy.signal.deconvolve.
From the mathematical point of view, a convolution is just multiplication in Fourier space, so I would expect
that for two functions f and g:
Deconvolve(Convolve(f,g) , g) == f
In numpy/scipy this is either not the case or I'm missing an important point.
Although there are some questions related to deconvolve on SO already (like here and here) they do not address this point, others remain unclear (this) or unanswered (here). There are also two questions on SignalProcessing SE (this and this) the answers to which are not helpful in understanding how scipy's deconvolve function works.
The question would be:
How do you reconstruct the original signal f from a convolved signal,
assuming you know the convolving function g?
Or in other words: How does this pseudocode Deconvolve(Convolve(f,g) , g) == f translate into numpy / scipy?
Edit: Note that this question is not targeted at preventing numerical inaccuracies (although this is also an open question) but at understanding how convolve/deconvolve work together in scipy.
The following code tries to do that with a Heaviside function and a Gaussian filter.
As can be seen in the image, the result of deconvolving the convolution is not at
all the original Heaviside function. I would be glad if someone could shed some light on this issue.
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
# Define heaviside function
H = lambda x: 0.5 * (np.sign(x) + 1.)
#define gaussian
gauss = lambda x, sig: np.exp(-( x/float(sig))**2 )
X = np.linspace(-5, 30, num=3501)
X2 = np.linspace(-5,5, num=1001)
# convolve a Heaviside with a Gaussian
H_c = np.convolve( H(X), gauss(X2, 1), mode="same" )
# deconvolve the result
H_dc, er = scipy.signal.deconvolve(H_c, gauss(X2, 1) )
#### Plot ####
fig , ax = plt.subplots(nrows=4, figsize=(6,7))
ax[0].plot( H(X), color="#907700", label="Heaviside", lw=3 )
ax[1].plot( gauss(X2, 1), color="#907700", label="Gauss filter", lw=3 )
ax[2].plot( H_c/H_c.max(), color="#325cab", label="convoluted" , lw=3 )
ax[3].plot( H_dc, color="#ab4232", label="deconvoluted", lw=3 )
for i in range(len(ax)):
    ax[i].set_xlim([0, len(X)])
    ax[i].set_ylim([-0.07, 1.2])
    ax[i].legend(loc=4)
plt.show()
Edit: Note that there is a Matlab example showing how to convolve/deconvolve a rectangular signal using
yc=conv(y,c,'full')./sum(c);
ydc=deconv(yc,c).*sum(c);
In the spirit of this question it would also help if someone was able to translate this example into python.
After some trial and error I found out how to interpret the results of scipy.signal.deconvolve(), and I post my findings as an answer.
Let's start with working example code:
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
# let the signal be box-like
signal = np.repeat([0., 1., 0.], 100)
# and use a gaussian filter
# the filter should be shorter than the signal
# the filter should be such that it's much bigger than zero everywhere
gauss = np.exp(-( (np.linspace(0,50)-25.)/float(12))**2 )
print(gauss.min())  # = 0.013 >> 0
# calculate the convolution (np.convolve and scipy.signal.convolve are identical)
# the keyword argument mode="same" ensures that the convolution spans the same
# shape as the input array.
#filtered = scipy.signal.convolve(signal, gauss, mode='same')
filtered = np.convolve(signal, gauss, mode='same')
deconv, _ = scipy.signal.deconvolve( filtered, gauss )
# the deconvolution has n = len(signal) - len(gauss) + 1 points
n = len(signal) - len(gauss) + 1
# so we need to expand it by
s = (len(signal) - n) // 2
# on both sides.
deconv_res = np.zeros(len(signal))
deconv_res[s:len(signal)-s-1] = deconv
deconv = deconv_res
# now deconv contains the deconvolution
# expanded to the original shape (filled with zeros)
#### Plot ####
fig , ax = plt.subplots(nrows=4, figsize=(6,7))
ax[0].plot(signal, color="#907700", label="original", lw=3 )
ax[1].plot(gauss, color="#68934e", label="gauss filter", lw=3 )
# we need to divide by the sum of the filter window to get the convolution normalized to 1
ax[2].plot(filtered/np.sum(gauss), color="#325cab", label="convoluted" , lw=3 )
ax[3].plot(deconv, color="#ab4232", label="deconvoluted", lw=3 )
for i in range(len(ax)):
    ax[i].set_xlim([0, len(signal)])
    ax[i].set_ylim([-0.07, 1.2])
    ax[i].legend(loc=1, fontsize=11)
    if i != len(ax)-1:
        ax[i].set_xticklabels([])
plt.savefig(__file__ + ".png")
plt.show()
This code produces the following image, showing exactly what we want (Deconvolve(Convolve(signal,gauss) , gauss) == signal)
Some important findings are:
The filter should be shorter than the signal
The filter should be much bigger than zero everywhere (here > 0.013 is good enough)
Using the keyword argument mode = 'same' to the convolution ensures that it lives on the same array shape as the signal.
The deconvolution has n = len(signal) - len(gauss) + 1 points.
So in order to let it also reside on the original array shape, we need to expand it by s = (len(signal) - n) // 2 on both sides.
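A compact way to restate the last two points is a small helper (the name is mine) that pads a deconvolution result back onto the original grid:
def expand_to_signal_shape(deconv, signal_len):
    # scipy.signal.deconvolve returns n = len(signal) - len(filter) + 1 points
    n = len(deconv)
    s = (signal_len - n) // 2
    out = np.zeros(signal_len)
    out[s:s + n] = deconv   # zero-padded by s samples on both sides
    return out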
Of course, further findings, comments and suggestions on this question are still welcome.
As written in the comments, I cannot help with the example you posted originally. As @Stelios has pointed out, the deconvolution might not work out due to numerical issues.
I can, however, reproduce the example you posted in your Edit:
This is the code, which is a direct translation of the Matlab source code:
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt
x = np.arange(0., 20.01, 0.01)
y = np.zeros(len(x))
y[900:1100] = 1.
y += 0.01 * np.random.randn(len(y))
c = np.exp(-(np.arange(len(y))) / 30.)
yc = scipy.signal.convolve(y, c, mode='full') / c.sum()
ydc, remainder = scipy.signal.deconvolve(yc, c)
ydc *= c.sum()
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(4, 4))
ax[0][0].plot(x, y, label="original y", lw=3)
ax[0][1].plot(x, c, label="c", lw=3)
ax[1][0].plot(x[0:2000], yc[0:2000], label="yc", lw=3)
ax[1][1].plot(x, ydc, label="recovered y", lw=3)
plt.show()