How to add error bars to a histogram in Python

Hi, I want to add error bars to the histogram in this code. I have seen a few posts about it, but I didn't find them helpful. This code produces random numbers with a Gaussian distribution and applies a kernel density estimate to them. I need error bars to estimate how inaccurate the histogram is as the bandwidth changes.
from random import *
import numpy as np
from matplotlib.pyplot import *
from matplotlib import *
import scipy.stats as stats

def hist_with_kde(data, bandwidth=0.3):
    # set number of bins using the Freedman-Diaconis rule
    q1 = np.percentile(data, 25)
    q3 = np.percentile(data, 75)
    n = len(data)**(.1/.3)
    rng = max(data) - min(data)
    iqr = 2*(q3 - q1)
    bins = int((n*rng)/iqr)
    print(bins)
    x = np.linspace(min(data), max(data), 200)
    kde = stats.gaussian_kde(data, 'scott')
    kde._compute_covariance()
    kde.set_bandwidth()
    plot(x, kde(x), 'r')  # distribution function
    hist(data, bins=bins, normed=True)  # histogram

data = np.random.normal(0, 1, 1000)
hist_with_kde(data, 30)
show()

Combining the answer mentioned above with your code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats

def hist_with_kde(data, bandwidth=0.3):
    # set number of bins using the Freedman-Diaconis rule
    q1 = np.percentile(data, 25)
    q3 = np.percentile(data, 75)
    n = len(data)**(.1/.3)
    rng = max(data) - min(data)
    iqr = 2*(q3 - q1)
    bins = int((n*rng)/iqr)
    print(bins)
    x = np.linspace(min(data), max(data), 200)
    kde = stats.gaussian_kde(data, 'scott')
    kde._compute_covariance()
    kde.set_bandwidth()
    plt.plot(x, kde(x), 'r')  # distribution function
    y, binEdges = np.histogram(data, bins=bins, density=True)  # `normed` was removed from NumPy; `density` is the modern equivalent
    bincenters = 0.5*(binEdges[1:] + binEdges[:-1])
    menStd = np.sqrt(y)
    width = 0.2
    plt.bar(bincenters, y, width=width, color='r', yerr=menStd)

data = np.random.normal(0, 1, 1000)
hist_with_kde(data, 30)
plt.show()
And have a look at the imports, as mentioned by MaxNoe.

You can do it like this:
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
data = np.random.normal(size=10000)
# plt.hist gives you the entries, edges,
# and drawables; we do not need the drawables:
entries, edges, _ = plt.hist(data, bins=25, range=[-5, 5])
# calculate bin centers
bin_centers = 0.5 * (edges[:-1] + edges[1:])
# draw errorbars, using the sqrt error; you can use what you want there
# (Poissonian 1-sigma intervals would make more sense)
plt.errorbar(bin_centers, entries, yerr=np.sqrt(entries), fmt='r.')
plt.show()
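As the comment above hints, Poissonian 1-sigma intervals are a better fit for counted data. Here is a sketch of how that could look (not part of the original answer; it reuses the entries and bin_centers arrays from the snippet above and uses the chi-squared quantile construction for central Poisson intervals):
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats

def poisson_interval(k, cl=0.6827):
    # Central Poisson interval at confidence level cl (1 sigma by default),
    # computed from chi-squared quantiles (the Garwood construction).
    alpha = 1 - cl
    low = stats.chi2.ppf(alpha / 2, 2 * k) / 2
    high = stats.chi2.ppf(1 - alpha / 2, 2 * (k + 1)) / 2
    return np.nan_to_num(low), high  # chi2.ppf is NaN at k == 0; the lower limit there is 0

low, high = poisson_interval(entries)
plt.errorbar(bin_centers, entries,
             yerr=[entries - low, high - entries], fmt='r.')
plt.show()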

This looks like a duplicate of Matplotlib histogram with errorbars, i.e. you have to use matplotlib.bar() to get error bars.
In your example, that will look something like this:
You can replace
hist(data, bins=bins, normed=True)
with
y, binEdges = np.histogram(data, bins=bins)
bincenters = 0.5*(binEdges[1:] + binEdges[:-1])
menStd = np.sqrt(y)
width = 0.1
bar(bincenters, y, width=width, color='r', yerr=menStd)
Play around with the parameters until you find something you like :)
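For instance, instead of the hard-coded width, one small tweak (a sketch, reusing binEdges, bincenters, y, and menStd from the snippet above) is to take the bar width from the actual bin edges so the bars tile the x-axis:
width = binEdges[1] - binEdges[0]  # actual bin width from the histogram edges
bar(bincenters, y, width=width, color='r', yerr=menStd)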

Related

How to numerically compute the mass map and density map for a collection of masses?

Good day to everyone. I was wondering if there is any way to extract a mass map and a mass density map from a scatter plot of mass distributions.
Developing the code for the mass distributions:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.filters import gaussian_filter
from numpy.random import rand
# Finds nran number of random points in two dimensions
def randomizer(nran):
    arr = rand(nran, 2)
    return arr

# Calculates a sort of 'density' plot. Based on a previous StackOverflow question:
# https://stackoverflow.com/questions/2369492/generate-a-heatmap-in-matplotlib-using-a-scatter-data-set
def myplot(x, y, s, bins=1000):
    plot, xedges, yedges = np.histogram2d(x, y, bins=bins)
    plot = gaussian_filter(plot, sigma=s)
    extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
    return plot.T, extent
Trying out an example:
arr = randomizer(1000)
plot, extent = myplot(arr[:, 0], arr[:, 1], 20)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
ax[0].scatter(arr[:, 0], arr[:, 1])
ax[0].set_aspect('equal')
ax[0].set_xlabel('x')
ax[0].set_ylabel('y')
ax[0].set_title('Scatter Plot')
img = ax[1].imshow(plot)
ax[1].set_title('Density Plot?')
ax[1].set_aspect('equal')
ax[1].set_xlabel('x')
ax[1].set_ylabel('y')
plt.colorbar(img)
This yields a scatter plot and what I think roughly represents a density plot (please correct me if I'm wrong). Now, suppose that each dot has a mass of 50 kg. Does the "density plot" represent a map of the total mass distribution (if that makes sense), given that the colorbar has a maximum value much less than 50? Then, using this, how can I compute a mass density for this mass distribution? I would really appreciate it if someone could help. Thank you.
Edit: Added the website I got the heatmap function from.
Okay, I think I've got the solution. I've been meaning to upload this for quite some time. Here it goes:
# Importing packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from numpy.random import random
from scipy.stats import binned_statistic_2d
# Finds nran number of random points in two dimensions
def randomizer(nran):
    arr_x = []
    arr_y = []
    for i in range(nran):
        # random() only produces floats in (0, 1), so multiply by 10 (for illustrative purposes)
        arr_x += [10 * random()]
        arr_y += [10 * random()]
    return arr_x, arr_y

# Computing weight array
def weights_array(weight, length):
    weights = np.array([weight] * length)
    return weights

# Computes a weighted histogram and divides it by the total grid area to get the density
def histogramizer(x_array, y_array, weights, num_pixels, Dimension):
    Range = [0, Dimension]  # Assumes the weights are distributed in a square area
    grid, _, _, _ = binned_statistic_2d(x_array, y_array, weights, 'sum',
                                        bins=num_pixels, range=[Range, Range])
    area = int(np.max(x_array)) * int(np.max(y_array))
    density = grid/area
    return density
Then, actually implementing this, one finds:
arr_x, arr_y = randomizer(1000000)
weights = []
for i in range(len(arr_x)):
    weights += [50]
density = histogramizer(arr_x, arr_y, weights, [400, 400], np.max(arr_x))
fig, ax = plt.subplots(figsize=(15, 5))
plt.imshow(density, extent=[0, int(np.max(arr_x)), 0, int(np.max(arr_x))])
plt.colorbar(label=r'$kg\,m^{-2}$')  # exponent braced so mathtext renders m^{-2} correctly
The result I got was the following plot (I know it's generally not recommended to add a photo, but I wanted to include it to show my code's output):
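As a quick sanity check (a sketch, not part of the original answer, reusing density, arr_x, and arr_y from above): since histogramizer divides the per-pixel mass sums by the total area, integrating the density back over that area should roughly recover the total mass:
# Sketch: total mass recovered from the density map
area = int(np.max(arr_x)) * int(np.max(arr_y))  # same area used inside histogramizer
total_mass = density.sum() * area
print(total_mass)  # close to 50 kg * 1,000,000 points = 5e7 kg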

How to plot confidence interval of a time series data in Python?

There are multiple questions on this topic; however, I cannot use them to solve my problem. I have a data sample and I want to create a confidence interval for its curve. Here is a simple example:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
mean, lower, upper = [], [], []
ci = 0.2
for i in range(20):
    a = np.random.rand(100)  # this is the output
    MEAN = np.mean(a)
    mean.append(MEAN)
    std = np.std(a)
    Upper = MEAN + ci*std
    Lower = MEAN - ci*std
    lower.append(Lower)
    upper.append(Upper)

plt.figure(figsize=(20, 8))
plt.plot(mean, '-b', label='mean')
plt.plot(upper, '-r', label='upper')
plt.plot(lower, '-g', label='lower')
plt.xlabel("Value", fontsize=30)
plt.ylabel("Loss", fontsize=30)
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.legend(loc=4, prop={'size': 30})
In the above example, I drew an 80% confidence interval. I have two questions:
1. Could you please tell me whether this way of calculating and plotting the confidence interval is correct?
2. I want to color in the shaded area of the confidence interval, as in the figure I have attached. Could you please tell me if you have any solution? Thanks for your help.
I'm not qualified to answer question 1; however, the answers to this SO question produce different results from your code.
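For reference, a hedged sketch of the usual normal-approximation calculation for question 1: a two-sided 80% interval uses the 0.9 quantile of the standard normal (about 1.28, not the fixed 0.2 factor above), and an interval for the mean divides the standard deviation by sqrt(n):
import numpy as np
import scipy.stats

a = np.random.rand(100)
z = scipy.stats.norm.ppf(0.9)  # two-sided 80% interval -> z ~ 1.2816
half_width = z * np.std(a, ddof=1) / np.sqrt(len(a))  # interval for the mean
print(np.mean(a) - half_width, np.mean(a) + half_width)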
As for question 2, you can use matplotlib's fill_between to fill the area between two curves (the upper and lower ones in your example).
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
# https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data
def mean_confidence_interval(data, confidence=0.95):
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, m - h, m + h

mean, lower, upper = [], [], []
ci = 0.8
for i in range(20):
    a = np.random.rand(100)  # this is the output
    m, ml, mu = mean_confidence_interval(a, ci)
    mean.append(m)
    lower.append(ml)
    upper.append(mu)

plt.figure()
plt.plot(mean, '-b', label='mean')
plt.plot(upper, '-r', label='upper')
plt.plot(lower, '-g', label='lower')
# fill the area with black color, opacity 0.15
plt.fill_between(list(range(len(mean))), upper, lower, color="k", alpha=0.15)
plt.xlabel("Value")
plt.ylabel("Loss")
plt.legend()

How to segment a gaussian function to equal-volume parts

I'm trying to split a Gaussian-shaped curve into K equal-volume segments with Python, for signal-filtering purposes.
I'm looking for pseudo-code, a general idea, or a library that does this.
Any help will be much appreciated.
Thanks!
For example, in the image below for K=6, the volumes s1 = s2 = ... = s6:
You need to determine the percentiles of the distribution. You can use the scipy.stats.norm class and its .ppf() method for this.
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt
mu = 25
sigma = 4
splits = 8

# define the normal distribution and PDF
dist = sps.norm(loc=mu, scale=sigma)
x = np.linspace(dist.ppf(.001), dist.ppf(.999))
y = dist.pdf(x)

# calculate PPFs
step = 1 / splits
quantiles = np.arange(step, 1.0 - step / 2, step)
ppfs = dist.ppf(quantiles)  # boundaries

# plot results
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, y, color='k')
for i, ppf in enumerate(ppfs):
    ax.axvline(ppf, color=f'C{i}', label=f'{quantiles[i]:.3f}: {ppf:.1f}')
ax.legend()
plt.show()
This is based on this answer.
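As a quick check (a sketch, reusing dist, ppfs, and splits from above), the probability mass between consecutive boundaries should come out to exactly 1/splits:
edges = np.concatenate(([-np.inf], ppfs, [np.inf]))  # close the outer segments
masses = np.diff(dist.cdf(edges))
print(masses)  # every element equals 1/splits = 0.125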

Random Number from Histogram

Suppose I create a histogram using scipy/numpy, so I have two arrays: one for the bin counts, and one for the bin edges. If I use the histogram to represent a probability distribution function, how can I efficiently generate random numbers from that distribution?
It's probably what np.random.choice does in @Ophion's answer, but you can construct a normalized cumulative distribution function and then choose based on a uniform random number:
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
data = np.random.normal(size=1000)
hist, bins = np.histogram(data, bins=50)
bin_midpoints = bins[:-1] + np.diff(bins)/2
cdf = np.cumsum(hist)
cdf = cdf / cdf[-1]
values = np.random.rand(10000)
value_bins = np.searchsorted(cdf, values)
random_from_cdf = bin_midpoints[value_bins]
plt.subplot(121)
plt.hist(data, 50)
plt.subplot(122)
plt.hist(random_from_cdf, 50)
plt.show()
A 2D case can be done as follows:
data = np.column_stack((np.random.normal(scale=10, size=1000),
                        np.random.normal(scale=20, size=1000)))
x, y = data.T
hist, x_bins, y_bins = np.histogram2d(x, y, bins=(50, 50))
x_bin_midpoints = x_bins[:-1] + np.diff(x_bins)/2
y_bin_midpoints = y_bins[:-1] + np.diff(y_bins)/2
cdf = np.cumsum(hist.ravel())
cdf = cdf / cdf[-1]
values = np.random.rand(10000)
value_bins = np.searchsorted(cdf, values)
x_idx, y_idx = np.unravel_index(value_bins,
                                (len(x_bin_midpoints),
                                 len(y_bin_midpoints)))
random_from_cdf = np.column_stack((x_bin_midpoints[x_idx],
                                   y_bin_midpoints[y_idx]))
new_x, new_y = random_from_cdf.T
plt.subplot(121, aspect='equal')
plt.hist2d(x, y, bins=(50, 50))
plt.subplot(122, aspect='equal')
plt.hist2d(new_x, new_y, bins=(50, 50))
plt.show()
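One optional refinement (a sketch, not in the original answer, reusing the arrays above): the samples land exactly on bin midpoints, so adding uniform jitter within each bin removes the resulting grid pattern:
# np.histogram2d with bins=(50, 50) over the data range gives uniform bins,
# so a single width per axis suffices
x_jitter = (np.random.rand(len(x_idx)) - 0.5) * (x_bins[1] - x_bins[0])
y_jitter = (np.random.rand(len(y_idx)) - 0.5) * (y_bins[1] - y_bins[0])
new_x_jittered = x_bin_midpoints[x_idx] + x_jitter
new_y_jittered = y_bin_midpoints[y_idx] + y_jitter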
@Jaime's solution is great, but you should consider using a KDE (kernel density estimate) of the histogram. A great explanation of why it's problematic to do statistics over a histogram, and why you should use a KDE instead, can be found here.
I edited @Jaime's code to show how to use a KDE from scipy. It looks almost the same, but better captures the generating distribution.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
def run():
    data = np.random.normal(size=1000)
    hist, bins = np.histogram(data, bins=50)
    x_grid = np.linspace(min(data), max(data), 1000)
    kdepdf = kde(data, x_grid, bandwidth=0.1)
    random_from_kde = generate_rand_from_pdf(kdepdf, x_grid)
    bin_midpoints = bins[:-1] + np.diff(bins) / 2
    random_from_cdf = generate_rand_from_pdf(hist, bin_midpoints)

    plt.subplot(121)
    plt.hist(data, 50, density=True, alpha=0.5, label='hist')  # `normed` was removed from Matplotlib
    plt.plot(x_grid, kdepdf, color='r', alpha=0.5, lw=3, label='kde')
    plt.legend()
    plt.subplot(122)
    plt.hist(random_from_cdf, 50, alpha=0.5, label='from hist')
    plt.hist(random_from_kde, 50, alpha=0.5, label='from kde')
    plt.legend()
    plt.show()

def kde(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with Scipy"""
    kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
    return kde.evaluate(x_grid)

def generate_rand_from_pdf(pdf, x_grid):
    cdf = np.cumsum(pdf)
    cdf = cdf / cdf[-1]
    values = np.random.rand(1000)
    value_bins = np.searchsorted(cdf, values)
    random_from_cdf = x_grid[value_bins]
    return random_from_cdf
Perhaps something like this: use the counts of the histogram as weights, and choose values from the bin edges based on those weights.
import numpy as np

initial = np.random.rand(1000)
values, indices = np.histogram(initial, bins=20)
values = values.astype(np.float32)
weights = values/np.sum(values)

# Below, 5 is the size of the returned array.
new_random = np.random.choice(indices[1:], 5, p=weights)
print(new_random)
# [ 0.55141614  0.30226256  0.25243184  0.90023117  0.55141614]
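A hedged tweak to the snippet above: np.random.choice draws from the upper bin edges (indices[1:]), which shifts every sample by half a bin; drawing from the bin centers avoids that:
centers = (indices[:-1] + indices[1:]) / 2  # bin centers instead of upper edges
new_random = np.random.choice(centers, 5, p=weights)
print(new_random)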
I had the same problem as the OP, and I would like to share my approach.
Following @Jaime's answer and @Noam Peled's answer, I've built a solution for a 2D problem using Kernel Density Estimation (KDE).
First, let's generate some random data and then calculate its Probability Density Function (PDF) from the KDE. I will use the example available in SciPy for that.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def measure(n):
    "Measurement model, return two coupled measurements."
    m1 = np.random.normal(size=n)
    m2 = np.random.normal(scale=0.5, size=n)
    return m1 + m2, m1 - m2

m1, m2 = measure(2000)
xmin = m1.min()
xmax = m1.max()
ymin = m2.min()
ymax = m2.max()

X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([m1, m2])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)

fig, ax = plt.subplots()
ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r,
          extent=[xmin, xmax, ymin, ymax])
ax.plot(m1, m2, 'k.', markersize=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
Now, we obtain random data from the PDF obtained from the KDE, which is the variable Z.
# Generate the bins for each axis
x_bins = np.linspace(xmin, xmax, Z.shape[0]+1)
y_bins = np.linspace(ymin, ymax, Z.shape[1]+1)
# Find the middle point for each bin
x_bin_midpoints = x_bins[:-1] + np.diff(x_bins)/2
y_bin_midpoints = y_bins[:-1] + np.diff(y_bins)/2
# Calculate the Cumulative Distribution Function (CDF) from the PDF
cdf = np.cumsum(Z.ravel())
cdf = cdf / cdf[-1]  # Normalization
# Create random data
values = np.random.rand(10000)
# Find the data position
value_bins = np.searchsorted(cdf, values)
x_idx, y_idx = np.unravel_index(value_bins,
                                (len(x_bin_midpoints),
                                 len(y_bin_midpoints)))

# Create the new data
new_data = np.column_stack((x_bin_midpoints[x_idx],
                            y_bin_midpoints[y_idx]))
new_x, new_y = new_data.T
And we can calculate the KDE from this new data and then plot it.
kernel = stats.gaussian_kde(new_data.T)
new_Z = np.reshape(kernel(positions).T, X.shape)
fig, ax = plt.subplots()
ax.imshow(np.rot90(new_Z), cmap=plt.cm.gist_earth_r,
          extent=[xmin, xmax, ymin, ymax])
ax.plot(new_x, new_y, 'k.', markersize=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
Here is a solution that returns data points uniformly distributed within each bin instead of at the bin centers:
import numpy as np

def draw_from_hist(hist, bins, nsamples=100000):
    cumsum = [0] + list(np.cumsum(hist))
    rand = np.random.rand(nsamples)*max(cumsum)
    return [np.interp(x, cumsum, bins) for x in rand]
A few things do not work well in the solutions suggested by @daniel, @arco-bast, et al.
Taking the last example:
def draw_from_hist(hist, bins, nsamples=100000):
    cumsum = [0] + list(np.cumsum(hist))
    rand = np.random.rand(nsamples)*max(cumsum)
    return [np.interp(x, cumsum, bins) for x in rand]
This assumes that at least the first bin has zero content, which may or may not be true. Secondly, it assumes that the value of the PDF sits at the upper bound of the bins, which it doesn't; it is mostly at the centre of the bin.
Here's another solution done in two parts
def init_cdf(hist, bins):
    """Initialize CDF from histogram

    Parameters
    ----------
    hist : array-like, float of size N
        Histogram height
    bins : array-like, float of size N+1
        Histogram bin boundaries

    Returns
    -------
    cdf : array-like, float of size N+1
    """
    from numpy import concatenate, diff, cumsum

    # Calculate half bin sizes
    steps = diff(bins) / 2  # Half bin size

    # Calculate slope between bin centres
    slopes = diff(hist) / (steps[:-1] + steps[1:])

    # Find height of end points by linear interpolation
    # - First part is linear interpolation from second over first
    #   point to lowest bin edge
    # - Second part is linear interpolation left neighbor to
    #   right neighbor up to but not including last point
    # - Third part is linear interpolation from second to last point
    #   over last point to highest bin edge
    # Can probably be done more elegantly
    ends = concatenate(([hist[0] - steps[0] * slopes[0]],
                        hist[:-1] + steps[:-1] * slopes,
                        [hist[-1] + steps[-1] * slopes[-1]]))

    # Calculate cumulative sum
    sum = cumsum(ends)
    # Subtract off lower bound and scale by upper bound
    sum -= sum[0]
    sum /= sum[-1]
    # Return the CDF
    return sum
def sample_cdf(cdf, bins, size):
    """Sample a CDF defined at specific points.

    Linear interpolation between defined points.

    Parameters
    ----------
    cdf : array-like, float, size N
        CDF evaluated at all points of bins. First and
        last point of bins are assumed to define the domain
        over which the CDF is normalized.
    bins : array-like, float, size N
        Points where the CDF is evaluated. First and last points
        are assumed to define the end-points of the CDF's domain.
    size : integer, non-zero
        Number of samples to draw

    Returns
    -------
    sample : array-like, float, of size ``size``
        Random sample
    """
    from numpy import interp
    from numpy.random import random

    return interp(random(size), cdf, bins)
# Begin example code
import numpy as np
import matplotlib.pyplot as plt

# initial histogram, coarse binning
hist, bins = np.histogram(np.random.normal(size=1000), np.linspace(-2, 2, 21))

# Calculate CDF, make sample, and new histogram w/ finer binning
cdf = init_cdf(hist, bins)
sample = sample_cdf(cdf, bins, 1000)
hist2, bins2 = np.histogram(sample, np.linspace(-3, 3, 61))

# Calculate bin centres and widths
mx = (bins[1:] + bins[:-1]) / 2
dx = np.diff(bins)
mx2 = (bins2[1:] + bins2[:-1]) / 2
dx2 = np.diff(bins2)

# Plot, taking care to show uncertainties and so on
plt.errorbar(mx, hist/dx, np.sqrt(hist)/dx, dx/2, '.', label='original')
plt.errorbar(mx2, hist2/dx2, np.sqrt(hist2)/dx2, dx2/2, '.', label='new')
plt.legend()
Sorry, I don't know how to get this to show up in StackOverflow, so copy-and-paste it and run it to see the point.
I stumbled upon this question when I was looking for a way to generate a random array based on the distribution of another array. If this were in numpy, I would call it a random_like() function.
Then I realized I have written a package, Redistributor, which might do this for me, even though the package was created with a somewhat different motivation (an Sklearn transformer capable of transforming data from an arbitrary distribution into an arbitrary known distribution, for machine-learning purposes). Of course I understand unnecessary dependencies are not desired, but at least knowing this package might be useful to you someday. The thing the OP asked about is basically done under the hood here.
WARNING: under the hood, everything is done in 1D. The package also implements a multidimensional wrapper, but I have not written this example using it, as I find it too niche.
Installation:
pip install git+https://gitlab.com/paloha/redistributor
Implementation:
import numpy as np
import matplotlib.pyplot as plt
def random_like(source, bins=0, seed=None):
    from redistributor import Redistributor
    np.random.seed(seed)
    noise = np.random.uniform(source.min(), source.max(), size=source.shape)
    s = Redistributor(bins=bins, bbox=[source.min(), source.max()]).fit(source.ravel())
    s.cdf, s.ppf = s.source_cdf, s.source_ppf
    r = Redistributor(target=s, bbox=[noise.min(), noise.max()]).fit(noise.ravel())
    return r.transform(noise.ravel()).reshape(noise.shape)
source = np.random.normal(loc=0, scale=1, size=(100,100))
t = random_like(source, bins=80) # More bins more precision (0 = automatic)
# Plotting
plt.figure(figsize=(12,4))
plt.subplot(121); plt.title(f'Distribution of source data, shape: {source.shape}')
plt.hist(source.ravel(), bins=100)
plt.subplot(122); plt.title(f'Distribution of generated data, shape: {t.shape}')
plt.hist(t.ravel(), bins=100); plt.show()
Explanation:
import numpy as np
import matplotlib.pyplot as plt
from redistributor import Redistributor
from sklearn.metrics import mean_squared_error
# We have some source array with "some unknown" distribution (e.g. an image)
# For the sake of example we just generate a random gaussian matrix
source = np.random.normal(loc=0, scale=1, size=(100,100))
plt.figure(figsize=(12,4))
plt.subplot(121); plt.title('Source data'); plt.imshow(source, origin='lower')
plt.subplot(122); plt.title('Source data hist'); plt.hist(source.ravel(), bins=100); plt.show()
# We want to generate a random matrix from the distribution of the source
# So we create a random uniformly distributed array called noise
noise = np.random.uniform(source.min(), source.max(), size=(100,100))
plt.figure(figsize=(12,4))
plt.subplot(121); plt.title('Uniform noise'); plt.imshow(noise, origin='lower')
plt.subplot(122); plt.title('Uniform noise hist'); plt.hist(noise.ravel(), bins=100); plt.show()
# Then we fit (approximate) the source distribution using Redistributor
# This step internally approximates the cdf and ppf functions.
s = Redistributor(bins=200, bbox=[source.min(), source.max()]).fit(source.ravel())
# A little naming workaround to make obj s work as a target distribution
s.cdf = s.source_cdf
s.ppf = s.source_ppf
# Here we create another Redistributor but now we use the fitted Redistributor s as a target
r = Redistributor(target=s, bbox=[noise.min(), noise.max()])
# Here we fit the Redistributor r to the noise array's distribution
r.fit(noise.ravel())
# And finally, we transform the noise into the source's distribution
t = r.transform(noise.ravel()).reshape(noise.shape)
plt.figure(figsize=(12,4))
plt.subplot(121); plt.title('Transformed noise'); plt.imshow(t, origin='lower')
plt.subplot(122); plt.title('Transformed noise hist'); plt.hist(t.ravel(), bins=100); plt.show()
# Computing the difference between the two arrays
print('Mean Squared Error between source and transformed: ', mean_squared_error(source, t))
Mean Squared Error between source and transformed: 2.0574123162302143

Can I put a color changer in a loop?

So basically what I'm wondering is: at the bottom of my code, where I plot the graph of my trials, is there a way to run a color generator through there? More explicitly: could I make a list of warm colors and pass it into my plot function, so that it runs through each color in the list as the loop runs, and my plot would therefore consist only of warm colors?
from numpy import *
from pylab import show, plot
from scipy.special import erfinv

n = 366  # number of days
ntrials = 5000
u = random.rand(ntrials)
v = sqrt(2.)*erfinv(2.*u - 1.)
mu = 0
sigma = .05
investment = 1000.
data = empty((ntrials, n))
data[:, 0] = investment

for t in range(n - 1):
    u = random.rand(ntrials)
    v = sqrt(2.)*erfinv(2.*u - 1.)
    epsilon = v
    data[:, t+1] = (1. + mu + sigma*epsilon)*data[:, t]

data2 = data.sum(axis=0)
woo = data2[-1]/ntrials
data3 = data2[-1]
x = linspace(0, n, n)

for t in range(n):
    plot(x, data[t, :])
show()
It sounds like you just want something like this?
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np

# Generate data...
nx, nsteps = 100, 20
x = np.linspace(0, 1, nx)
data = np.random.random((nx, nsteps)) - 0.5
data = data.cumsum(axis=0)
data = data.cumsum(axis=1)

# Plot
cmap = mpl.cm.autumn
for i, y in enumerate(data.T):
    plt.plot(x, y, color=cmap(i / float(nsteps)))
plt.show()
The key is that calling a matplotlib colormap instance with a value between 0 and 1 returns a color (0 is the lowest color in the colormap, 1 the highest).
For a list of available colormaps, see here. You can access the reversed version of any of these with name_r (e.g. the reversed version of mpl.cm.autumn is mpl.cm.autumn_r).
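For instance, a minimal sketch of that call behaviour:
import matplotlib as mpl

cmap = mpl.cm.autumn
print(cmap(0.0))  # RGBA tuple at the low end of the colormap
print(cmap(0.5))  # halfway through
print(cmap(1.0))  # RGBA tuple at the high end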
