Creating video of graph over time - python

I want to create a video of a graph as it evolves over time. I have tried stitching together PNG images of the graph, but it has 10,000 frames, which takes a VERY long time. I now want to try to use animate.FuncAnimation(), but I have been having a lot of trouble. Here is what I have so far:
import os
import numpy as np
import scipy.linalg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

def plot(fname, haveMLPY=False):
    # Load data from .npz file.
    data = np.load(fname)
    X = data["X"]
    T = data["T"]
    N = X.shape[1]
    A = data["vipWeights"]
    degrees = A.sum(1)
    ksB = data["ksB"]
    # Initialize a figure.
    figure = plt.figure()
    files = []
    # filename for the name of the resulting movie
    filename = 'animation'
    for i in range(10**4):
        mp = X[i, :, 0]
        data2 = np.c_[degrees, ksB, mp]
        # Create best fit surface for data2
        # regular grid covering the domain of the data
        mn = np.min(data2, axis=0)
        mx = np.max(data2, axis=0)
        X_grid, Y_grid = np.meshgrid(np.linspace(mn[0], mx[0], 20), np.linspace(mn[1], mx[1], 20))
        XX = X_grid.flatten()
        YY = Y_grid.flatten()
        order = 2  # 1: linear, 2: quadratic
        if order == 1:
            # best-fit linear plane
            A = np.c_[data2[:, 0], data2[:, 1], np.ones(data2.shape[0])]
            C, _, _, _ = scipy.linalg.lstsq(A, data2[:, 2])  # coefficients
            # evaluate it on grid
            Z = C[0]*X_grid + C[1]*Y_grid + C[2]
            # or expressed using matrix/vector product
            #Z = np.dot(np.c_[XX, YY, np.ones(XX.shape)], C).reshape(X.shape)
        elif order == 2:
            # best-fit quadratic curve
            A = np.c_[np.ones(data2.shape[0]), data2[:, :2], np.prod(data2[:, :2], axis=1), data2[:, :2]**2]
            C, _, _, _ = scipy.linalg.lstsq(A, data2[:, 2])
            # evaluate it on a grid
            Z = np.dot(np.c_[np.ones(XX.shape), XX, YY, XX*YY, XX**2, YY**2], C).reshape(X_grid.shape)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.plot_surface(X_grid, Y_grid, Z, rstride=1, cstride=1, alpha=0.2)
        ax.scatter(degrees, ksB, mp)
        ax.set_xlabel('degrees')
        ax.set_ylabel('ksB')
        ax.set_zlabel('mp')
        # form a filename
        fname2 = '_tmp%03d.png' % i
        # save the frame
        plt.savefig(fname2)
        # append the filename to the list
        files.append(fname2)
    # call mencoder
    os.system("mencoder 'mf://_tmp*.png' -mf type=png:fps=10 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o " + filename + ".mpg")
    # cleanup
    for fname2 in files:
        os.remove(fname2)
All the code from
# Create best fit surface for data2
to
fig = plt.figure()
can be mostly ignored because it is just used to calculate the best fit plane for the data.
Basically, there are N neurons, each of which has three important properties I want to plot: degrees, ksB, and mp. Only mp changes with time. All the data for mp is stored in X, indexed as X[time, neuron, data type]. Right now, I am looping over time and plotting X[i, :, 0] (mp is the 0th data type). Saving all 10^4 frames as screenshots takes forever, and the axis for mp keeps changing from frame to frame.
Is there a way to speed this up (using animation.FuncAnimation or something else) and also prevent the axis from shifting each frame?
Thanks!
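Below is a minimal sketch (not from the original post) of how animation.FuncAnimation could replace the save-and-stitch loop, with the axis limits fixed once up front so they do not shift between frames. The stand-in arrays only mimic the shapes described above (X[time, neuron, 0] for mp, plus degrees and ksB); the surface fit is omitted for brevity.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation

# Stand-in data with the shapes described above (replace with the arrays loaded in plot()).
rng = np.random.default_rng(0)
n_frames, n_neurons = 200, 50              # the real data has 10**4 frames
degrees = rng.random(n_neurons)
ksB = rng.random(n_neurons)
X = rng.random((n_frames, n_neurons, 1))   # X[time, neuron, data type]; mp is index 0

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Fix the axis limits once so they do not change between frames.
ax.set_xlim(degrees.min(), degrees.max())
ax.set_ylim(ksB.min(), ksB.max())
ax.set_zlim(X[:, :, 0].min(), X[:, :, 0].max())
ax.set_xlabel('degrees')
ax.set_ylabel('ksB')
ax.set_zlabel('mp')

scatter = ax.scatter(degrees, ksB, X[0, :, 0])

def update(i):
    global scatter
    scatter.remove()                                   # drop the previous frame's points
    scatter = ax.scatter(degrees, ksB, X[i, :, 0], color='C0')
    return scatter,

ani = animation.FuncAnimation(fig, update, frames=n_frames, interval=100)
ani.save('animation.mp4', fps=10)   # requires ffmpeg; use plt.show() instead to preview

Because only the scatter collection is redrawn each frame (instead of building and saving a new figure), this avoids the per-frame PNG writing entirely.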


Create 3D Streamtube plot in Plotly

Aim
I would like to create a 3D Streamtube Plot with Plotly.
Here is a cross-section of the vector field in the middle of the plot to give you an idea of what it looks like:
The final vector field should have rotational symmetry.
My Attempt
Download the data here: https://filebin.net/x6ywfuo6v4851v74
Run the code below:
Code:
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import numpy as np
import plotly.io as pio
pio.renderers.default='browser'
# Import data to pandas
df = pd.read_csv("data.csv")
# Plot
X = np.linspace(0,1,101)
Y = np.linspace(0,1,10)
Z = np.linspace(0,1,101)
# Points from which the streamtubes should originate
xpos,ypos = np.meshgrid(X[::5],Y, indexing="xy")
xpos = xpos.reshape(1,-1)[0]
ypos = ypos.reshape(1,-1)[0]
starting_points = px.scatter_3d(
    x=xpos,
    y=ypos,
    z=[-500]*len(xpos)
)
starting_points.show()
# Streamtube Plot
data_plot = [go.Streamtube(
    x = df['x'],
    y = df['y'],
    z = df['z'],
    u = df['u'],
    v = df['v'],
    w = df['w'],
    starts = dict( #Determines the streamtubes starting position.
        x=xpos,
        y=ypos,
        z=[-500]*len(xpos)
    ),
    #sizeref = 0.3,
    colorscale = 'jet',
    showscale = True,
    maxdisplayed = 300 #Determines the maximum segments displayed in a streamtube.
)]
fig = go.Figure(data=data_plot)
fig.show()
The initial points (starting points) of the streamtubes seem to be nicely defined:
...but the resulting 3D streamtube plot is very weird:
Edit
I tried normalizing the field plot, but the result is still not satisfactory:
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.io as pio
pio.renderers.default='browser'
# Import data to pandas
df = pd.read_csv("data.csv")
# NORMALIZE VECTOR FIELD -> between [0,1]
df["u"] = (df["u"]-df["u"].min()) / (df["u"].max()-df["u"].min())
df["v"] = (df["v"]-df["v"].min()) / (df["v"].max()-df["v"].min())
df["w"] = (df["w"]-df["w"].min()) / (df["w"].max()-df["w"].min())
# Plot
X = np.linspace(0,1,101)
Y = np.linspace(0,1,10)
Z = np.linspace(0,1,101)
# Points from which the streamtubes should originate
xpos,ypos = np.meshgrid(X[::5],Y, indexing="xy")
xpos = xpos.reshape(1,-1)[0]
ypos = ypos.reshape(1,-1)[0]
# Streamtube Plot
data_plot = [go.Streamtube(
    x = df['x'],
    y = df['y'],
    z = df['z'],
    u = df['u'],
    v = df['v'],
    w = df['w'],
    starts = dict( #Determines the streamtubes starting position.
        x=xpos,
        y=ypos,
        z=[0]*len(xpos)
    ),
    #sizeref = 0.3,
    colorscale = 'jet',
    showscale = True,
    maxdisplayed = 300 #Determines the maximum segments displayed in a streamtube.
)]
fig = go.Figure(data=data_plot)
fig.show()
Data
As for the data itself:
It is created from 10 slices (in the y-direction). For each slice (y), [u, v, w] was computed on a regular xz mesh (101x101). Everything was then assembled into the dataframe you can download, which has 101x101x10 data points.
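For reference, here is a hypothetical sketch (my assumption, not the original conversion code) of one way to assemble such a long-format dataframe so the coordinate columns and the field columns stay aligned with a single meshgrid ordering; U, V, W are placeholders for the real field components:

import numpy as np
import pandas as pd

X = np.linspace(0, 1, 101)
Y = np.linspace(0, 1, 10)
Z = np.linspace(-500, 200, 101)

# Build one mesh and derive every column from it, so (x, y, z) and (u, v, w)
# cannot get out of step with each other.
xi, yi, zi = np.meshgrid(X, Y, Z, indexing="ij")   # each has shape (101, 10, 101)
U = V = W = np.zeros_like(xi)                      # placeholders for the real field components

df = pd.DataFrame({
    "x": xi.ravel(), "y": yi.ravel(), "z": zi.ravel(),
    "u": U.ravel(), "v": V.ravel(), "w": W.ravel(),
})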
Edit 2
It may be that I am converting my original data (download here: https://filebin.net/tlgkz3fy1h3j6h5o) into the format suitable for plotly incorrectly, so I was wondering whether you know how this can be done correctly.
Here some code to visualize the data in a 3D vector plot correctly:
# %%
import pickle
import numpy as np
import matplotlib.pyplot as plt
# Import Full Data
with open("full_data.pickle", 'rb') as handle:
full_data = pickle.load(handle)
# Axis
X = np.linspace(0,1,101)
Y = np.linspace(0,1,10)
Z = np.linspace(-500,200,101)
# Initialize lists of all field components
DX = []
DY = []
DZ = []
for cross_section in list(full_data["cross_sections"].keys()):
    # extract field components in x, y, and z
    dx, dy, dz = full_data["cross_sections"][cross_section]
    # make them numpy arrays immediately
    dx = np.array(dx)
    dy = np.array(dy)
    dz = np.array(dz)
    # append
    DX.append(dx)
    DY.append(dy)
    DZ.append(dz)
#Convert to numpy
DX = np.array(DX)
DY = np.array(DY)
DZ = np.array(DZ)
# Create 3D Quiver Plot with color gradient
# Source: https://stackoverflow.com/questions/65254887/how-to-plot-with-matplotlib-a-3d-quiver-plot-with-color-gradient-for-length-giv
def plot_3d_quiver(x, y, z, u, v, w):
    # COMPUTE LENGTH OF VECTOR -> MAGNITUDE
    c = np.sqrt(np.abs(v) ** 2 + np.abs(u) ** 2 + np.abs(w) ** 2)
    c = (c.ravel() - c.min()) / c.ptp()
    # Repeat for each body line and two head lines
    c = np.concatenate((c, np.repeat(c, 2)))
    # Colormap
    c = plt.cm.jet(c)
    fig = plt.figure(dpi=300)
    ax = fig.add_subplot(projection='3d')  # fig.gca(projection='3d') no longer works in recent matplotlib
    ax.quiver(x, y, z, u, v, w, colors=c, length=0.2, arrow_length_ratio=0.7)
    plt.gca().invert_zaxis()
    plt.show()
# Create Mesh !
xi, yi, zi = np.meshgrid(X, Y, Z, indexing='xy')
skip_every = 5
skip_slice = 2
skip3D=(slice(None,None,skip_slice),slice(None,None,skip_every),slice(None,None,skip_every))
# Source: https://stackoverflow.com/questions/68690442/python-plotting-3d-vector-field
plot_3d_quiver(xi[skip3D], yi[skip3D], zi[skip3D]/1000, DX[skip3D], DY[skip3D],
np.moveaxis(DZ[skip3D],2,1))
As you can see, there are some long downward vectors in the middle of the 3D space, which are not shown in the plotly tube plot.
Edit 3
Using the code from the answer, I get this:
This is a huge improvement. It looks almost perfect and is in accordance with what I expect.
A few more questions:
Is there a way to also show some tubes at the lower part of the plot?
Is there a way to flip the z-axis, so that the tubes come down from -z to +z (as shown in the cross-section streamline plot)?
How does the data need to be structured to be organized correctly for the plotly plot? I ask because of the use of np.moveaxis().
I have rewritten my answer to reflect the history of the conversation, but in an organized manner.
The situation is:
len(np.unique(df['x']))
>>> 101
compared with:
len(np.unique(df['y']))
>>> 10
so the data in the y-direction are much coarser than in the x-direction!
In the z-direction the situation is even worse, because the range of the data is far larger than that of x and y:
df.min()
>>> x 0.000000
y 0.000000
z -500.000000
u -0.369106
v -0.259156
w -0.517652
df.max()
>>> x 1.000000
y 1.000000
z 200.000000
u 0.368312
v 0.238271
w 1.257869
The solution to this ill-formed data set consists of three steps:
Normalize the vector field and the sample points in each direction.
Either reduce the data density in the x and z directions or increase the density of the data along the y-axis. (This step is optional but generally recommended.)
After making a plot based on the new data, change the axis ticks back to the real values (see the sketch below).
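A minimal sketch of that last step, assuming the z data were rescaled by a factor of 1000 as in the code further down (the tick positions and labels here are illustrative):

import numpy as np
import plotly.graph_objects as go

fig = go.Figure()   # stands in for the Streamtube figure built from the rescaled data below

# The rescaled z runs from -0.5 to 0.2 (z/1000); label the ticks with the original -500..200 values.
tickvals = np.linspace(-0.5, 0.2, 8)
ticktext = [f"{v:.0f}" for v in np.linspace(-500, 200, 8)]
fig.update_layout(scene=dict(zaxis=dict(tickvals=tickvals, ticktext=ticktext)))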
To normalize a vector field in this situation, which apparently is an engineering one, it is important to maintain the relative length of the vectors at every spatial point, which is done this way:
# NORMALIZE VECTOR FIELD -> between [0,1]
np_df = np.array([u, v, w])
vecf_norm = np.linalg.norm(np_df, 2, axis=0)
max_norm = np.max(vecf_norm)
min_norm = np.min(vecf_norm)
u = u * (vecf_norm - min_norm) / (max_norm - min_norm)
v = v * (vecf_norm - min_norm) / (max_norm - min_norm)
w = w * (vecf_norm - min_norm) / (max_norm - min_norm)
As you will see at the end, this formulation will be used to enhance the resulting tube-plot.
Please let me add some important details about using dimensionless data for engineering data visualisation:
First of all, if this vector field results from any sort of differential equation, it is highly recommended to reformulate your P.D.E. into a dimensionless equation before attempting to solve it numerically.
If the vector field is the result of an already dimensionless differential equation, you need to plot it using dimensionless data (including the geometry and the u, v, w values).
Please note that plotly uses the local divergence values to determine the local diameter of the tubes. When we change the vector field (and the geometry), we change the divergence as well.
I tried to mix your initial and second code snippets to get this:
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import numpy as np
import plotly.io as pio
import pickle
pio.renderers.default='browser'
# Import Full Data
with open("full_data.pickle", 'rb') as handle:
full_data = pickle.load(handle)
# Axis
X = np.linspace(0,1,101)
Y = np.linspace(0,1,10)
Z = np.linspace(-0.5,0.2,101)
xpos,ypos = np.meshgrid(X[::5],Y, indexing="ij")
#xpos = xpos.reshape(1,-1)[0]
#ypos = ypos.reshape(1,-1)[0]
xpos = np.ravel(xpos)
ypos = np.ravel(ypos)
# Initialize List of all fields
DX = []
DY = []
DZ = []
for cross_section in list(full_data["cross_sections"]):
    # extract field components in x, y, and z
    dx, dy, dz = full_data["cross_sections"][cross_section]
    # make them numpy arrays immediately
    dx = np.array(dx)
    dy = np.array(dy)
    dz = np.array(dz)
    # append
    DX.append(dx)
    DY.append(dy)
    DZ.append(dz)
#Convert to numpy
move_i = [0, 1, 2]
move_e = [1, 2, 0]
DX = np.moveaxis(np.array(DX), move_i, move_e)
DY = np.moveaxis(np.array(DY), move_i, move_e)
DZ = np.moveaxis(np.array(DZ), move_i, move_e)
# Create Mesh !
xi, yi, zi = np.meshgrid(X, Y, Z, indexing="ij")
data_plot = [go.Streamtube(
    x = np.ravel(xi),
    y = np.ravel(yi),
    z = np.ravel(zi),
    u = np.ravel(DX),
    v = np.ravel(DY),
    w = np.ravel(DZ),
    starts = dict( #Determines the streamtubes starting position.
        x=xpos,
        y=ypos,
        z=np.array([-0.5]*len(xpos))
    ),
    #sizeref = 0.3,
    colorscale = 'jet',
    showscale = True,
    maxdisplayed = 300 #Determines the maximum segments displayed in a streamtube.
)]
fig = go.Figure(data=data_plot)
fig.show()
In this code I have removed the skipping, because I suspect that is where things go wrong. The resulting plot, which you have added to your question, looks similar to the 2D plot in your question, but it needs more work to give a better result.
So, using what has been said already, in addition to the info below:
Yes, tubes start from the start points, so you need to define start points wherever you expect to see tubes. However, the start points need to be geometrically inside the space defined by the sample points; otherwise plotly may be forced to extrapolate the data (I'm not sure about this), which results in distorted and unexpected output. This means you can define start points on both the upper and lower planes of the field to ensure that tubes are emitted on both planes. Sometimes the vectors are there but you cannot see them because they are drawn too thin; that happens when their local divergences are too low. Normalizing the vector field by the rules mentioned earlier may give you a better result, as sketched below.
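For that first point, here is a small sketch (an assumption on my part, reusing the variable names and rescaled z-range from the code above) of seeding tubes on both the lower and the upper bounding plane:

import numpy as np

X = np.linspace(0, 1, 101)
Y = np.linspace(0, 1, 10)

xpos, ypos = np.meshgrid(X[::5], Y, indexing="ij")
xpos, ypos = np.ravel(xpos), np.ravel(ypos)

# Seed tubes on both bounding planes of the rescaled z-range [-0.5, 0.2].
x_starts = np.concatenate([xpos, xpos])
y_starts = np.concatenate([ypos, ypos])
z_starts = np.concatenate([np.full_like(xpos, -0.5),   # lower plane
                           np.full_like(xpos, 0.2)])   # upper plane
# ...then pass starts=dict(x=x_starts, y=y_starts, z=z_starts) to go.Streamtube.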
According to plotly documentation:
You can tell plotly's automatic axis range calculation logic to reverse the direction of an axis by setting the autorange axis property to "reversed"
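For example, a minimal sketch of applying that setting to the scene's z-axis (fig standing in for the streamtube figure):

import plotly.graph_objects as go

fig = go.Figure()   # stands in for the streamtube figure
fig.update_layout(scene=dict(zaxis=dict(autorange="reversed")))   # flip the z-axis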
plotly reads the data point by point, so the order of the points doesn't really matter in general; but in your case, the issue happens when the data become corrupted and disturbed while some of the sample points are omitted, i.e. some of the x, y, z and some of the u, v, w values lose their correct locations, which results in an entirely different, unexpected data set.
I have tried to normalize the (u, v, w) vector field (using the formulation provided earlier):
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import numpy as np
import plotly.io as pio
import pickle
pio.renderers.default='browser'
# Import Full Data
with open("full_data.pickle", 'rb') as handle:
full_data = pickle.load(handle)
# Axis
X = np.linspace(0,1,101)
Y = np.linspace(0,1,10)
Z = np.linspace(-0.5,0.2,101)
xpos,ypos = np.meshgrid(X[::5],Y, indexing="ij")
#xpos = xpos.reshape(1,-1)[0]
#ypos = ypos.reshape(1,-1)[0]
xpos = np.ravel(xpos)
ypos = np.ravel(ypos)
# Initialize List of all fields
DX = []
DY = []
DZ = []
for cross_section in list(full_data["cross_sections"]):
    # extract field components in x, y, and z
    dx, dy, dz = full_data["cross_sections"][cross_section]
    # make them numpy arrays immediately
    dx = np.array(dx)
    dy = np.array(dy)
    dz = np.array(dz)
    # append
    DX.append(dx)
    DY.append(dy)
    DZ.append(dz)
#Convert to numpy
move_i = [0, 1, 2]
move_e = [1, 2, 0]
DX = np.moveaxis(np.array(DX), move_i, move_e)
DY = np.moveaxis(np.array(DY), move_i, move_e)
DZ = np.moveaxis(np.array(DZ), move_i, move_e)
u1 = np.ravel(DX)
v1 = np.ravel(DY)
w1 = np.ravel(DZ)
np_df = np.array([u1, v1, w1])
vecf_norm = np.linalg.norm(np_df, 2, axis=0)
max_norm = np.max(vecf_norm)
min_norm = np.min(vecf_norm)
u2 = u1 * (vecf_norm - min_norm) / (max_norm - min_norm)
v2 = v1 * (vecf_norm - min_norm) / (max_norm - min_norm)
w2 = w1 * (vecf_norm - min_norm) / (max_norm - min_norm)
# Create Mesh !
xi, yi, zi = np.meshgrid(X, Y, Z, indexing="ij")
data_plot = [go.Streamtube(
    x = np.ravel(xi),
    y = np.ravel(yi),
    z = np.ravel(zi),
    u = u2,
    v = v2,
    w = w2,
    starts = dict( #Determines the streamtubes starting position.
        x=xpos,
        y=ypos,
        z=np.array([-0.5]*len(xpos))
    ),
    #sizeref = 0.3,
    colorscale = 'jet',
    showscale = True,
    maxdisplayed = 300 #Determines the maximum segments displayed in a streamtube.
)]
fig = go.Figure(data=data_plot)
fig.show()
and get a better plot:

Decay overlaid with function

I am trying to plot decay curves (1/r, 1/r^2 and 1/r^3) together with my functions (see figure).
My issue is that the decay lines are not near the function plots so it's difficult to see which decay line fits which function. I would like the decay lines to overlay the function.
I have tried subtracting a number from the 1/x function to shift it down but that did not work.
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat

#load data from matlab
mat = loadmat('model01.mat')
#unpack data from matlab, one distance and velocities in 4 different directions
dist = mat['Dist_A_mm01']
distar = np.array(dist)
vpos = mat['Model_Posterior01_m']
vposar = np.array(vpos)
vant = mat['Model_Anterior01_m']
vantar = np.array(vant)
vleft = mat['Model_Left01_m']
vleftar = np.array(vleft)
vright = mat['Model_Right01_m']
vrightar = np.array(vright)
# transpose data
distar = np.transpose(distar)
vposar = np.transpose(vposar)
vantar = np.transpose(vantar)
vleftar = np.transpose(vleftar)
vrightar = np.transpose(vrightar)
#select numbers from array to plot (number in place 0 is 0 which gives an error when dividing by zero later)
dd = distar[1:50]
#plot the data from matlab in a log log graph
ax = plt.axes()
plt.loglog(distar,vposar)
plt.loglog(distar,vantar)
plt.loglog(distar,vleftar)
plt.loglog(distar,vrightar)
plt.loglog(dd,1/dd,dd,1/dd**2,dd,1/dd**3)
ax.set_title('Decay away from centroid')
ax.set_ylabel('Velocity in m/s')
ax.set_xlabel('mm')
plt.show()
Here is the .mat file I am importing.
I want the decay lines to be overlaid with the data so it's easy to see the decay of each line on the plot.
To shift the decay functions you need to multiply them by something smaller than one, not subtract, since log(A * 1/x^2) = log(A) - 2*log(x); i.e. by selecting appropriate values for A you can shift them up and down as you please. In practice this would look like this:
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(1, 10)
y = 1 / x**2
y2 = 0.1 * 1 / x**2
plt.loglog(x, y, label="1/x2")
plt.loglog(x, y2, label="A + 1/x2")
plt.legend()
If these lines are supposed to be just guides to the eye, that should suffice. Otherwise you should probably do a fit to your actual data.
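If it helps, here is a small self-contained sketch (with made-up stand-in data rather than the .mat file) of picking A so that a guide line starts on top of a data curve:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(1, 50, 200)
y_data = 3.0 / x**2 + 0.02 / x     # stand-in for one of the velocity curves
A = y_data[0] * x[0]**2            # choose A so the guide passes through the first data point
plt.loglog(x, y_data, label="data")
plt.loglog(x, A / x**2, "k--", label="A/x^2 guide")
plt.legend()
plt.show()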

Efficient stitching of datasets

I have multiple measurement datasets that I want to combine to a single dataset. While I have a working solution, it is terribly inefficient and I would be happy for some tips on how I can improve it.
Think of the measurements as multiple height maps of one object that I want to combine to a single height map. My measurements are not perfect and may have some tilt and height offset. Let's assume (for now) that we know the x-y position perfectly accurate. Here is an example:
import numpy as np
import matplotlib.pyplot as plt
def height_profile(x, y):
    radius = 100
    return np.sqrt(radius**2 - x**2 - y**2) - radius
np.random.seed(123)
datasets = {}
# DATASET 1
x = np.arange(-8, 2.01, 0.1)
y = np.arange(-3, 7.01, 0.1)
xx, yy = np.meshgrid(x, y)
# height is the actual profile + noise
zz = height_profile(xx, yy) + np.random.randn(*xx.shape)*0.001
datasets[1] = [xx, yy, zz]
plt.figure()
plt.pcolormesh(*datasets[1])
plt.colorbar()
# DATASET 2
x = np.arange(-2, 8.01, 0.1)
y = np.arange(-3, 7.01, 0.1)
xx, yy = np.meshgrid(x, y)
# height is the actual profile + noise + random offset + random tilt
zz = height_profile(xx, yy) + np.random.randn(*xx.shape)*0.001 + np.random.rand() + np.random.rand()*xx*0.1 + np.random.rand()*yy*0.1
datasets[2] = [xx, yy, zz]
plt.figure()
plt.pcolormesh(*datasets[2])
plt.colorbar()
# DATASET 3
x = np.arange(-5, 5.01, 0.1)
y = np.arange(-7, 3.01, 0.1)
xx, yy = np.meshgrid(x, y)
# height is the actual profile + noise + random offset + random tilt
zz = height_profile(xx, yy) + np.random.randn(*xx.shape)*0.001 + np.random.rand() + np.random.rand()*xx*0.1 + np.random.rand()*yy*0.1
datasets[3] = [xx, yy, zz]
plt.figure()
plt.pcolormesh(*datasets[3])
plt.colorbar()
To combine the three (or more) datasets, I have the following strategy: Find the overlap between the datasets, calculate the summed-up height difference between datasets in the overlap regions (residual_overlap) and try to minimize the height differences (residual) using lmfit. To apply the transformations on the dataset (tilt, offset, etc.) I have a dedicated function.
from lmfit import minimize, Parameters
from copy import deepcopy
from itertools import combinations
from scipy.interpolate import griddata
def data_transformation(dataset, idx, params):
    dataset = deepcopy(dataset)
    if 'x_offset_{}'.format(idx) in params:
        x_offset = params['x_offset_{}'.format(idx)].value
    else:
        x_offset = 0
    if 'y_offset_{}'.format(idx) in params:
        y_offset = params['y_offset_{}'.format(idx)].value
    else:
        y_offset = 0
    if 'tilt_x_{}'.format(idx) in params:
        x_tilt = params['tilt_x_{}'.format(idx)].value
    else:
        x_tilt = 0
    if 'tilt_y_{}'.format(idx) in params:
        y_tilt = params['tilt_y_{}'.format(idx)].value
    else:
        y_tilt = 0
    if 'piston_{}'.format(idx) in params:
        piston = params['piston_{}'.format(idx)].value
    else:
        piston = 0
    _x = dataset[0] - np.mean(dataset[0])
    _y = dataset[1] - np.mean(dataset[1])
    dataset[0] = dataset[0] + x_offset
    dataset[1] = dataset[1] + y_offset
    dataset[2] = dataset[2] + 2 * (x_tilt * _x + y_tilt * _y) + piston
    return dataset
def residual_overlap(dataset_0, dataset_1):
    xy_0 = np.stack((dataset_0[0].flatten(), dataset_0[1].flatten()), axis=1)
    xy_1 = np.stack((dataset_1[0].flatten(), dataset_1[1].flatten()), axis=1)
    difference = griddata(xy_0, dataset_0[2].flatten(), xy_1) - \
        dataset_1[2].flatten()
    return difference
def residual(params, datasets):
    datasets = deepcopy(datasets)
    for idx in datasets:
        datasets[idx] = data_transformation(
            datasets[idx], idx, params)
    residuals = []
    for combination in combinations(list(datasets), 2):
        residuals.append(residual_overlap(
            datasets[combination[0]], datasets[combination[1]]))
    residuals = np.concatenate(residuals)
    residuals[np.isnan(residuals)] = 0
    return residuals
def minimize_datasets(params, datasets, **minimizer_kw):
    minimize_fnc = lambda *args, **kwargs: residual(*args, **kwargs)
    datasets = deepcopy(datasets)
    min_result = minimize(minimize_fnc, params,
                          args=(datasets, ), **minimizer_kw)
    return min_result
I run the "stitching" like this:
params = Parameters()
params.add('tilt_x_2', 0)
params.add('tilt_y_2', 0)
params.add('piston_2', 0)
params.add('tilt_x_3', 0)
params.add('tilt_y_3', 0)
params.add('piston_3', 0)
fit_result = minimize_datasets(params, datasets)
plt.figure()
plt.pcolormesh(*data_transformation(datasets[1], 1, fit_result.params), alpha=0.3, vmin=-0.5, vmax=0)
plt.pcolormesh(*data_transformation(datasets[2], 2, fit_result.params), alpha=0.3, vmin=-0.5, vmax=0)
plt.pcolormesh(*data_transformation(datasets[3], 3, fit_result.params), alpha=0.3, vmin=-0.5, vmax=0)
plt.colorbar()
As you can see, it does work, but the stitching takes about a minute for these small datasets on my computer. In reality I have more and bigger datasets.
Do you see a way to improve the stitching performance?
Edit: As suggested, I ran a profiler and it shows that 99.5% of the time is spent in the griddata function. That one is used to interpolate datapoints from dataset_0 to the locations of dataset_1. If I switch method to "nearest", the execution time drops to about a second, but then there is no interpolation happening. Any chance to improve the speed of the interpolation?
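One possible speed-up (my suggestion, not from the original post): since each dataset sits on a regular grid and data_transformation only shifts and tilts it, the repeated Delaunay triangulation inside griddata can be avoided by using scipy.interpolate.RegularGridInterpolator instead, for example:

import numpy as np
from scipy.interpolate import RegularGridInterpolator

def residual_overlap_grid(dataset_0, dataset_1):
    # np.meshgrid with the default 'xy' indexing puts y along axis 0 and x along axis 1,
    # so the grid axes can be read off the first row/column.
    x_axis = dataset_0[0][0, :]
    y_axis = dataset_0[1][:, 0]
    interp = RegularGridInterpolator((y_axis, x_axis), dataset_0[2],
                                     bounds_error=False, fill_value=np.nan)
    # Evaluate dataset_0's surface at dataset_1's (y, x) locations.
    query = np.stack((dataset_1[1].ravel(), dataset_1[0].ravel()), axis=1)
    return interp(query) - dataset_1[2].ravel()

fill_value=np.nan mirrors griddata returning NaN outside the overlap, so the NaN-to-zero step in residual keeps working; only residual_overlap needs to be swapped out.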
Skimming through the code, I can't really see anything to improve other than that you are running deepcopy() over and over again.
However, I would recommend that you do some profiling. If you are using PyCharm, you can profile using the clock/run button.
I am sure other IDEs have similar capabilities. This way you can figure out which function is taking the most time.
The whole graph:
When I zoom in on a few functions (I am showing Google Cloud functions here):
You can see how many times they were called, how long they took, and so on.
Long story short, you need a profiler!
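If you are not using an IDE with a built-in profiler, the standard library gives the same information; here is a minimal sketch using the names from the question's code:

import cProfile
import pstats

# Profile one full fit (run after the setup code above) and show the ten most
# expensive calls sorted by cumulative time.
cProfile.run("minimize_datasets(params, datasets)", "stitching.prof")
pstats.Stats("stitching.prof").sort_stats("cumulative").print_stats(10)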

Plot arbitrary paths with constant width given in data coordinates

General aim
I am trying to write some plotting functionality that (at its core)
plots arbitrary paths with a constant width given in data coordinates
(i.e. unlike lines in matplotlib which have widths given in display coordinates).
Previous solutions
This answer achieves
the basic goal. However, this answer converts between display and data
coordinates and then uses a matplotlib line with adjusted
coordinates. The existing functionality in my code that I would like
to replace / extend inherits from matplotlib.patches.Polygon. Since
the rest of the code base makes extensive use of
matplotlib.patches.Polygon attributes and methods, I would like to
continue to inherit from that class.
Problem
My current implementation (code below) seems to come close. However,
the patch created by simple_test seems to be subtly thicker towards
the centre than it is at the start and end point, and I have no
explanation why that may be the case.
I suspect that the problem lies in the computation of the orthogonal vector.
As supporting evidence, I would like to point to the start and end points of the patch in the figure created by complicated_test, which do not seem exactly orthogonal to the path. However, the dot product of the orthonormal vector and the tangent vector is always zero, so I am not sure what is going on here.
Output of simple_test:
Output of complicated_test:
Code
#!/usr/bin/env python
import numpy as np
import matplotlib.patches
import matplotlib.pyplot as plt
class CurvedPatch(matplotlib.patches.Polygon):
    def __init__(self, path, width, *args, **kwargs):
        vertices = self.get_vertices(path, width)
        matplotlib.patches.Polygon.__init__(self, list(map(tuple, vertices)),
                                            closed=True,
                                            *args, **kwargs)

    def get_vertices(self, path, width):
        left = _get_parallel_path(path, -width/2)
        right = _get_parallel_path(path, width/2)
        full = np.concatenate([left, right[::-1]])
        return full
def _get_parallel_path(path, delta):
    # initialise output
    offset = np.zeros_like(path)
    # use the previous and the following point to
    # determine the tangent at each point in the path;
    for ii in range(1, len(path)-1):
        offset[ii] += _get_shift(path[ii-1], path[ii+1], delta)
    # handle start and end points
    offset[0] = _get_shift(path[0], path[1], delta)
    offset[-1] = _get_shift(path[-2], path[-1], delta)
    return path + offset
def _get_shift(p1, p2, delta):
    # unpack coordinates
    x1, y1 = p1
    x2, y2 = p2
    # get orthogonal unit vector;
    # adapted from https://stackoverflow.com/a/16890776/2912349
    v = np.r_[x2-x1, y2-y1]    # vector between points
    v = v / np.linalg.norm(v)  # unit vector
    w = np.r_[-v[1], v[0]]     # orthogonal vector
    w = w / np.linalg.norm(w)  # orthogonal unit vector
    # check that vectors are indeed orthogonal
    assert np.isclose(np.dot(v, w), 0.)
    # rescale unit vector
    dx, dy = delta * w
    return dx, dy
def simple_test():
    x = np.linspace(-1, 1, 1000)
    y = np.sqrt(1. - x**2)
    path = np.c_[x, y]
    curve = CurvedPatch(path, 0.1, facecolor='red', alpha=0.5)
    fig, ax = plt.subplots(1, 1)
    ax.add_artist(curve)
    ax.plot(x, y)  # plot path for reference
    plt.show()
def complicated_test():
    random_points = np.random.rand(10, 2)

    # Adapted from https://stackoverflow.com/a/35007804/2912349
    import scipy.interpolate as si

    def scipy_bspline(cv, n=100, degree=3, periodic=False):
        """ Calculate n samples on a bspline
        cv :      Array of control vertices
        n :       Number of samples to return
        degree:   Curve degree
        periodic: True - Curve is closed
        """
        cv = np.asarray(cv)
        count = cv.shape[0]
        # Closed curve
        if periodic:
            kv = np.arange(-degree, count+degree+1)
            factor, fraction = divmod(count+degree+1, count)
            cv = np.roll(np.concatenate((cv,) * factor + (cv[:fraction],)), -1, axis=0)
            degree = np.clip(degree, 1, degree)
        # Opened curve
        else:
            degree = np.clip(degree, 1, count-1)
            kv = np.clip(np.arange(count+degree+1)-degree, 0, count-degree)
        # Return samples
        max_param = count - (degree * (1-periodic))
        spl = si.BSpline(kv, cv, degree)
        return spl(np.linspace(0, max_param, n))

    x, y = scipy_bspline(random_points, n=1000).T
    path = np.c_[x, y]
    curve = CurvedPatch(path, 0.1, facecolor='red', alpha=0.5)
    fig, ax = plt.subplots(1, 1)
    ax.add_artist(curve)
    ax.plot(x, y)  # plot path for reference
    plt.show()
if __name__ == '__main__':
    plt.ion()
    simple_test()
    complicated_test()
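One thing that may be worth ruling out (my own suggestion, not part of the original post): if the axes do not use an equal aspect ratio, a width that is constant in data coordinates is rendered with a different on-screen thickness depending on the local direction of the path, which would make the middle of the semicircle in simple_test look thicker than its ends even if the geometry is correct. A quick check, reusing the CurvedPatch class defined above:

import numpy as np
import matplotlib.pyplot as plt

def simple_test_equal_aspect():
    x = np.linspace(-1, 1, 1000)
    y = np.sqrt(1. - x**2)
    path = np.c_[x, y]
    curve = CurvedPatch(path, 0.1, facecolor='red', alpha=0.5)
    fig, ax = plt.subplots(1, 1)
    ax.add_artist(curve)
    ax.plot(x, y)
    ax.set_aspect('equal')  # rule out anisotropic axis scaling as the cause
    plt.show()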

Graphing polynomials

With some help I have produced the following code. Below are some of the desired outputs for given inputs. However, I am having some trouble completing the last task of this code. I am looking for some help with this; any guidance is greatly appreciated, thanks!
flops = 0

def add(x1, x2):
    global flops
    flops += 1
    return x1 + x2

def multiply(x1, x2):
    global flops
    flops += 1
    return x1 * x2

def poly_horner(A, x):
    global flops
    flops = 0
    p = A[-1]
    i = len(A) - 2
    while i >= 0:
        p = add(multiply(p, x), A[i])
        i -= 1
    return p

def poly_naive(A, x):
    global flops
    p = 0
    flops = 0
    for i, a in enumerate(A):
        xp = 1
        for _ in range(i):
            xp = multiply(xp, x)
        p = add(p, multiply(xp, a))
    return p
Given the following inputs, I got the following outputs:
poly_horner([1,2,3,4,5], 2)
129
print(flops)
8
poly_naive([1,2,3,4,5], 2)
129
print(flops)
20
np.polyval([5,4,3,2,1], 2)
129
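For reference, the flop counts match what the two implementations should do for the degree-4 polynomial [1, 2, 3, 4, 5]: poly_horner performs one multiply and one add per loop iteration, and the loop runs n = 4 times, so flops = 2n = 8; poly_naive builds x^i with i multiplications and then uses one more multiply and one add per term, so flops = sum over i = 0..n of (i + 2) = (n + 1)(n + 4)/2 = 20.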
I assume you want to create a figure, though your question is quite vague...but I have a few minutes to kill while my code runs. Anyway, it seems you MIGHT be having difficulty plotting.
import numpy as np
import pylab as pl
x = np.arange(10)
y = x * np.pi
# you can calculate a line of best fit (lobf) using numpy's polyfit function
lobf1 = np.polyfit(x, y, 1) # first degree polynomial
lobf2 = np.polyfit(x, y, 2) # second degree polynomial
lobf3 = np.polyfit(x, y, 3) # third degree polynomial
# you can now use the lines of best fit to calculate the
# value anywhere within the domain using numpy's polyval function
# FIRST, create a figure and a plotting axis within the fig
fig = pl.figure(figsize=(3.25, 2.5))
ax0 = fig.add_subplot(111)
# now use polyval to calculate your y-values at every x
x = np.arange(0, 20, 0.1)
ax0.plot(x, np.polyval(lobf1, x), 'k')
ax0.plot(x, np.polyval(lobf2, x), 'b')
ax0.plot(x, np.polyval(lobf3, x), 'r')
# add a legend for niceness
ax0.legend(('Degree 1', 'Degree 2', 'Degree 3'), fontsize=8, loc=2)
# you can label the axes whatever you like
ax0.set_ylabel('My y-label', fontsize=8)
ax0.set_xlabel('My x-label', fontsize=8)
# you can show the figure on your screen
fig.show()
# and you can save the figure to your computer in different formats
# specifying bbox_inches='tight' helps eliminate unnecessary whitespace around
# the axis when saving...it just looks better this way.
pl.savefig('figName.png', dpi=500, bbox_inches='tight')
pl.savefig('figName.pdf', bbox_inches='tight')
# don't forget to close the figure
pl.close('all')
