matplotlib triplot and tricontourf - python

I'm attempting to plot a 2D dataset having unstructured coordinates in matplotlib using tricontourf. I'm able to generate a plot of the 'mesh' with triplot, however when I use the same Triangulation object for tricontourf, I get an error (see below). What am I missing? Example:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# Lower and upper boundary curves of the trapezoidal domain.
def lower(x):
    return 2 + 1*x

def upper(x):
    return 60 + 4*x

ni = 10  # number of grid points in x
nj = 12  # number of grid points in y
x = np.linspace(1,15,ni)
## make a trapezoid
xy = np.zeros((ni,nj,2),dtype=np.float32)
for i in range(len(x)):
    y = np.linspace(lower(x[i]),upper(x[i]),nj)
    xy[i,:,0] = x[i]
    xy[i,:,1] = y
## add noise
xy += -0.1 + 0.2*np.random.rand(ni,nj,2)
## make tris 'indices list'
# Build flat vertex indices for the (ni, nj) structured grid.
xi, yi = np.meshgrid(range(ni), range(nj), indexing='xy')
inds_list = np.stack((xi,yi), axis=2)
inds_list = np.reshape(inds_list, (ni*nj,2), order='C')
inds_list = np.ravel_multi_index((inds_list[:,0],inds_list[:,1]), (ni,nj), order='C')
inds_list = np.reshape(inds_list, (ni,nj), order='F')
# Two triangles per quad cell: 2*(ni-1)*(nj-1) triangles in total.
tris = np.zeros((2*(ni-1)*(nj-1),3), dtype=np.int64)
ci=0
for i in range(ni-1):
    for j in range(nj-1):
        tris[ci,0] = inds_list[i+1, j+1]
        tris[ci,1] = inds_list[i, j+1]
        tris[ci,2] = inds_list[i, j ]
        ci+=1
        tris[ci,0] = inds_list[i, j ]
        tris[ci,1] = inds_list[i+1, j ]
        tris[ci,2] = inds_list[i+1, j+1]
        ci+=1
triangulation = mpl.tri.Triangulation(x=xy[:,:,0].ravel(), y=xy[:,:,1].ravel(), triangles=tris)
fig1 = plt.figure(figsize=(4, 4/(16/9)), dpi=300)
ax1 = plt.gca()
ax1.triplot(triangulation, lw=0.5)
#ax1.tricontourf(triangulation)  # uncommenting this raises the IndexError shown below
fig1.tight_layout(pad=0.25)
plt.show()
...this produces the expected plot of the triangular mesh. However, uncommenting the line with ax1.tricontourf
throws the error:
Traceback (most recent call last):
File "test.py", line 54, in <module>
ax1.tricontourf(triangulation)
File "C:\Users\steve\AppData\Roaming\Python\Python38\site-packages\matplotlib\tri\tricontour.py", line 307, in tricontourf
return TriContourSet(ax, *args, **kwargs)
File "C:\Users\steve\AppData\Roaming\Python\Python38\site-packages\matplotlib\tri\tricontour.py", line 29, in __init__
super().__init__(ax, *args, **kwargs)
File "C:\Users\steve\AppData\Roaming\Python\Python38\site-packages\matplotlib\contour.py", line 812, in __init__
kwargs = self._process_args(*args, **kwargs)
File "C:\Users\steve\AppData\Roaming\Python\Python38\site-packages\matplotlib\tri\tricontour.py", line 45, in _process_args
tri, z = self._contour_args(args, kwargs)
File "C:\Users\steve\AppData\Roaming\Python\Python38\site-packages\matplotlib\tri\tricontour.py", line 60, in _contour_args
z = np.ma.asarray(args[0])
IndexError: list index out of range
I am using:
Python version: 3.8.9
matplotlib version: 3.5.1

I would say you need to provide the array of values to contour, e.g.:
x = xy[:,:,0].ravel()
# tricontourf requires one value per triangulation point; dummy data here
z = np.random.rand(x.shape[0])
ax1.tricontourf(triangulation, z)

Related

Plotting sympy.Max yields TypeError

I am trying to generate a 3-dimensional plot for a function involving sympy.Max, but I am getting a type-error.
Minimum example:
import sympy

u, v = sympy.symbols("u v", positive=True)
# Surface is 0 where u*v <= 0.5, otherwise u*v - 0.5.
f = sympy.Max(0, u*v - 0.5)
my_plot = sympy.plotting.plot3d(f, (u, 0, 1), (v, 0, 1), show=False, legend=True)
my_plot.show()
The code runs fine if I remove the -0.5 for example, so it really seems to be about taking the maximum. Any ideas how to fix this?
Full stack trace:
Traceback (most recent call last):
File "/Users/marcusrockel/Documents/projects/copulas_in_systemic_risk/venv/lib/python3.9/site-packages/sympy/plotting/experimental_lambdify.py", line 131, in __call__
results = self.vector_func(*temp_args)
File "/Users/marcusrockel/Documents/projects/copulas_in_systemic_risk/venv/lib/python3.9/site-packages/sympy/plotting/experimental_lambdify.py", line 272, in __call__
return self.lambda_func(*args, **kwargs)
File "<string>", line 1, in <lambda>
File "<__array_function__ internals>", line 180, in amax
File "/Users/marcusrockel/Documents/projects/copulas_in_systemic_risk/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py", line 2793, in amax
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
File "/Users/marcusrockel/Documents/projects/copulas_in_systemic_risk/venv/lib/python3.9/site-packages/numpy/core/fromnumeric.py", line 86, in _wrapreduction
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
TypeError: only integer scalar arrays can be converted to a scalar index
I am using Python 3.9 and sympy 1.11.1.
That's unfortunate: it is a limitation of the current plotting module, which is using experimental_lambdify which is quite old.
Two solutions:
you lambdify your expression, and manually plot it with matplotlib.
from sympy import *
import numpy as np
import matplotlib.pyplot as plt

u, v = symbols("u v", positive=True)
expr = Max(0, u*v - 0.5)
# np.vectorize evaluates the lambdified Max element-wise, avoiding the
# array-argument problem that trips up experimental_lambdify.
f = np.vectorize(lambdify([u, v], expr))
uu, vv = np.mgrid[0:1:50j, 0:1:50j]  # 50x50 evaluation grid over [0,1]^2
zz = f(uu, vv)
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot_surface(uu, vv, zz)
ax.set_xlabel("u")
ax.set_ylabel("v")
plt.show()
take a look at this plotting module, which uses lambdify and it is able to plot it:
from sympy import *
from spb import *  # sympy-plot-backends: uses lambdify instead of experimental_lambdify

u, v = symbols("u v", positive=True)
expr = Max(0, u*v - 0.5)
plot3d(expr, (u, 0, 1), (v, 0, 1))

AttributeError: 'Tensor' object has no attribute 'ndim'

I was following the classification tutorial (https://www.kymat.io/gallery_1d/classif_keras.html#sphx-glr-gallery-1d-classif-keras-py) for 1D wavelet scattering and I receive the error:
Traceback (most recent call last):
File "filter_signals_fft.py", line 145, in <module>
x = Scattering1D(J, Q=Q)(x_in)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\autograph\impl\api.py", line 692, in wrapper
raise e.ag_error_metadata.to_exception(e)
AttributeError: Exception encountered when calling layer "scattering1d" (type Scattering1D).
in user code:
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\frontend\keras_frontend.py", line 17, in call *
return self.scattering(x)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\frontend\keras_frontend.py", line 14, in scattering *
return self.S.scattering(x)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\scattering1d\frontend\tensorflow_frontend.py", line 53, in scattering *
S = scattering1d(x, self.pad_fn, self.backend.unpad, self.backend, self.J, self.log2_T, self.psi1_f, self.psi2_f,
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\scattering1d\core\scattering1d.py", line 76, in scattering1d *
U_0 = pad_fn(x)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\scattering1d\frontend\base_frontend.py", line 80, in pad_fn *
self.pad_mode)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\scattering1d\backend\tensorflow_backend.py", line 71, in pad *
return agnostic.pad(x, pad_left, pad_right, pad_mode, axis=axis)
File "C:\Users\xwb18152\AppData\Roaming\Python\Python38\site-packages\kymatio\scattering1d\backend\agnostic_backend.py", line 36, in pad *
axis_idx = axis if axis >= 0 else (x.ndim + axis)
AttributeError: 'Tensor' object has no attribute 'ndim'
Call arguments received:
• x=tf.Tensor(shape=(None, 4194304), dtype=float32)
I'm not sure why this is the case. I am running Python 3.8.2 [MSC v.1916 64 bit (AMD64)] on win32. Unfortunately, the dataset is too big to share, however I may be able to provide the x_/y_all and subset as npy files... Below is the code I am using:
import tensorflow.compat.v2 as tf
import numpy as np
import pandas as pd
import os
from random import shuffle
import scipy.io.wavfile
from pathlib import Path
from scipy import signal
from scipy.signal import butter, sosfilt, sosfreqz
import librosa
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tensorflow.keras import layers
from kymatio.keras import Scattering1D
# from sys import getsizeof

tf.enable_v2_behavior()
# Set seed for reproducibility
SEED = 42
np.random.seed(SEED)

# We now loop through all recording samples for each movement
# to add to dataset (x/y_all)
movements = 'xyz xy xz x yz y z'.split() # 'xyz xy xz x yz y z'.split()
movements_dict = dict(zip(movements, [7, 4, 5, 1, 6, 2, 3]))
len_max_files = 0.4
len_files = 0
# First pass: count files so the arrays below can be pre-allocated.
for m in movements:
    files = [fle for fle in os.listdir(f'D:/rf_recordings/move_{m}') if fle.endswith('.wav') and fle.split('_')[3] == '1']
    len_files += int(len(files) * len_max_files)
# len([fle for fle in os.listdir(f'D:/rf_recordings/') if fle.endswith('.wav') and fle.split('_')[3] == '1'])
print(len_files)

# Our sampling rate is 2MHz, so a T value of 2**22=~4.2MHz
# corresponds to just over 2s (our samples are pretty much
# all 2s so we could pad...)
T = 2**22
J = 6   # scattering transform maximum scale 2**J
Q = 16  # wavelets per octave
log_eps = 1e-6  # small constant so log() below never sees zero
x_all = np.zeros((len_files, T))
y_all = np.zeros(len_files, dtype=np.uint8)
subset = np.zeros(len_files, dtype=np.uint8)  # 0 = training file, 1 = test file

print('Reading in movement signals')
for m in movements:
    print(m)
    files = [fle for fle in os.listdir(f'D:/rf_recordings/move_{m}') if fle.endswith('.wav') and fle.split('_')[3] == '1']
    shuffle(files)
    files = files[int(len(files) * len_max_files):]
    ratio = int(len(files)*0.2)  # 80/20 train/test split
    train_files = files[ratio:]
    test_files = files[:ratio]
    # print(train_files, len(test_files))
    for k, filename in enumerate(files):
        name = filename.split('_')
        movedist = name[3]
        speed = name[5]
        y = movements_dict[m]
        if filename in train_files:
            subset[k] = 0
        else:
            subset[k] = 1
        # Read in the sample WAV file
        fs, x = scipy.io.wavfile.read(f'D:/rf_recordings/move_{m}/(unknown)') # ('move_x_movedist_1_speed_25k_sample_6.wav') #
        # y = movements_dict[m] # keep as m for now but we will have to do this with params also later.
        # We convert to mono by averaging the left and right channels.
        x = np.mean(x, axis=1)
        x = np.asarray(x, dtype='float') # np.float32)
        # Once the recording is in memory, we normalise it to +1/-1
        # x = x / np.max(np.abs(x))
        x /= np.max(np.abs(x))
        ## Pad signal to T
        x_pad = librosa.util.fix_length(x, size=T)
        # print(x.shape, x_pad.shape)
        # If it's too long, truncate it.
        if len(x) > T:
            x = x[:T]
        # If it's too short, zero-pad it.
        start = (T - len(x)) // 2
        x_all[k, start:start+len(x)] = x
        y_all[k] = y
        # ## The signal is now zero-padded with shape (4194304,)
        # Sx = scattering(x_pad)
        # meta = scattering.meta()
        # order0 = np.where(meta['order'] == 0)
        # order1 = np.where(meta['order'] == 1)
        # order2 = np.where(meta['order'] == 2)
        #
        # plt.figure(figsize=(8, 8))
        # plt.subplot(3, 1, 1)
        # plt.plot(Sx[order0][0])
        # plt.title('Zeroth-order scattering')
        # plt.subplot(3, 1, 2)
        # plt.imshow(Sx[order1], aspect='auto')
        # plt.title('First-order scattering')
        # plt.subplot(3, 1, 3)
        # plt.imshow(Sx[order2], aspect='auto')
        # plt.title('Second-order scattering')
        # plt.show()
print('Done reading!')

# Build the classification model on top of the scattering transform.
x_in = layers.Input(shape=(T))
x = Scattering1D(J, Q=Q)(x_in)  # this is the line that raises the 'ndim' AttributeError above
x = layers.Lambda(lambda x: x[..., 1:, :])(x)  # drop the zeroth-order coefficient
# To increase discriminability, we take the logarithm of the scattering
# coefficients (after adding a small constant to make sure nothing blows up
# when scattering coefficients are close to zero). This is known as the
# log-scattering transform.
x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x)
x = layers.GlobalAveragePooling1D(data_format='channels_first')(x)
x = layers.BatchNormalization(axis=1)(x)
x_out = layers.Dense(10, activation='softmax')(x)
model = tf.keras.models.Model(x_in, x_out)
model.summary()

Using Polyfit to create 3rd degree polynomial with dynamic CSV's

What the data look like although I may end up with more than just three columns:
TotalArea,Pressure,Intensity
12054.2,-0.067,39.579
11980.2,-0.061,41.011
11948,-0.055,42.08
11889.5,-0.04,45.732
11863.6,-0.03,50.573
My goal: I would like to take this CSV file and create A polynomial that will fit the column labeled Intensity and TotalArea.
My code (omitting anything I believe to be purely decorative):
Graph = pd.read_csv("C:Data.csv")
Pl = Graph.dropna()
Bottom = Pl["TotalArea"]
Right = Pl["Intensity"]
arr = Pl.values
x = Bottom
y2 = Right
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
xx = arr[:, [0]]   # TotalArea column
b = xx.ravel()
print(b)
yy = arr[:, [2]]   # Intensity column
c = xx.ravel()  # NOTE(review): presumably yy.ravel() was intended — fitting x against
                # itself explains the near-identity coefficients printed below
y3 = np.polyfit(b, c, 3)  # returns the 4 polynomial coefficients, not fitted y-values
ax2 = ax1.twinx()
ax2.plot(x, y2, color = "r", label='Intensity /Area')
plt.show()
My error: (has to do with polyfit values)
Traceback (most recent call last):
File "/mnt/WinPartition/Users/tomchi/Documents/Programming/Eclipse/PythonDevFiles/so_test.py", line 47, in <module>
ax2.plot(x, y3)
File "/usr/local/lib/python3.5/dist-packages/matplotlib/__init__.py", line 1855, in inner
return func(ax, *args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/matplotlib/axes/_axes.py", line 1527, in plot
for line in self._get_lines(*args, **kwargs):
File "/usr/local/lib/python3.5/dist-packages/matplotlib/axes/_base.py", line 406, in _grab_next_args
for seg in self._plot_args(this, kwargs):
File "/usr/local/lib/python3.5/dist-packages/matplotlib/axes/_base.py", line 383, in _plot_args
x, y = self._xy_from_xy(x, y)
File "/usr/local/lib/python3.5/dist-packages/matplotlib/axes/_base.py", line 242, in _xy_from_xy
"have shapes {} and {}".format(x.shape, y.shape))
ValueError: x and y must have same first dimension, but have shapes (310,) and (2,)
So, my question is: What exactly does this mean? Is it due to pandas dataframe? Can I solve this in a quick manner? Can I be of any more assistance?
I realise now that polyfit just gives me the coefficients to my polynomial
[ -2.27230868e-23 2.74362531e-19 1.00000000e+00 -1.90568829e-12]

matplotlib.pyplot.pcolor message of mismatch error: an L is missing

I often use pcolor method of matplotlib but it sometimes gives me something like a dimension mismatch error that I don't understand. Here a sample code
import numpy as np
import matplotlib.pyplot as plt

idx1 = 180
idx2 = 220
Range1 = range(idx1, idx2, 1)  # 40 values
Range2 = range(512)
z = np.random.randn( len( Range1 ), 512)  # shape (40, 512)
x, y = np.meshgrid( Range1 , Range2)      # x and y each have shape (512, 40)
plt.figure()
plt.pcolor(x, y, z)  # raises the shape-mismatch error shown below
plt.show()
As you can try by yourself the mismatch error is
Traceback (most recent call last):
File "<ipython-input-1-7d51fd1b710e>", line 13, in <module>
plt.pcolor(x, y, z)
File "C:\Users\fedel\Anaconda2c\lib\site-packages\matplotlib\pyplot.py", line 3083, in pcolor
ret = ax.pcolor(*args, **kwargs)
File "C:\Users\fedel\Anaconda2c\lib\site-packages\matplotlib\__init__.py", line 1818, in inner
return func(ax, *args, **kwargs)
File "C:\Users\fedel\Anaconda2c\lib\site-packages\matplotlib\axes\_axes.py", line 5168, in pcolor
X, Y, C = self._pcolorargs('pcolor', *args, allmatch=False)
File "C:\Users\fedel\Anaconda2c\lib\site-packages\matplotlib\axes\_axes.py", line 4996, in _pcolorargs
C.shape, Nx, Ny, funcname))
TypeError: Dimensions of C (40L, 512L) are incompatible with X (40) and/or Y (512); see help(pcolor)
What is the difference between a "40L" dimension and a "40" without L dimension? And what do you suggest me to do in order to avoid this error and plot the data?
The L is not the issue. That's an artifact of using an old Python version (Python 2 has two integer types).
It looks like your z (C to the pcolor method) has a transposed shape, 40 × 512 instead of 512 × 40:
# Print the three array shapes to expose the mismatch.
for a in [x, y, z]:
    print(a.shape)
# (512, 40)
# (512, 40)
# (40, 512)
Transposing z makes it work: plt.pcolor(x, y, z.T)

Unpacking error within numerical integration in python

Here is my code snippet to draw an integral function by NumPy, SciPy and MatPlotLib libraries:
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt

sigma = 1

def integrand(x,n):
    return (n/(2*sigma*np.sqrt(np.pi)))*np.exp(-(n**2*x**2)/(4*sigma**2))

tt = np.linspace(0, 20, 0.01)  # NOTE(review): third argument must be an integer count, not a step
nn = np.linspace(1, 100, 1)
T = np.zeros_like([len(tt), len(nn)])  # NOTE(review): this copies the shape of a 2-element list; np.zeros was intended
for i,t in enumerate(tt):
    for j,n in enumerate(nn):
        T[i,j] = quad(integrand, -oo, t, args = (x))  # NOTE(review): -oo and x are undefined; quad also returns a (value, error) pair
plt.pcolormesh(tt,nn,T)
But there is a ValueError as below about a unpacking-related issue:
Traceback (most recent call last):
File "C:\Program Files\Microsoft Visual Studio 12.0\Common7\IDE\Extensions\Microsoft\Python Tools for Visual Studio\2.1\visualstudio_py_util.py", line 106, in exec_file exec_code(code, file, global_variables)
File "C:\Program Files\Microsoft Visual Studio 12.0\Common7\IDE\Extensions\Microsoft\Python Tools for Visual Studio\2.1\visualstudio_py_util.py", line 82, in exec_code exec(code_obj, global_variables)
File "C:\Users\Matinking\documents\visual studio 2013\Projects\NeuroSimulation\NeuroSimulation\XSundry\test2.py", line 63, in <module>
plt.pcolormesh(tt,nn,T)
File "C:\Python34\lib\site-packages\matplotlib\pyplot.py", line 2946, in pcolormesh
ret = ax.pcolormesh(*args, **kwargs)
File "C:\Python34\lib\site-packages\matplotlib\axes.py", line 7747, in pcolormesh
X, Y, C = self._pcolorargs('pcolormesh', *args, allmatch=allmatch)
File "C:\Python34\lib\site-packages\matplotlib\axes.py", line 7357, in _pcolorargs
numRows, numCols = C.shape
ValueError: need more than 1 value to unpack
Press any key to continue . . .
The issue is seemingly related to the last line of the code, but I can't figure out any solution for it...
Could you please enlighten me on this case?
Kind regards
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt

sigma = 1

def integrand(x, n):
    return (n/(2*sigma*np.sqrt(np.pi)))*np.exp(-(n**2*x**2)/(4*sigma**2))

tt = np.linspace(0, 19, 20) # divides range 0 to 19 into 20 equal size portions. Thus, tt = [0, 1, 2, ..., 19]
nn = np.linspace(1, 100, 100) # nn = [1, 2, ..., 100]
T = np.zeros([len(tt), len(nn)])  # np.zeros (not zeros_like) builds the (20, 100) result array
for i,t in enumerate(tt):
    for j,n in enumerate(nn):
        T[i, j], _ = quad(integrand, -np.inf, t, args=(n,)) # evaluate integral from -infinity to t, with value of n in the function set to n passed in from here.
x, y = np.mgrid[0:20:1, 1:101:1] # create a mesh/grid for plotting.
plt.pcolormesh(x, y, T) #plot
plt.show()

Categories