How to use pandas with matplotlib to create 3D plots - python

I am struggling a bit with the pandas transformations needed to make data render in 3D with matplotlib. The data I have is usually in columns of numbers (usually time and some value). So let's create some test data to illustrate.
import pandas as pd
pattern = ("....1...."
"....1...."
"..11111.."
".1133311."
"111393111"
".1133311."
"..11111.."
"....1...."
"....1....")
# create the data and coords
Zdata = list(map(lambda d:0 if d == '.' else int(d), pattern))
Zinverse = list(map(lambda d:1 if d == '.' else -int(d), pattern))
Xdata = [x for y in range(1,10) for x in range(1,10)]
Ydata = [y for y in range(1,10) for x in range(1,10)]
# pivot the data into columns
data = [d for d in zip(Xdata,Ydata,Zdata,Zinverse)]
# create the data frame
df = pd.DataFrame(data, columns=['X','Y','Z',"Zi"], index=zip(Xdata,Ydata))
df.head(5)
Edit: This block of data is demo data that would normally come from a query on a database and may need more cleaning and transforms before plotting. In this case the data is already aligned and there are no problems, aside from having one extra column we don't need (Zi).
So the numbers in pattern are transferred into height data in the Z column of df ('Zi' being the inverse image). With that as the data frame, the best I have come up with is the pivot approach below, which takes three separate operations, and I wonder if it can be done better.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
Xs = df.pivot(index='X', columns='Y', values='X').values
Ys = df.pivot(index='X', columns='Y', values='Y').values
Zs = df.pivot(index='X', columns='Y', values='Z').values
ax.plot_surface(Xs,Ys,Zs, cmap=cm.RdYlGn)
plt.show()
Although I have something working, I feel there must be a better way than what I'm doing. On a big data set I would imagine doing three pivots is an expensive way to plot something. Is there a more efficient way to transform this data?

I guess you can avoid some steps during the preparation of the data by not using pandas (but only numpy arrays) and by using some convenience functions provided by numpy such as linspace and meshgrid.
I rewrote your code to do so, trying to keep the same logic and the same variable names:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
pattern = ("....1...."
"....1...."
"..11111.."
".1133311."
"111393111"
".1133311."
"..11111.."
"....1...."
"....1....")
# Extract the value according to your logic
Zdata = list(map(lambda d:0 if d == '.' else int(d), pattern))
# Assuming the pattern is always a square
size = int(len(Zdata) ** 0.5)
# Create a mesh grid for plotting the surface
Xdata = np.linspace(1, size, size)
Ydata = np.linspace(1, size, size)
Xs, Ys = np.meshgrid(Xdata, Ydata)
# Convert the Zdata to a numpy array with the appropriate shape
Zs = np.array(Zdata).reshape((size, size))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot the surface
ax.plot_surface(Xs, Ys, Zs, cmap=cm.RdYlGn)
plt.show()
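If you would rather stay in pandas, a single pivot is also enough: pivot only Z, then build the X and Y meshes from the pivot's index and columns. This is just a minimal sketch (assuming df as built in the question, on a regular X/Y grid), not a benchmarked replacement:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D  # registers the 3d projection on older matplotlib
# One pivot instead of three: Z becomes a 2D grid with Y as rows and X as columns
Zpivot = df.pivot(index='Y', columns='X', values='Z')
Xs, Ys = np.meshgrid(Zpivot.columns.values, Zpivot.index.values)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(Xs, Ys, Zpivot.values, cmap=cm.RdYlGn)
plt.show()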

Related

matplotlib contourf plot sparsity whitespace, need interpolation?

I have a set of data captured in a pandas data frame which I would like to plot on a contourf plot. When plotting, I can see a lot of white space in certain areas of the contour which I'm not sure how to fix. My x-data is semilog. I'm not sure if some kind of interpolation would help, or if it is something about the way I am generating my mesh grid and the contour itself. I will attach an image and two sets of data frames as examples.
[contour plot image]
Data file can be found here: https://drive.google.com/drive/folders/13aO1_P0wzLCjZSTIgalXyaR4cdW1_Rh8?usp=sharing
import os,sys
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#dev
import pprint
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(suppress=True)
# start
Data = pd.read_csv('DF.csv',index_col=0)
plt.rcParams['figure.figsize'] = (16,10)
Freqs = Data.iloc[:,0] # Frequencies for data
angleFullset= ['{:03}'.format(x) for x in [*range(0,360,15)]] # test set, name of df cols in degrees
angleContour = [[int(x) for x in angleFullset],[int(x) if int(x) < 181 else int(x) - 360 for x in angleFullset]] # rename column names to -180 to 180 deg
angleContour[0].append(angleContour[0][0]); angleContour[1].append(angleContour[1][0] - 1) # append 1 more column for last data set (which is same as first)
idx_180 = angleContour[1].index(180)
angleContour[0].insert(idx_180 + 1,-180); angleContour[1].insert(idx_180 + 1,-180) # insert another column after 180 to cover -180 case
[X,Y] = np.meshgrid(Freqs,angleContour[1])
fig,ax = plt.subplots(1,1)
ax.semilogx()
plt.hlines(0,20,20000,'k',linewidth=1.5) # zero axis
plt.vlines(100,-200,200,'k',linewidth=2) # 100Hz axis
plt.vlines(1000,-200,200,'k',linewidth=2) # 1kHz axis
plt.vlines(10000,-200,200,'k',linewidth=2) # 10kHz axis
plt.xlim([85,8000])
plt.ylim([-180,180])
plt.xticks([100,1000,8000],('100','1000','8000'))
plt.yticks(range(-180,181,30))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Angle [deg]')
plt.grid(b=True,which='major'); plt.grid(b=True,which='minor')
plt.title('Contour')
newData = Data.copy()
newData.drop("Freq",axis=1,inplace=True)
newData['001'] = newData['000'] # for data from -345 to 0
newData.insert(newData.columns.get_loc('180')+1,'-180',newData['180']) # for data from -180 to -165
lev_min,lev_max,levels = -70,-19,range(-70,-19,1)
CM = ax.contourf(X,Y,newData.transpose(),cmap=matplotlib.cm.jet,levels=levels,vmin=lev_min,vmax=lev_max)
plt.colorbar(CM,label='Magnitude [dB]',fraction=0.1)
outputFileName = os.path.join(os.getcwd(),'Contour.png')
plt.savefig(outputFileName,orientation='landscape',format='png')
plt.clf()
plt.cla()
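One thing worth checking (a guess, since the linked data is not reproduced here): contourf leaves a region unfilled, i.e. white, wherever the data falls outside the explicit levels range (-70 to -20 dB above). Letting the levels follow the data, or passing extend='both', fills those regions. A minimal self-contained sketch with made-up data standing in for DF.csv:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Hypothetical stand-in for the real data: magnitude vs. frequency and angle
freqs = np.logspace(np.log10(20), np.log10(20000), 200)
angles = np.arange(-180, 181, 15)
X, Y = np.meshgrid(freqs, angles)
Z = -40 + 25 * np.cos(np.radians(Y)) * np.log10(X / 20) / 3  # some values rise above -20 dB
fig, ax = plt.subplots()
ax.semilogx()
levels = range(-70, -19, 1)
# extend='both' colours values below the lowest and above the highest level
# instead of leaving them unfilled (white)
CM = ax.contourf(X, Y, Z, cmap=matplotlib.cm.jet, levels=levels, extend='both')
plt.colorbar(CM, label='Magnitude [dB]')
plt.show()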

Contour plot with multiple files

I have a sequence of data files which contain two columns of data (an x value and a z value). I want to assign each file a unique constant y value in a loop and then use the x, y, z values to make a contour plot.
import glob
import matplotlib.pyplot as plt
import numpy as np
files = glob.glob(r'C:\Users\DDT\Desktop\DATA TIANYU\materials\AB2O4\synchronchron\OX1\YbFe1Mn1O4_2cyc_600_meth_ox1-*.xye')
s1 = 1
for file in files:
    t1 = s1/3
    x, z = np.loadtxt(file, skiprows=3, unpack=True, usecols=[0, 1])
    def f(x, y):
        return x*0 + y*0 + z
    l1 = np.size(x)
    y = np.full(l1, t1, dtype=int)
    X, Y = np.meshgrid(x, y)
    Z = f(X, Y)
    plt.contour(X, Y, Z)
    s1 = s1 + 1
    continue
plt.show()
This code runs without errors, however all I get is an empty figure with nothing in it.
What mistake did I make?
It is very hard to guess what you're trying to do. Here is an attempt. It supposes that all x-arrays are equal, and that the y values really make sense (which is hard if the files are read in an unspecified order). To get a useful plot, the data from all the files should be collected before starting to plot.
import glob
import matplotlib.pyplot as plt
import numpy as np
files = glob.glob('........')
zs = []
for file in files:
    x, z = np.loadtxt(file, skiprows=3, unpack=True, usecols=[0, 1])
    zs.append(z)
# without creating a new x, the x from the last file will be used
# x = np.linspace(0, 15, 10)
y = np.linspace(-100, 1000, len(zs))
zs = np.array(zs)
fig, axs = plt.subplots(ncols=2)
axs[0].scatter(np.tile(x, y.size), np.repeat(y, x.size), c=zs)
axs[1].contour(x, y, zs)
plt.show()
With simulated random data, the scatter plot and the contour plot would look like:
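If you want to try the snippet above without the data files, the random test data can be simulated with something along these lines (my own stand-in, not the code the answer used):
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
num_files = 10  # pretend we read 10 files
x = np.linspace(0, 15, 50)
# one smooth-ish random z-curve per "file"; zs has shape (len(y), len(x))
zs = np.array([np.cumsum(np.random.randn(x.size)) for _ in range(num_files)])
y = np.linspace(-100, 1000, num_files)
fig, axs = plt.subplots(ncols=2)
axs[0].scatter(np.tile(x, y.size), np.repeat(y, x.size), c=zs)
axs[1].contour(x, y, zs)
plt.show()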

How to adjust branch lengths of dendrogram in matplotlib (like in astrodendro)? [Python]

Here is my resulting plot below, but I would like it to look like the truncated dendrograms in astrodendro, such as this:
There is also a really cool looking dendrogram from this paper that I would like to recreate in matplotlib.
Below is the code for generating an iris data set with noise variables and plotting the dendrogram in matplotlib.
Does anyone know how to either: (1) truncate the branches like in the example figures; and/or (2) use astrodendro with a custom linkage matrix and labels?
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
import astrodendro
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial import distance
def iris_data(noise=None, palette="hls", desat=1):
    # Iris dataset
    X = pd.DataFrame(load_iris().data,
                     index = [*map(lambda x:f"iris_{x}", range(150))],
                     columns = [*map(lambda x: x.split(" (cm)")[0].replace(" ","_"), load_iris().feature_names)])
    y = pd.Series(load_iris().target,
                  index = X.index,
                  name = "Species")
    c = map_colors(y, mode=1, palette=palette, desat=desat)#y.map(lambda x:{0:"red",1:"green",2:"blue"}[x])
    if noise is not None:
        X_noise = pd.DataFrame(
            np.random.RandomState(0).normal(size=(X.shape[0], noise)),
            index=X_iris.index,
            columns=[*map(lambda x:f"noise_{x}", range(noise))]
        )
        X = pd.concat([X, X_noise], axis=1)
    return (X, y, c)
def dism2linkage(DF_dism, method="ward"):
    """
    Input: A (m x m) dissimilarity Pandas DataFrame object where the diagonal is 0
    Output: Hierarchical clustering encoded as a linkage matrix
    Further reading:
    http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.cluster.hierarchy.linkage.html
    https://pypi.python.org/pypi/fastcluster
    """
    # Linkage Matrix
    Ar_dist = distance.squareform(DF_dism.as_matrix())
    return linkage(Ar_dist,method=method)
# Get data
X_iris_with_noise, y_iris, c_iris = iris_data(50)
# Get distance matrix
df_dism = 1- X_iris_with_noise.corr().abs()
# Get linkage matrix
Z = dism2linkage(df_dism)
#Create dendrogram
with plt.style.context("seaborn-white"):
    fig, ax = plt.subplots(figsize=(13,3))
    D_dendro = dendrogram(
        Z,
        labels=df_dism.index,
        color_threshold=3.5,
        count_sort = "ascending",
        #link_color_func=lambda k: colors[k]
        ax=ax
    )
    ax.set_ylabel("Distance")
I'm not sure this really constitutes a practical answer, but it does allow you to generate dendrograms with truncated hanging lines. The trick is to generate the plot as normal, then manipulate the resulting matplotlib plot to recreate the lines.
I couldn't get your example to work locally, so I've just created a dummy dataset.
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import numpy as np
a = np.random.multivariate_normal([0, 10], [[3, 1], [1, 4]], size=[5,])
b = np.random.multivariate_normal([0, 10], [[3, 1], [1, 4]], size=[5,])
X = np.concatenate((a, b),)
Z = linkage(X, 'ward')
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
dendrogram(Z, ax=ax)
The resulting plot is the usual long-arm dendrogram.
Now for the more interesting bit. A dendrogram is made up of a number of LineCollection objects (one for each colour). To update the lines we iterate through these, extracting the details about their constituent paths, modifying these to remove any lines reaching to a y of zero, and then recreating a LineCollection for these modified paths.
The updated path is then added to the axes, and the original is removed.
The one tricky part is determining what height to draw to instead of zero. Since we are iterating over each of the dendrogram's paths, we don't know which point came before, so we basically have no idea where we are. However, we can exploit the fact that hanging lines hang vertically. Assuming no two hanging lines share the same x, we can look up the other known y values at a given x and use the lowest of them as the basis for our new y. The downside is that, in order to make sure we have this number, we have to pre-scan the data.
Note: if your dendrogram can have hanging lines at the same x, you would also need to track the y and search for the nearest y above this point at that x.
import numpy as np
from matplotlib.path import Path
from matplotlib.collections import LineCollection
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
dendrogram(Z, ax=ax);
for c in ax.collections[:]:  # use [:] to get a copy, since we're adding to the same list
    paths = []
    for path in c.get_paths():
        segments = []
        y_at_x = {}
        # Pre-pass over all elements, to find the lowest y value at each x value.
        # We can use this to calculate where to cut our lines.
        for n, seg in enumerate(path.iter_segments()):
            x, y = seg[0]
            # Don't store if the y is zero, or if it's higher than the current low.
            if y > 0 and y < y_at_x.get(x, np.inf):
                y_at_x[x] = y
        for n, seg in enumerate(path.iter_segments()):
            x, y = seg[0]
            if y == 0:
                # If we know the lowest y at this x, use it - 0.5, limited to > 0
                y = max(0, y_at_x.get(x, 0) - 0.5)
            segments.append([x, y])
        paths.append(segments)
    lc = LineCollection(paths, colors=c.get_colors())  # Recreate a LineCollection with the same params
    ax.add_collection(lc)
    c.remove()  # Remove the original LineCollection (ax.collections.remove is not supported on newer matplotlib)
The resulting dendrogram looks like this:

Plot multiple bars for categorical data

I'm looking for a way to plot multiple bars per value in matplotlib. For numerical data, this can be achieved by adding an offset to the X data, as described for example here:
import numpy as np
import matplotlib.pyplot as plt
X = np.array([1,3,5])
Y = [1,2,3]
Z = [2,3,4]
plt.bar(X - 0.4, Y) # offset of -0.4
plt.bar(X + 0.4, Z) # offset of 0.4
plt.show()
plt.bar() (and ax.bar()) also handle categorical data automatically:
X = ['A','B','C']
Y = [1,2,3]
plt.bar(X, Y)
plt.show()
Here, it is obviously not possible to add an offset, as the categories are not directly associated with a value on the axis. I can manually assign numerical values to the categories and set labels on the x axis with plt.xticks():
X = ['A','B','C']
Y = [1,2,3]
Z = [2,3,4]
_X = np.arange(len(X))
plt.bar(_X - 0.2, Y, 0.4)
plt.bar(_X + 0.2, Z, 0.4)
plt.xticks(_X, X) # set labels manually
plt.show()
However, I'm wondering if there is a more elegant way that makes use of the automatic category handling of bar(), especially if the number of categories and bars per category is not known beforehand (this causes some fiddling with the bar widths to avoid overlaps).
There is no automatic support for subcategories in matplotlib.
Placing bars with matplotlib
You can place the bars numerically, as you propose yourself in the question, and of course let the code manage the unknown number of subcategories.
import numpy as np
import matplotlib.pyplot as plt
X = ['A','B','C']
Y = [1,2,3]
Z = [2,3,4]
def subcategorybar(X, vals, width=0.8):
    n = len(vals)
    _X = np.arange(len(X))
    for i in range(n):
        plt.bar(_X - width/2. + i/float(n)*width, vals[i],
                width=width/float(n), align="edge")
    plt.xticks(_X, X)
subcategorybar(X, [Y,Z,Y])
plt.show()
Using pandas
You may also use the pandas plotting wrapper, which does the work of figuring out the number of subcategories. It plots one group of bars per column of a DataFrame.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
X = ['A','B','C']
Y = [1,2,3]
Z = [2,3,4]
df = pd.DataFrame(np.c_[Y,Z,Y], index=X)
df.plot.bar()
plt.show()
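A small follow-up on the pandas variant (my own addition, not from the answer above): giving the DataFrame named columns makes the automatic legend readable, and rot=0 keeps the category labels horizontal:
import matplotlib.pyplot as plt
import pandas as pd
X = ['A', 'B', 'C']
Y = [1, 2, 3]
Z = [2, 3, 4]
# Column names become the legend entries; the index becomes the x tick labels
df = pd.DataFrame({'series Y': Y, 'series Z': Z, 'series Y2': Y}, index=X)
df.plot.bar(rot=0)
plt.show()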

Normalizing pandas DataFrame rows by their sums

What is the most idiomatic way to normalize each row of a pandas DataFrame? Normalizing the columns is easy, so one (very ugly!) option is:
(df.T / df.T.sum()).T
Pandas broadcasting rules prevent df / df.sum(axis=1) from doing this.
To overcome the broadcasting issue, you can use the div method:
df.div(df.sum(axis=1), axis=0)
See pandas User Guide: Matching / broadcasting behavior
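For a quick check, here is a minimal example of the div approach (made-up numbers):
import pandas as pd
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 1], 'c': [2, 0, 1]})
# Divide each row by its own sum; axis=0 aligns the divisor with the row index
normalized = df.div(df.sum(axis=1), axis=0)
print(normalized)
print(normalized.sum(axis=1))  # every row now sums to 1.0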
I would suggest using the scikit-learn preprocessing module and transposing your dataframe as required:
'''
Created on 05/11/2015
#author: rafaelcastillo
'''
import matplotlib.pyplot as plt
import pandas
import random
import numpy as np
from sklearn import preprocessing
def create_cos(number_graphs,length,amp):
    # This function is used to generate cos-kind graphs for testing
    # number_graphs: to plot
    # length: number of points included in the x axis
    # amp: Y domain modifications to draw different shapes
    x = np.arange(length)
    amp = np.pi*amp
    xx = np.linspace(np.pi*0.3*amp, -np.pi*0.3*amp, length)
    for i in range(number_graphs):
        iterable = (2*np.cos(x) + random.random()*0.1 for x in xx)
        y = np.fromiter(iterable, float)
        if i == 0:
            yfinal = y
            continue
        yfinal = np.vstack((yfinal,y))
    return x,yfinal
x,y = create_cos(70,24,3)
data = pandas.DataFrame(y)
x_values = data.columns.values
num_rows = data.shape[0]
fig, ax = plt.subplots()
for i in range(num_rows):
    ax.plot(x_values, data.iloc[i])
ax.set_title('Raw data')
plt.show()
std_scale = preprocessing.MinMaxScaler().fit(data.transpose())
df_std = std_scale.transform(data.transpose())
data = pandas.DataFrame(np.transpose(df_std))
fig, ax = plt.subplots()
for i in range(num_rows):
    ax.plot(x_values, data.iloc[i])
ax.set_title('Data Normalized')
plt.show()
