Using pydot with networkx - python

I am trying to plot the following list as a graph with edge thickness proportional to the weights.
g_list=[('Alpha', 'Alpha', 7.06), ('Alpha', 'Bravo', 0.98), ('Alpha', 'Charlie', 0.0), ('Alpha', 'Delta', 0.0), ('Alpha', 'Echo', 1.57), ('Alpha', 'Foxtrot', 2.16), ('Alpha', 'Golf', 1.57), ('Alpha', 'Hotel', 0.39), ('Alpha', 'India', 0.0), ('Alpha', 'Juliet', 0.2), ('Alpha', 'Kilo', 0.59), ('Bravo', 'Alpha', 1.66), ('Bravo', 'Bravo', 8.54), ('Bravo', 'Charlie', 1.21), ('Bravo', 'Delta', 1.78), ('Bravo', 'Echo', 0.25), ('Bravo', 'Foxtrot', 0.76), ('Bravo', 'Golf', 1.66), ('Bravo', 'Hotel', 1.59), ('Bravo', 'India', 2.87), ('Bravo', 'Juliet', 1.72), ('Bravo', 'Kilo', 1.27), ('Charlie', 'Alpha', 1.0), ('Charlie', 'Bravo', 2.5), ('Charlie', 'Charlie', 7.0), ('Charlie', 'Delta', 5.0), ('Charlie', 'Echo', 0.0), ('Charlie', 'Foxtrot', 0.5), ('Charlie', 'Golf', 3.0), ('Charlie', 'Hotel', 0.0), ('Charlie', 'India', 0.5), ('Charlie', 'Juliet', 2.5), ('Charlie', 'Kilo', 1.5)]
The following code works but is not pretty
import networkx as nx
import matplotlib.pyplot as plt  # BUG FIX: plt was used below but never imported

G = nx.Graph()
# Keep only edges that are not self-loops and have a non-zero weight.
for src, dst, wt in g_list:
    if src != dst and wt != 0.0:
        G.add_edge(src, dst, weight=wt)

pos = nx.spring_layout(G)
# Draw edges one at a time so each line width can track its weight.
for edge in G.edges(data='weight'):
    nx.draw_networkx_edges(G, pos, edgelist=[edge], width=edge[2])
nx.draw_networkx(G, pos, with_labels=True, arrows=True, arrowstyle='<-', alpha=1, node_color='#ffffff')
plt.axis('off')
plt.savefig('graph.jpg')
The sort of presentation I'm looking for can be obtained using pydot as follows
G = nx.DiGraph()
# Edge direction is reversed (dst -> src); self-loops and zero weights are skipped.
for src, dst, wt in g_list:
    if src != dst and wt != 0.0:
        G.add_edge(dst, src, weight=wt)

# Hand the graph to GraphViz via pydot for a cleaner layout.
p = nx.drawing.nx_pydot.to_pydot(G)
p.write_png('graph.png')
This is a better looking graph but when I try to add the variable thickness edges back using
pos = nx.spring_layout(G)
# One draw call per edge so each edge gets its own width.
for weighted_edge in G.edges(data='weight'):
    nx.draw_networkx_edges(G, pos, edgelist=[weighted_edge], width=weighted_edge[2])
p = nx.drawing.nx_pydot.to_pydot(G)
I end up with the first graph again. Is there any way of combining the two approaches so that I get the layout of pydot and control over the drawing of the network edges? I have tried the following
# Ask GraphViz's dot for node positions, then draw with networkx.
pos = nx.nx_pydot.pydot_layout(G, prog='dot')
nx.draw_networkx(G, pos, with_labels=True, arrows=True, arrowstyle='<-', alpha=1, node_color='#ffffff')
# Re-draw each edge with a width proportional to its weight.
for weighted_edge in G.edges(data='weight'):
    nx.draw_networkx_edges(G, pos, edgelist=[weighted_edge], width=weighted_edge[2])
with the following result, but still not the clear layout I get in the second graph.

If you want to use GraphViz's dot to render your graph with varying edge line width, you'll need to convert the weight to a penwidth attribute that GraphViz understands.
I found using the actual weight made things way too thick, so here's something that takes the square root of the weight.
Note you can use add_weighted_edges_from to convert your data in one fell swoop, too.
import math

import networkx as nx
from networkx.drawing.nx_pydot import to_pydot

g_list = [
    ("Alpha", "Alpha", 7.06),
    ("Alpha", "Bravo", 0.98),
    ("Alpha", "Charlie", 0.0),
    ("Alpha", "Delta", 0.0),
    ("Alpha", "Echo", 1.57),
    ("Alpha", "Foxtrot", 2.16),
    ("Alpha", "Golf", 1.57),
    ("Alpha", "Hotel", 0.39),
    ("Alpha", "India", 0.0),
    ("Alpha", "Juliet", 0.2),
    ("Alpha", "Kilo", 0.59),
    ("Bravo", "Alpha", 1.66),
    ("Bravo", "Bravo", 8.54),
    ("Bravo", "Charlie", 1.21),
    ("Bravo", "Delta", 1.78),
    ("Bravo", "Echo", 0.25),
    ("Bravo", "Foxtrot", 0.76),
    ("Bravo", "Golf", 1.66),
    ("Bravo", "Hotel", 1.59),
    ("Bravo", "India", 2.87),
    ("Bravo", "Juliet", 1.72),
    ("Bravo", "Kilo", 1.27),
    ("Charlie", "Alpha", 1.0),
    ("Charlie", "Bravo", 2.5),
    ("Charlie", "Charlie", 7.0),
    ("Charlie", "Delta", 5.0),
    ("Charlie", "Echo", 0.0),
    ("Charlie", "Foxtrot", 0.5),
    ("Charlie", "Golf", 3.0),
    ("Charlie", "Hotel", 0.0),
    ("Charlie", "India", 0.5),
    ("Charlie", "Juliet", 2.5),
    ("Charlie", "Kilo", 1.5),
]

graph = nx.DiGraph()
# Add edges, but reverse direction, remove self-loops, and remove zero-weight edges
graph.add_weighted_edges_from((b, a, w) for (a, b, w) in g_list if w > 0 and a != b)

# GraphViz reads `penwidth`, not `weight`; sqrt keeps thick edges from dominating.
for _, _, attrs in graph.edges(data=True):
    attrs["penwidth"] = round(1 + math.sqrt(attrs["weight"]), 2)

p = to_pydot(graph)
p.write_png("graph.png")
The output is

Related

Matplotlib Imshow Showing the Wrong Colours on Update

I'm trying to make a heatmap over time, but I think matplotlib is messing with the plot colours.
My code is based on the heat equation, I think the specs are not important, the main thing is that I am creating a 3D array and plotting a slice from that array (a 2D matrix), setting which slice I plot using the matplotlib widget Slider.
The important part of the code is this:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
# BUG FIX: the original called colors.LinearSegmentedColormap but never
# imported a `colors` name; import the class directly instead.
from matplotlib.colors import LogNorm, LinearSegmentedColormap


def update(val):
    """Slider callback: display slice `val` of the 3-D array `mat`."""
    newdata = mat[:, :, int(val)]
    plot.set_data(newdata)
    plt.title(f'{val}')
    plt.draw()


def init_plot():
    """Build the figure, custom colormap, colorbar and slice slider."""
    global plot
    fig, ax = plt.subplots()
    flukacolours = [(1.0, 1.0, 1.0), (0.9, 0.6, 0.9), (1.0, 0.4, 1.0), (0.9, 0.0, 1.0), (0.7, 0.0, 1.0), (0.5, 0.0, 0.8), (0.0, 0.0, 0.8),
                    (0.0, 0.0, 1.0), (0.0, 0.6, 1.0), (0.0, 0.8, 1.0), (0.0, 0.7, 0.5), (0.0, 0.9, 0.2), (0.5, 1.0, 0.0), (0.8, 1.0, 0.0),
                    (1.0, 1.0, 0.0), (1.0, 0.8, 0.0), (1.0, 0.5, 0.0), (1.0, 0.0, 0.0), (0.8, 0.0, 0.0), (0.6, 0.0, 0.0), (0.0, 0.0, 0.0)]
    cmap_name = 'fluka'
    cm = LinearSegmentedColormap.from_list(cmap_name, flukacolours, N=30)
    plot = plt.imshow(mat[:, :, 0], cmap=cm, norm=LogNorm(vmin=mat.min(), vmax=mat.max()), aspect='auto')
    ax = plot.axes
    cbar = plt.colorbar(plot, ax=ax)
    plt.subplots_adjust(left=0.10, bottom=0.15, right=1, top=0.9)
    axfreq = plt.axes([0.10, 0.02, 0.8, 0.03])
    # BUG FIX: valmax must be the last valid index (shape-1); the original
    # allowed val == mat.shape[2], which makes update() raise IndexError.
    freq_slider = Slider(ax=axfreq, label='Slice', valmin=0, valmax=mat.shape[2] - 1, valinit=0, valstep=1, orientation='horizontal')
    freq_slider.on_changed(update)
    plt.show()


if __name__ == "__main__":
    mat = crazy_function()  # This function returns a 3D np.array
    init_plot()
The problem is seen in some slices of the plot, where the colours just... break. In the images below I am showing the differences between 3 consecutive slices. At this point, I thought the problem was in my crazy_function(), but then I noticed the graph value that appears in the upper right corner when you place the cursor inside the chart.
Trying to place the cursor at the same maximum point for each plot, the 36th slice is showing a green tint, which would mean a value in the order 10⁻¹⁶ (as shown in colorbar), but the cursor value shows 7x10⁻⁸, which is the right value of the array that matplotlib is not showing correctly.
.
I think the problem might be my custom colour scale, or more likely the absurdly large scale of the colorbar. Because changing the scale vmin and vmax in the plt.imshow, the colour break tends to decrease and even stop. Which is not a problem, I even prefer a shorter scale to visualize the data, but I was really curious about the cause of this problem.
If you know the answer, I'd love to know. In case it matters, my current version of matplotlib is 3.5.1.

Plotting with errorbars in Python

I have a list, the form is [[(x1, y1), (x2, y2), (x3, y3)], [...], [...]]
[[(10.0, -1.0), (7.0, 0.05889647076017157), (13.0, 0.47096776983628086)], [(10.5, -1.0), (13.0, 0.07080679131269396), (7.5, 0.16547229577841294)], [(11.0, -1.0), (8.0, 0.27471205881135075), (13.5, 0.682988382311833)]]
I would like to extract the first index in tuples from the list.
For example, the list above would be -> [[(10.0), (7.0), (13.0)], [(10.5), (13.0), (7.5)], [(11.0), (8.0), (13.5)]] (The form: [[(x1), (x2), (x3)], [(x4), (x5), (x6)], [(x7), (x8), (x9)]])
and then turn into a plot with errorbars. (The first value in the tuples would be the main values, and other two values would be errors)
This is what I am trying to get:
How can I do this? I can't find any similar example online.
You can get the list out in this way:
# Take the first element of every tuple, keeping the nested-list structure.
vals = [[point[0] for point in group] for group in lst]
vals
[[10.0, 7.0, 13.0], [10.5, 13.0, 7.5], [11.0, 8.0, 13.5]]
To plot, it's easier to have it in a np matrix, and sorted because the error bar function needs the length as input and not the coordinate:
import numpy as np
import matplotlib.pyplot as plt

# Sort each row so it reads (low, centre, high), then turn the outer
# columns into offsets relative to the centre value.
vals = np.sort(np.array(vals))
vals[:, [0, 2]] -= vals[:, 1:2]
vals
array([[-3. , 10. , 3. ],
[-3. , 10.5, 2.5],
[-3. , 11. , 2.5]])
fig, ax = plt.subplots(1, 1)
# yerr wants positive (lower, upper) magnitudes; the lower offsets are
# negative after the subtraction above, hence the sign flip on column 0.
ax.errorbar(vals[:,1], vals[:,1], yerr=[-vals[:,0],vals[:,2]], fmt='o')
plt.show()
You can try
import matplotlib.pyplot as plt

x = [[(10.0, -1.0), (7.0, 0.05889647076017157), (13.0, 0.47096776983628086)], [(10.5, -1.0), (13.0, 0.07080679131269396), (7.5, 0.16547229577841294)], [(11.0, -1.0), (8.0, 0.27471205881135075), (13.5, 0.682988382311833)]]
# One box per inner list, built from the first element of each tuple.
first_values = [[point[0] for point in row] for row in x]
plt.boxplot(first_values)

Create colormap from dictionary

I am using geopandas to plot a map with categorical colors. geopandas is an interface to matplotlib, so the same structures should work. What I'm after is to have each key display in a specific color. A random color from a predefined cm is fine, as long as the No data category can be made grey.
keys = ['A', 'B', 'C', 'D', 'No data']
# Evenly spaced sample points in [0, 1), one per key.
color_range = list(np.linspace(0, 1, len(keys), endpoint=False))
colors = [plt.cm.Pastel1(point) for point in color_range]
color_dict = dict(zip(keys, colors))
# Force the sentinel category to grey regardless of the colormap.
color_dict['No data'] = 'lightgrey'
The dictionary I get from this looks correct:
{'A': (0.98, 0.70, 0.68, 1.0),
'B': (0.70, 0.80, 0.89, 1.0),
'C': (0.87, 0.79, 0.89, 1.0),
'D': (1.0, 1.0, 0.8, 1.0),
'No data': 'lightgrey'}
When I try to convert it to a colormap, I get a KeyError: 0
# NOTE(review): this is the failing line — from_list expects a sequence of
# colors (e.g. list(color_dict.values())), not a dict, hence the KeyError: 0.
cmap = LinearSegmentedColormap.from_list('mycmap', color_dict)
My final plot command is:
# Categorical geopandas plot coloured by the 'GROUP' column with the custom cmap.
merged_df.plot(column='GROUP', categorical=True, legend=True, cmap=cmap)

Create input arrows in networkx

I'm wondering if is possible to create some sort of notation to indicate inputs for a graph in networkx / Python.
For example, taking the image presented here, supposed I want to be able to identify sources and sinks. I would be interested in creating a representation such as:
Is that possible?
The annotate functionality does exactly what you want. (See also the annotate intro)
Given the example you linked, you could just add the following to annotate selected nodes. (It depends on the positions dictionary pos to know where the nodes are located).
ax = plt.gca()
# Annotate the selected nodes; `pos` supplies the anchor coordinates.
for label, node in (("3", 'B'), ("4", 'F')):
    ax.annotate(label, xy=pos.get(node), xytext=(0, 40), textcoords='offset points',
                arrowprops=dict(facecolor='black', shrink=0.15),
                bbox=dict(boxstyle="round", fc="cyan"))
So that this answer is self-contained, Full code including linked example:
import networkx as nx
import matplotlib.pyplot as plt

G = nx.DiGraph()
G.add_edges_from(
    [('A', 'B'), ('A', 'C'), ('D', 'B'), ('E', 'C'), ('E', 'F'),
     ('B', 'H'), ('B', 'G'), ('B', 'F'), ('C', 'G')])

val_map = {'A': 1.0, 'D': 0.5714285714285714, 'H': 0.0}
values = [val_map.get(node, 0.25) for node in G.nodes()]

# Specify the edges you want here
red_edges = [('A', 'C'), ('E', 'C')]
edge_colours = ['red' if edge in red_edges else 'black'
                for edge in G.edges()]
black_edges = [edge for edge in G.edges() if edge not in red_edges]

# Need to create a layout when doing separate calls to draw nodes and edges
pos = nx.spring_layout(G)
nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('jet'),
                       node_color=values, node_size=500)
nx.draw_networkx_labels(G, pos)
nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r', arrows=True)
nx.draw_networkx_edges(G, pos, edgelist=black_edges, arrows=False)

# add annotations for selected nodes.
ax = plt.gca()
for label, node in (("3", 'B'), ("4", 'F')):
    ax.annotate(label, xy=pos.get(node), xytext=(0, 40), textcoords='offset points',
                arrowprops=dict(facecolor='black', shrink=0.15),
                bbox=dict(boxstyle="round", fc="cyan"))
plt.show()

Generating your own colormap in SciPy

I'm using Python and Scipy to perform some basic image manipulation. I've made the image greyscale and subtracted it from a gaussian blur of itself as a form of edge detection. Now I'd like to make it look pretty when running the .imshow() command. If I use one of the default colormaps, for instance,
# Display the greyscale image with the built-in 'binary' colormap.
matplotlib.pyplot.imshow(lena, cmap='binary')
where lena is a matrix representing my image in question, the image appears washed out with a grey background. It looks quite a lot like this.
I would like the image to appear sharper, with only two real colors, white and black, and very little (or no) grey in between.
Since none of the other default colormaps in SciPy can do this, I figured I should make my own. But I'm afraid I don't fully grasp the documentation provided by scipy.
So let's say I have the colormap from the tutorial:
# Per-channel control points in the form (x, value_left, value_right),
# as in the matplotlib custom-colormap tutorial.
_origin = (0.0, 0.0, 0.0)
cdict = {
    'red': (_origin, (0.5, 1.0, 0.7), (1.0, 1.0, 1.0)),
    'green': (_origin, (0.5, 1.0, 0.0), (1.0, 1.0, 1.0)),
    'blue': (_origin, (0.5, 1.0, 0.0), (1.0, 0.5, 1.0)),
}
# Old positional signature: (name, segmentdata, number of levels).
my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap', cdict, 256)
matplotlib.pyplot.imshow(lena, cmap=my_cmap)
How should this look if I want the colormap to be exclusively white from range 0 to .5, and exclusively black from range .5 to 1? Thanks to everyone for the help!
I would expect something like this:
# White from 0 to 0.5, black from 0.5 to 1, as the question asks.
# Row format is (x, value_left, value_right); the differing middle values
# (1.0 left, 0.0 right) create the hard jump at x = 0.5.
# (The original rows produced the inverse map: black below 0.5, white above.)
cdict = {'red': ((0.0, 1.0, 1.0),
                 (0.5, 1.0, 0.0),
                 (1.0, 0.0, 0.0)),
         'green': ((0.0, 1.0, 1.0),
                   (0.5, 1.0, 0.0),
                   (1.0, 0.0, 0.0)),
         'blue': ((0.0, 1.0, 1.0),
                  (0.5, 1.0, 0.0),
                  (1.0, 0.0, 0.0))}
See also: http://matplotlib.org/examples/pylab_examples/custom_cmap.html
Please excuse me if I'm wrong. I never created jumps like the one you want.

Categories