I have put together a script that creates the following: a start and an end curve shape, and a linear curve between them.
Now what I want to do is duplicate and transform the starting curve shape along the path (as depicted in the image) and perform a loft (preferred, as it would probably give the cleanest result), or alternatively loft between the two existing curve shapes and then deform the lofted geometry to the curve. For the latter I have tried:
pm.deformer((loftShape, path), type='curveWarp', name='curveWarp#')
without success. The locators are points calculated to generate the correct bezier curve given different distances/starting angles. I would have thought the hard work was done, but I'm having trouble with this seemingly simple last step.
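For reference, a rough, untested sketch of the first approach (duplicate the profile along the path, then loft) might look something like this; startCurve and path are placeholder names, and orienting each copy to the path tangent is omitted:
import pymel.core as pm

def loftAlongPath(startCurve, path, count=6):
    '''Rough sketch: place copies of the profile at evenly spaced points on the path and loft them.'''
    profiles = []
    for i in range(count):
        t = i / float(count - 1) #normalized 0-1 position along the path.
        dup = pm.duplicate(startCurve)[0]
        pos = pm.pointOnCurve(path, parameter=t, position=True, turnOnPercentage=True)
        pm.xform(dup, translation=pos, worldSpace=True) #position only; aligning to the path tangent still needs handling.
        profiles.append(dup)
    return pm.loft(*profiles, ch=True, uniform=True, degree=3)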
Below is a method I put together to query curve info:
def getClosestCV(x, curves, tolerance=0.0):
'''Find the closest control vertex between the given vertices, CVs, or objects and each of the given curves.
:Parameters:
x (str)(obj)(list) = Polygon vertices, control vertices, objects, or points given as (x,y,z) tuples.
curves (str)(obj)(list) = The reference object in which to find the closest CV for each vertex in the list of given vertices.
tolerance (int)(float) = Maximum search distance. Default is 0.0, which turns off the tolerance flag.
:Return:
        (dict) The given vertices/CVs as keys, each mapped to the U parameter of the closest point on the curve (evaluated for each given curve). ex. {<vertex/cv>: <u parameter>}.
ex. vertices = Init.getComponents(objects, 'vertices')
closestVerts = getClosestCV(curve0, curves)
'''
pm.undoInfo(openChunk=True)
x = pm.ls(x, flatten=1) #assure x arg is a list (if given as str or single object).
npcNode = pm.ls(pm.createNode('nearestPointOnCurve'))[0] #create a nearestPointOnCurve node.
result={}
for curve in pm.ls(curves):
pm.connectAttr(curve.worldSpace, npcNode.inputCurve, force=1) #Connect the curve's worldSpace geometry to the npc node.
for i in x:
if not isinstance(i, (tuple, list, set)):
pos = pm.pointPosition(i)
else:
pos = i
pm.setAttr(npcNode.inPosition, pos)
distance = Init.getDistanceBetweenTwoPoints(pos, pm.getAttr(npcNode.position))
p = pm.getAttr(npcNode.parameter)
if not tolerance:
result[i] = p
elif distance < tolerance:
result[i] = p
pm.delete(npcNode)
pm.undoInfo(closeChunk=True)
return result
def getCvInfo(c, returnType='cv', filter_=[]):
'''Get a dict containing CV's of the given curve(s) and their corresponding point positions (based on Maya's pointOnCurve command).
:Parameters:
- c (str)(obj)(list) = Curves or CVs to get CV info from.
- returnType (str) = The desired returned values. Default is 'cv'.
valid values are:
'cv' = Return a list of all CV's for the given curves.
'count' = Return an integer representing the total number of cvs for each of the curves given.
'parameter', 'position', 'index', 'localPosition', 'tangent', 'normalizedTangent', 'normal', 'normalizedNormal', 'curvatureRadius', 'curvatureCenter'
= Return a dict with CV's as keys and the returnType as their corresponding values.
ex. {NurbsCurveCV(u'polyToCurveShape7.cv[5]'): [-12.186520865542082, 15.260936896515751, -369.6159740743584]}
- filter_ (str)(obj)(list) = Value(s) to filter for in the returned results.
:Return:
(dict)(list)(int) dependant on returnType.
ex. cv_tan = getCvInfo(curve.cv[0:2],'tangent') #get CV tangents for cvs 0-2.
        ex. cvParam = getCvInfo(curve, 'parameter') #get the curve's CVs and their corresponding U parameter values.
ex. filtered = getCvInfo(<curve>, 'normal', <normal>) #filter results for those that match the given value.
'''
result={}
for curve in pm.ls(c):
if '.cv' in str(curve): #if CV given.
cvs = curve
curve = pm.listRelatives(cvs, parent=1)
else: #if curve(s) given
cvs = curve.cv
parameters = Init.getClosestCV(cvs, curve) #use getClosestCV to get the parameter location for each of the curves CVs.
for cv, p in parameters.items():
            if returnType == 'position': # Get cv position
                v = pm.pointOnCurve(curve, parameter=p, position=True)
            elif returnType == 'localPosition':
                v = pm.getAttr(cv) # local cv position
            elif returnType == 'tangent': # Get cv tangent
                v = pm.pointOnCurve(curve, parameter=p, tangent=True)
            elif returnType == 'normalizedTangent':
                v = pm.pointOnCurve(curve, parameter=p, normalizedTangent=True)
            elif returnType == 'normal': # Get cv normal
                v = pm.pointOnCurve(curve, parameter=p, normal=True)
            elif returnType == 'normalizedNormal':
                v = pm.pointOnCurve(curve, parameter=p, normalizedNormal=True) #Returns the (x,y,z) normalized normal at the CV's parameter.
            elif returnType == 'curvatureRadius': # Get cv curvature
                v = pm.pointOnCurve(curve, parameter=p, curvatureRadius=True) #Returns the curvature radius at the CV's parameter.
            elif returnType == 'curvatureCenter':
                v = pm.pointOnCurve(curve, parameter=p, curvatureCenter=True)
            elif returnType == 'parameter': # Return the CV's parameter.
                v = p
            elif returnType == 'count': # total number of CVs for the curve.
                result[curve] = len(Init.getCvInfo(curve))
                break
            elif returnType == 'index': # index of the cv
                s = str(cv)
                v = int(s[s.index('[')+1:s.index(']')])
            else:
                v = None
            result[cv] = v
    if returnType == 'cv':
        result = list(result.keys())
    if filter_:
        if not isinstance(filter_, (tuple, set, list)):
            filter_ = [filter_] #wrap a single value instead of list(), which would split a string into characters.
        try:
            result = {k:v for k,v in result.items() if any((v in filter_, v==filter_))}
        except AttributeError:
            result = [i for i in result if any((i in filter_, i==filter_))]
    if len(result) == 1:
        try:
            result = list(result.values())[0]
        except (AttributeError, TypeError):
            result = result[0]
    return result
I ultimately decided to use the built-in MASH plugin for this. Perhaps this will be of help to someone in the future.
def duplicateAlongCurve(path, start, count=6, geometry='Instancer'):
'''Duplicate objects along a given curve using MASH.
:Parameters:
path (obj) = The curve to use as a path.
        start (obj) = The object to duplicate along the path.
        count (int) = The number of duplicated objects (the point count on the MASH network).
        geometry (str) = Particle instancer or mesh instancer (Repro node). (valid: 'Mesh', 'Instancer') Default is 'Instancer'.
:Return:
(list) The duplicated objects in order of start to end.
'''
pm.undoInfo(openChunk=1)
#create a MASH network
import MASH.api as mapi
mashNW = mapi.Network()
mashNW.MTcreateNetwork(start, geometry=geometry, hideOnCreate=False) #MASH_tools module (derived from 'createNetwork')
curveNode = pm.ls(mashNW.addNode('MASH_Curve').name)[0]
pm.connectAttr(path.worldSpace[0], curveNode.inCurves[0], force=1)
pm.setAttr(curveNode.stopAtEnd, 1) #0=off, 1=on
pm.setAttr(curveNode.clipStart, 0)
pm.setAttr(curveNode.clipEnd, 1)
pm.setAttr(curveNode.timeStep, 1)
pm.setAttr(curveNode.curveLengthAffectsSpeed, 1)
distNode = pm.ls(mashNW.distribute)[0]
pm.setAttr(distNode.pointCount, count)
pm.setAttr(distNode.amplitudeX, 0)
instNode = pm.ls(mashNW.instancer)[0]
baked_curves = mashNW.MTbakeInstancer(instNode) #MASH_tools module (derived from 'MASHbakeInstancer')
result=[start]
for curve in reversed(baked_curves):
result.append(curve)
pm.delete(mashNW.waiter.name()) #delete the MASH network.
pm.undoInfo(closeChunk=1)
return result
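A hypothetical usage sketch tying this back to the original goal (pathCurve and startCurve are illustrative names for the path and profile transforms):
profiles = duplicateAlongCurve(pathCurve, startCurve, count=6)
loftSurface = pm.loft(*profiles, ch=True, uniform=True, degree=3) #loft the duplicated profiles into a surface.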
I am using this script from here: link
I want to add new functionality to it. I want it to define the starting node on the graph not only by finding the closest node (because that produces odd results, like finding the closest node on another road) but by finding the closest edge and then the closest node along that edge.
My code is available below. I created the findnearestnodeonnearestedge function which should do the work but it doesn't work.
It finds the same node for the starting and destination point, even though they are far from each other...
I am using the newest versions of all packages, so you can try the code easily.
Thank you for your help.
import osmnx as ox
import networkx as nx
import plotly.graph_objects as go
import numpy as np
def findnearestnodeonnearestedge(Gr, pointin):
    u, v, key = ox.distance.nearest_edges(Gr, pointin[0], pointin[1]) # use the Gr argument, not the global G
n1 = Gr.nodes[u]
n2 = Gr.nodes[v]
d1 = ox.distance.euclidean_dist_vec(pointin[0], pointin[1], n1['x'], n1['y'])
d2 = ox.distance.euclidean_dist_vec(pointin[0], pointin[1], n2['x'], n2['y'])
if d1 < d2:
nodeid = u
else:
nodeid = v
return nodeid
state = ox.geocode_to_gdf('Georgia, US')
ax = ox.project_gdf(state).plot(fc='gray', ec='none')
_ = ax.axis('off')
# Defining the map boundaries
north, east, south, west = 33.798, -84.378, 33.763, -84.422
# Downloading the map as a graph object
G = ox.graph_from_bbox(north, south, east, west, network_type = 'drive')
# Plotting the map graph
ox.plot_graph(G)
# Displaying the 3rd node
list(G.nodes(data=True))[2]
# Displaying the 1st edge
list(G.edges(data=True))[1]
# Displaying the shape of edge using the geometry
list(G.edges(data=True))[1][2]['geometry']
# define origin and destination locations
origin_point = (33.787201, -84.405076)
destination_point = (33.764135, -84.394980)
# get the nearest nodes to the locations
origin_node = findnearestnodeonnearestedge(G, origin_point)
destination_node = findnearestnodeonnearestedge(G, destination_point)
# printing the closest node id to origin and destination points
origin_node, destination_node
# Finding the optimal path
route = nx.shortest_path(G, origin_node, destination_node, weight = 'length')
route
# getting coordinates of the nodes
# we will store the longitudes and latitudes in following list
long = []
lat = []
for i in route:
point = G.nodes[i]
long.append(point['x'])
lat.append(point['y'])
def plot_path(lat, long, origin_point, destination_point):
"""
Given a list of latitudes and longitudes, origin
and destination point, plots a path on a map
Parameters
----------
lat, long: list of latitudes and longitudes
origin_point, destination_point: co-ordinates of origin
and destination
Returns
-------
Nothing. Only shows the map.
"""
# adding the lines joining the nodes
fig = go.Figure(go.Scattermapbox(
name="Path",
mode="lines",
lon=long,
lat=lat,
marker={'size': 10},
line=dict(width=4.5, color='blue')))
# adding source marker
fig.add_trace(go.Scattermapbox(
name="Source",
mode="markers",
lon=[origin_point[1]],
lat=[origin_point[0]],
marker={'size': 12, 'color': "red"}))
# adding destination marker
fig.add_trace(go.Scattermapbox(
name="Destination",
mode="markers",
lon=[destination_point[1]],
lat=[destination_point[0]],
marker={'size': 12, 'color': 'green'}))
# getting center for plots:
lat_center = np.mean(lat)
long_center = np.mean(long)
# defining the layout using mapbox_style
fig.update_layout(mapbox_style="stamen-terrain", mapbox_center_lat=30, mapbox_center_lon=-80)
fig.update_layout(margin={"r": 0, "t": 0, "l": 0, "b": 0},
mapbox={
'center': {'lat': lat_center, 'lon': long_center},
'zoom': 13})
fig.show()
plot_path(lat, long, origin_point, destination_point)
# Getting the start and end node of this part
start_node=route[-7]
end_node=route[-6]
# Getting the edge connecting these nodes and storing it as a list in z to maintain the data structure of G.edges
z = []
for i in list(G.edges(data=True)):
if (i[0]==start_node) & (i[1]==end_node):
z.append(i)
z[0][2]['geometry']
def node_list_to_path(G, node_list):
"""
Given a list of nodes, return a list of lines that together follow the path
defined by the list of nodes.
Parameters
----------
G : networkx multidigraph
route : list
the route as a list of nodes
Returns
-------
lines : list of lines given as pairs ( (x_start, y_start), (x_stop, y_stop) )
"""
edge_nodes = list(zip(node_list[:-1], node_list[1:]))
lines = []
for u, v in edge_nodes:
# if there are parallel edges, select the shortest in length
data = min(G.get_edge_data(u, v).values(), key=lambda x: x['length'])
# if it has a geometry attribute (ie, a list of line segments)
if 'geometry' in data:
# add them to the list of lines to plot
xs, ys = data['geometry'].xy
lines.append(list(zip(xs, ys)))
else:
# if it doesn't have a geometry attribute, the edge is a straight
# line from node to node
x1 = G.nodes[u]['x']
y1 = G.nodes[u]['y']
x2 = G.nodes[v]['x']
y2 = G.nodes[v]['y']
line = [(x1, y1), (x2, y2)]
lines.append(line)
return lines
# getting the list of coordinates from the path (which is a list of nodes)
lines = node_list_to_path(G, route)
long2 = []
lat2 = []
for i in range(len(lines)):
z = list(lines[i])
l1 = list(list(zip(*z))[0])
l2 = list(list(zip(*z))[1])
for j in range(len(l1)):
long2.append(l1[j])
lat2.append(l2[j])
print("Length of lat: ", len(lat))
print("Length of lat2: ", len(lat2))
plot_path(lat2, long2, origin_point, destination_point)
The problem was that the graph stores coordinates in the reverse order (the node attributes are x = longitude and y = latitude, while the points are given as (lat, lon)). So all pointin[0] and pointin[1] in the function should be swapped, and then it works.
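For reference, a sketch of the function with the coordinate order swapped as described (osmnx's nearest_edges expects x = longitude first and y = latitude second, and nodes store 'x' = longitude, 'y' = latitude; this assumes the same osmnx version used in the question):
import osmnx as ox

def findnearestnodeonnearestedge(Gr, pointin):
    # pointin is given as (lat, lon); osmnx expects x (lon) first, y (lat) second
    u, v, key = ox.distance.nearest_edges(Gr, pointin[1], pointin[0])
    n1 = Gr.nodes[u]
    n2 = Gr.nodes[v]
    # compare the point to each edge endpoint, pairing lat with 'y' and lon with 'x'
    d1 = ox.distance.euclidean_dist_vec(pointin[0], pointin[1], n1['y'], n1['x'])
    d2 = ox.distance.euclidean_dist_vec(pointin[0], pointin[1], n2['y'], n2['x'])
    return u if d1 < d2 else v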
If you look at this DAG (directed acyclic graph):
I want to create a dict which maps the distance from the lowest node(s) to all other nodes, which is similar to the x position (height) of each node from the bottom in the rendered graph.
For that given graph it would be:
distance_nodes_map: {
0: {'base-zero', 'base-one'},
1: {'low-b', 'low-a', 'low-c'},
3: {'high-x', 'high-z', 'high-y'},
2: {'mid-r', 'mid-q', 'mid-p'},
4: {'super'}
}
I wrote an algorithm which worked for the graph above, but then I tested another graph and it didn't work anymore. I tried some algorithms and functions like shortest path or descendants_at_distance, but I don't think they are really helpful as an input to calculate the distances.
My algorithm doesn't work for instance for this graph:
https://gist.github.com/timaschew/3b08a07243fa6f43773014ef5e705c96
Here is a gist which contains:
- a Python script which reads a YAML file (the dependency/graph structure) and generates an HTML page with a rendered mermaid graph (I've removed my algorithm, which calculated the distances in a wrong way)
- both graphs shown here, as YAML files
You are looking for an algorithm that draws a layered graph. There are many different algorithms, and you should choose the one that best fits your needs (for example, have a look at the following paper, A Technique for Drawing Directed Graphs by Gansner et al.).
Many of those algorithms are already implemented in Graphviz (famous and powerful graph visualization software). Once you have installed it, it's pretty straightforward to compute the result you are looking for (G is your directed acyclic graph built using networkx.DiGraph):
from networkx.drawing.nx_agraph import graphviz_layout
def get_distance_nodes_map(G):
pos = graphviz_layout(G, prog='dot')
coor = sorted({y for k, (x, y) in pos.items()})
kmap = dict(zip(coor, range(len(coor))))
distance_nodes_map = {level: set() for level in kmap.values()}
for k, (x, y) in pos.items():
distance_nodes_map[kmap[y]].add(k)
return distance_nodes_map
Here are a couple of examples using data that you provided:
>>> from networkx import DiGraph
>>> from pprint import PrettyPrinter
>>> pp = PrettyPrinter()
>>> G1 = DiGraph()
>>> G1.add_edges_from([('super', 'high-x'), ('high-x', 'mid-p'),
... ('mid-p', 'low-b'), ('mid-p', 'low-c'),
... ('low-c', 'base-zero'), ('low-c', 'base-one'),
... ('high-y', 'mid-p'), ('high-y', 'base-zero'),
... ('high-z', 'base-one'), ('high-z', 'mid-r'),
... ('high-z', 'mid-q'), ('mid-q', 'low-a'),
... ('low-a', 'base-one')])
>>> pp.pprint(get_distance_nodes_map(G1))
{0: {'base-one', 'base-zero'},
1: {'low-a', 'low-b', 'low-c'},
2: {'mid-p', 'mid-r', 'mid-q'},
3: {'high-y', 'high-x', 'high-z'},
4: {'super'}}
>>> G2 = DiGraph()
>>> G2.add_edges_from([('n10', 'n11'), ('n11', 'n12'), ('n12', 'n13'),
... ('n13', 'n14'), ('n20', 'n14'), ('n20', 'n21'),
... ('n21', 'n22'), ('n22', 'n23'), ('n30', 'n23'),
... ('n30', 'n31'), ('n31', 'n32')])
>>> pp.pprint(get_distance_nodes_map(G2))
{0: {'n32'},
1: {'n31', 'n23'},
2: {'n30', 'n22'},
3: {'n21', 'n14'},
4: {'n13', 'n20'},
5: {'n12'},
6: {'n11'},
7: {'n10'}}
Untested pseudo-code because my lunch break is nearly over:
You have a multi-root tree, with one chosen principal root.
1. For each root, create a subgraph consisting of all reachable nodes for that root.
2. Starting with the principal root (root a), compute the distance/shortest path length to the root for all nodes in the corresponding subgraph A.
3. Find all subgraphs that share at least one node with the principal subgraph, and select the subgraph (subgraph B) that has the node (node x) with the smallest distance to the principal root.
4. Compute the distance to the root b for all nodes in subgraph B. Add the distance d(node x, root a). Subtract the distance d(node x, root b).
5. Create the union of subgraphs A and B. Repeat steps 3-5 until no roots remain.
6. Subtract the maximum distance and reverse the sign such that the principal root has the largest distance/order value.
Edit:
My pseudocode works (*). I blame user error. ;-)
#!/usr/bin/env python
"""
https://stackoverflow.com/q/66584661/
"""
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def hierarchical_layout(graph):
longest_path = nx.algorithms.dag.dag_longest_path(graph)
principal_root = longest_path[0]
roots = [node for node, degree in list(graph.in_degree) if degree==0]
subgraphs = {root : create_subgraph(graph, root) for root in roots}
# Starting with the principal root (root a), compute the
# longest path length to the root for all nodes in the
# corresponding subgraph A.
node_to_level = single_source_longest_dag_path_length(subgraphs[principal_root], principal_root)
explored = subgraphs[principal_root]
del subgraphs[principal_root]
while len(explored) < len(graph):
# Find all subgraphs that share at least one node with the
# principal subgraph, and select the subgraph (subgraph B) that
# has the node (node x) with the smallest distance to the
# principal root.
minimum_cost = np.inf
minimum_cost_node = None
minimum_cost_root = None
for root, subgraph in subgraphs.items():
for node in subgraph.nodes:
if node in node_to_level:
if node_to_level[node] < minimum_cost:
minimum_cost = node_to_level[node]
minimum_cost_node = node
minimum_cost_root = root
assert minimum_cost_node, "Could not find a connected subgraph."
# Compute the distance to the root b for all nodes in subgraph
# B. Add the distance d(node x, root a). Subtract the distance
# d(node x, root b).
path_lengths = [len(path) for path in nx.all_simple_paths(subgraphs[minimum_cost_root], minimum_cost_root, minimum_cost_node)]
offset = np.max(path_lengths) - 1
for node, distance in single_source_longest_dag_path_length(subgraphs[minimum_cost_root], minimum_cost_root).items():
if not node in node_to_level:
node_to_level[node] = distance + minimum_cost - offset
# Create the union of subgraph A and B.
explored = nx.compose(explored, subgraphs[minimum_cost_root])
del subgraphs[minimum_cost_root]
return node_to_level
def create_subgraph(G, node):
# https://stackoverflow.com/a/45678930/2912349
nodes = nx.single_source_shortest_path(G,node).keys()
return G.subgraph(nodes)
def single_source_longest_dag_path_length(graph, s):
# from AlaskaJoslin's comment to https://stackoverflow.com/a/60978007/2912349
dist = dict.fromkeys(graph.nodes, -float('inf'))
dist[s] = 0
topo_order = nx.topological_sort(graph)
for n in topo_order:
for s in graph.successors(n):
if dist[s] < dist[n] + 1:
dist[s] = dist[n] + 1
return dist
if __name__ == '__main__':
# edge_list = [
# ("n10", "n11"),
# ("n11", "n12"),
# ("n12", "n13"),
# ("n13", "n14"),
# ("n20", "n21"),
# ("n20", "n14"),
# ("n21", "n22"),
# ("n22", "n23"),
# ("n30", "n23"),
# ("n30", "n31"),
# ("n31", "n32"),
# ]
edge_list = [
("low-a", "base-one"),
("low-c", "base-zero"),
("low-c", "base-one"),
("mid-p", "low-b"),
("mid-p", "low-c"),
("mid-q", "low-a"),
("high-x", "mid-p"),
("high-y", "mid-p"),
("high-y", "base-zero"),
("high-z", "mid-q"),
("high-z", "mid-r"),
("high-z", "base-one"),
("super", "high-x"),
]
graph = nx.DiGraph()
graph.add_edges_from(edge_list)
node_to_level = hierarchical_layout(graph)
# reverse output format
distance_nodes_map = dict()
max_distance = np.max(list(node_to_level.values()))
for node, distance in node_to_level.items():
reversed_distance = max_distance - distance
if reversed_distance in distance_nodes_map:
distance_nodes_map[reversed_distance].add(node)
else:
distance_nodes_map[reversed_distance] = set([node])
# print(distance_nodes_map)
for ii, nodes in sorted(distance_nodes_map.items())[::-1]:
print(f"{ii} : {nodes}")
Yields:
# 4 : {'super'}
# 3 : {'high-x', 'high-y', 'high-z'}
# 2 : {'mid-p', 'mid-r', 'mid-q'}
# 1 : {'low-a', 'low-b', 'low-c'}
# 0 : {'base-one', 'base-zero'}
(*) "subtract distance d(node x, root b)" naturally implied the longest path length between node x and root b. Obviously.
I understand that I can only concatenate things of similar types, but I'm really confused as to why the following are of different types.
This is a part of my code:
import sys
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import gurobipy as GRB
def solveOptCyclePartition(diGraph):
# Create the Optimization Model
try:
m = GRB.Model("ctrlModel")
linkWeights = {}
for (u,v) in sorted(diGraph.edges(data=False)):
linkWeights[(u,v)] = diGraph[u][v]['weight']
#Create variables
e = m.addVars(sorted(diGraph.edges(data=False)),lb=0.0,ub=1.0,
obj=linkWeights , name="e")
# Add the objective function
m.setObjective( (e.prod(linkWeights)), GRB.MAXIMIZE)
#Add Constraint: One output edge from each node
m.addConstrs( (e.sum(i,'*') == 1 for i in nx.nodes(diGraph)), "outDegree")
#Add Constraint: One input edge from each node
m.addConstrs( (e.sum('*',i) == 1 for i in nx.nodes(diGraph)), "inDegree")
# Compute optimal solution
m.optimize()
# Print solution
if m.status == GRB.Status.OPTIMAL:
objOptimalVal = m.getAttr('objVal')
solution = m.getAttr('x', e)
for (u,v) in sorted(diGraph.edges(data=False)):
if solution[u,v] > 0:
print('%s -> %s: %g' % (u, v, solution[u,v]))
return objOptimalVal , solution
except:
print('Error reported')
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#def main():
# Network Adjacency Matrix
A = np.matrix([[0,0,0],
[1,0,0],
[1,0,1]])
G = nx.DiGraph()
G = nx.from_numpy_matrix(A)
# Check to see if connected
if nx.is_connected(G) == False:
print('The graph is not connected and has unaccessible nodes')
sys.exit(0)
# Nodes and Edge Lists
nodes = nx.nodes(G)
N = len(nodes)
edgeList = nx.to_edgelist(G)
# M Actuator nodes are selected randomly fron N nodes
M = 1;
augNodes = range(N,N+M)
actuatorNodes = sorted(list(np.random.permutation(np.arange(N))[:M]))
actuatorEdges = [(i,actuatorNodes[N-i],{'weight':1}) for i in augNodes ]
# Augmented edges lists and augmented graph
augEdgeList = (edgeList + actuatorEdges +
[(i,j,{'weight':0}) for i in nodes for j in augNodes] +
[(i,i,{'weight':0}) for i in nodes+augNodes if (i,i,{'weight':1}) not in edgeList])
Gprim = nx.DiGraph()
Gprim = nx.from_edgelist(augEdgeList,Gprim)
# Poljak Algorithm: Perform maximum weight cycle partitioning on
# augmented graph and return the optimal solution
objOptimalVal, solution = solveOptCyclePartition(Gprim)
plt.draw()
# Evaluate the solution
#if __name__ == '__main__':
# main()
I'm trying to solve a cycle partitioning problem to find, among all the partitions, the one that encompasses the maximum number of edges with unit weight and satisfies the following constraint. But I get this error:
*** TypeError: can only concatenate list (not "range") to list
Without the full stack trace it's hard to be sure exactly where this error is occurring, but I believe that it is happening on the following line:
[(i,i,{'weight':0}) for i in nodes+augNodes if (i,i,{'weight':1}) not in edgeList])
The particular section to pay attention to is nodes+augNodes.
The nodes variable contains the return value from calling networkx.nodes() which returns a list according to the documentation. augNodes on the other hand is being set to a range on the following line:
augNodes = range(N,N+M)
Since you can't concatenate a range to a list, you're getting the TypeError that you see.
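One possible fix is to materialize the range as a list before concatenating, e.g.:
# make augNodes a list so that nodes + augNodes is list + list
augNodes = list(range(N, N + M))
# alternatively, convert at the point of use: nodes + list(augNodes)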
I am trying to implement a function that removes a fraction of the highest-degree nodes of the initial LCC (1% in every step) until the LCC is destroyed completely (size zero). To test my code I am using the DBLP co-authorship network, for which it has already been shown that the LCC is destroyed after removing 1.5% of the highest-degree nodes. However, my code fails to do that.
One thing I am not sure about: when we mask some nodes and then use GraphView(G, vfilt=mask), it does not take care of isolated vertices after masking, so I had to check for them manually. Thanks for any hint!
def _breake_lcc(self, G, remove_ratio):
'''
G:graph
remove_ratio = ratio for removing high degree nodes
'''
#lcc of complete graph
l = gt.label_largest_component(G)
lcc_graph = gt.GraphView(G,vfilt=l)
lcc_org_size = float(lcc_graph.num_vertices())
lcc_size = [lcc_org_size/lcc_org_size]
#print 'lcc_size_zero: ', lcc_graph.num_vertices()
#extract the vertices inside lcc
node_ind = np.where(l.a==1)[0]
#compute the degree of all nodes
degree = lcc_graph.degree_property_map('total')
#extract the degreee of nodes in the lcc
lcc_vertex_degree = degree.a[node_ind]
#sort the nodes based on degree
sorted_idx = np.argsort(lcc_vertex_degree)[::-1]
sorted_vertex = set(node_ind[sorted_idx])
#number of nodes
n = len(node_ind)
#number of top x% degree nodes to be removed -ratio to size
remove_size = int((n*remove_ratio)/100.)
print 'remove size',remove_size
step = 1
#extract top x% vertices to remove
nodes_to_be_removed = list(sorted_vertex)[:remove_size]
#removed the above vertices from the list of valid vertices
valid_vertices = list(sorted_vertex - set(nodes_to_be_removed))
#continue until there is no other node to remove
while len(valid_vertices)>0:
print step * remove_ratio
# print len(valid_vertices)
#filter out those nodes from lcc.
#THIS IS AUTOMATICALLY UPDATE LCC_GRAPH
l.a[np.array(nodes_to_be_removed)] = False
#I REALIZED GRAPHVIEW DOES NOT TAKE CARE OF ISOLATED NODE AUTOMATICALLY
#SO I CHECK FOR THEM MANUALLY
ind_remove = [int(i) for i in lcc_graph.vertices() if i.out_degree()==0]
if len(ind_remove)>0:
print len(ind_remove)
mask = lcc_graph.new_vertex_property("bool")
#mask.a[::] = True
mask.a = l.a.copy()
#l.a[np.array(ind_remove)] = False
mask.a[np.array(ind_remove)] = False
#mask.a[nodes_to_be_removed] = False
#print np.all(mask.a==l.a)
lcc_graph = gt.GraphView(lcc_graph, vfilt=mask)
#update the valid nodes
valid_vertices = list(set(valid_vertices) - set(ind_remove))
print len(valid_vertices), len(sorted_vertex),lcc_graph.num_vertices()
lcc_temp_size = float(lcc_graph.num_vertices())
#print 'valid', len(valid_vertices),lcc_temp_size
valid_vertices = list(sorted_vertex -set(nodes_to_be_removed))
nodes_to_be_removed = valid_vertices[:int(remove_size)]
print lcc_temp_size,lcc_org_size
rel_size = lcc_temp_size/lcc_org_size
lcc_size.append(rel_size)
print 'lcc size', rel_size
print '------------'
step+=1
return lcc_size
I have a raster file and a WGS84 lat/lon point.
I would like to know what value in the raster corresponds with the point.
My feeling is that I should use GetSpatialRef() on the raster object or one of its bands and then apply an ogr.osr.CoordinateTransformation() to the point to map it into the raster's space.
My hope would then be that I could simply ask the raster's bands what is at that point.
However, the raster object doesn't seem to have a GetSpatialRef() or a way to access a geo-located point, so I'm somewhat at a loss for how to do this.
Any thoughts?
Say I have a GeoTIFF file test.tif. Then the following code should look up the value somewhere near the pixel. I am not that confident about the part that looks up the cell, and will fix it if there is an error. This page should help: "GDAL Data Model".
Also, you may go to gis.stackexchange.com to find experts, if you haven't.
import gdal, osr
class looker(object):
"""let you look up pixel value"""
def __init__(self, tifname='test.tif'):
"""Give name of tif file (or other raster data?)"""
# open the raster and its spatial reference
self.ds = gdal.Open(tifname)
srRaster = osr.SpatialReference(self.ds.GetProjection())
# get the WGS84 spatial reference
srPoint = osr.SpatialReference()
srPoint.ImportFromEPSG(4326) # WGS84
# coordinate transformation
self.ct = osr.CoordinateTransformation(srPoint, srRaster)
# geotranformation and its inverse
gt = self.ds.GetGeoTransform()
dev = (gt[1]*gt[5] - gt[2]*gt[4])
gtinv = ( gt[0] , gt[5]/dev, -gt[2]/dev,
gt[3], -gt[4]/dev, gt[1]/dev)
self.gt = gt
self.gtinv = gtinv
# band as array
b = self.ds.GetRasterBand(1)
self.arr = b.ReadAsArray()
def lookup(self, lon, lat):
"""look up value at lon, lat"""
# get coordinate of the raster
xgeo,ygeo,zgeo = self.ct.TransformPoint(lon, lat, 0)
# convert it to pixel/line on band
u = xgeo - self.gtinv[0]
v = ygeo - self.gtinv[3]
# FIXME this int() is probably bad idea, there should be
# half cell size thing needed
xpix = int(self.gtinv[1] * u + self.gtinv[2] * v)
ylin = int(self.gtinv[4] * u + self.gtinv[5] * v)
# look the value up
return self.arr[ylin,xpix]
# test
l = looker('test.tif')
lon,lat = -100,30
print l.lookup(lon,lat)
lat,lon =28.816944, -96.993333
print l.lookup(lon,lat)
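For reference, the gtinv tuple above is just the inverse of the affine geotransform. With GT = (x0, a, b, y0, c, d), GDAL maps pixel/line (P, L) to xgeo = x0 + a*P + b*L and ygeo = y0 + c*P + d*L, so inverting gives
P = ( d*(xgeo - x0) - b*(ygeo - y0)) / (a*d - b*c)
L = (-c*(xgeo - x0) + a*(ygeo - y0)) / (a*d - b*c)
which is exactly what lookup() computes (with dev = a*d - b*c) before truncating to integer pixel indices.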
Yes, the API isn't consistent. The raster (the data source) has a GetProjection() method instead (which returns WKT).
Here is a function that does what you want (drawn from here):
# imports required by this excerpt: struct, Affine (from the 'affine' package), and the GDAL/OGR bindings
import struct
from affine import Affine
from osgeo import gdal, osr

def extract_point_from_raster(point, data_source, band_number=1):
"""Return floating-point value that corresponds to given point."""
# Convert point co-ordinates so that they are in same projection as raster
point_sr = point.GetSpatialReference()
raster_sr = osr.SpatialReference()
raster_sr.ImportFromWkt(data_source.GetProjection())
transform = osr.CoordinateTransformation(point_sr, raster_sr)
point.Transform(transform)
# Convert geographic co-ordinates to pixel co-ordinates
x, y = point.GetX(), point.GetY()
forward_transform = Affine.from_gdal(*data_source.GetGeoTransform())
reverse_transform = ~forward_transform
px, py = reverse_transform * (x, y)
px, py = int(px + 0.5), int(py + 0.5)
# Extract pixel value
band = data_source.GetRasterBand(band_number)
structval = band.ReadRaster(px, py, 1, 1, buf_type=gdal.GDT_Float32)
result = struct.unpack('f', structval)[0]
if result == band.GetNoDataValue():
result = float('nan')
return result
Its documentation is as follows (drawn from here):
spatial.extract_point_from_raster(point, data_source, band_number=1)
data_source is a GDAL raster, and point is an OGR point object. The
function returns the value of the pixel of the specified band of
data_source that is nearest to point.
point and data_source need not be in the same reference system, but
they must both have an appropriate spatial reference defined.
If the point does not fall in the raster, RuntimeError is raised.
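A hypothetical usage sketch (file path and coordinates are illustrative; the axis-order line applies to GDAL 3+):
from osgeo import gdal, ogr, osr

ds = gdal.Open('test.tif')
wgs84 = osr.SpatialReference()
wgs84.ImportFromEPSG(4326)
wgs84.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER) #keep x = lon, y = lat on GDAL 3+
point = ogr.Geometry(ogr.wkbPoint)
point.AssignSpatialReference(wgs84)
point.AddPoint(-96.993333, 28.816944) #x = lon, y = lat
value = extract_point_from_raster(point, ds)
print(value)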
project = self.ds.GetProjection()
srPoint = osr.SpatialReference(wkt=project)
Done. With that, the vector file has adopted the projection from the input raster file.