AssertionError trying to visualize networks - python

I'm trying to create a network connection graph with networkx and pyvis (import networkx as nx, from pyvis.network import Network). The code is as follows:
import pandas as pd
import networkx as nx
from pyvis.network import Network

rules = pd.read_csv("EDMV.conexiones_neg.20220613.apriori.base2.txt", sep="|")
G = nx.from_pandas_edgelist(rules, source="desc.x", target="desc.y", edge_attr="lift")
net = Network(notebook=True, width=1000, height=600)
net.from_nx(G)
When I run the final line I get an AssertionError.
Any ideas what is causing the error?

All you need to do is make sure that your node labels are either integers or strings (in your case, the 'desc.x' and 'desc.y' columns). pyvis asserts that each node id is an int or a str when nodes are added, so any other dtype coming out of the CSV (floats, NaN) trips the AssertionError.
Try the following:
rules = pd.read_csv("EDMV.conexiones_neg.20220613.apriori.base2.txt", sep="|")
rules["desc.x"] = rules["desc.x"].astype(str)
rules["desc.y"] = rules["desc.y"].astype(str)
G = nx.from_pandas_edgelist(rules, source="desc.x", target="desc.y", edge_attr="lift")
net = Network(notebook=True, width=1000, height=600)
net.from_nx(G)
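If the assertion persists after the cast, a quick sanity check on the two columns (a hypothetical check on the same rules DataFrame, run before the cast) can narrow things down:

print(rules[["desc.x", "desc.y"]].dtypes)        # pyvis wants str or int node ids
print(rules[["desc.x", "desc.y"]].isna().sum())  # NaN ids become the string 'nan' after astype(str)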

Related

How to maintain the surround-view cameras' (4 camera images) positions in the point cloud

I am trying to generate a 3D point cloud from 4 RGB-D images. I am able to do that with Open3D, but I am unable to maintain the positions of the cameras. You can find the camera_parameters.json here.
import open3d as o3d
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
import json

def load_json(path):
    with open(path) as f:
        return json.load(f)

def parse_camera_params(camera):
    param = {}
    param['camera'] = camera['camera']
    param['depth'] = {}
    param['depth']['fx'] = camera['K_depth'][0][0]
    param['depth']['fy'] = camera['K_depth'][1][1]
    param['depth']['cx'] = camera['K_depth'][0][2]
    param['depth']['cy'] = camera['K_depth'][1][2]
    param['depth']['K'] = camera['K_depth']
    # ignore distCoeffs_depth's 5th (1000) and 6th (0) element
    # since they are strange
    param['depth']['distCoeffs'] = np.array(camera['distCoeffs_depth'][:5])
    param['depth_width'] = camera['depth_width']
    param['depth_height'] = camera['depth_height']
    param['color'] = {}
    param['color']['fx'] = camera['K_color'][0][0]
    param['color']['fy'] = camera['K_color'][1][1]
    param['color']['cx'] = camera['K_color'][0][2]
    param['color']['cy'] = camera['K_color'][1][2]
    param['color']['K'] = camera['K_color']
    # ignore distCoeffs_color's 5th (1000) and 6th (0) element
    # since they are strange
    param['color']['distCoeffs'] = np.array(camera['distCoeffs_color'][:5])
    param['color_width'] = camera['color_width']
    param['color_height'] = camera['color_height']
    # world to depth
    w2d_T = np.array(camera['M_world2sensor'])
    param['w2d_R'] = w2d_T[0:3, 0:3]
    param['w2d_t'] = w2d_T[0:3, 3]
    param['w2d_T'] = camera['M_world2sensor']
    d2w_T = np.linalg.inv(w2d_T)
    param['d2w_R'] = d2w_T[0:3, 0:3]
    param['d2w_t'] = d2w_T[0:3, 3]
    param['d2w_T'] = d2w_T
    return param

if __name__ == '__main__':
    data_dir = "data/"
    camera_params = load_json(os.path.join(data_dir, 'camera_parameters.json'))
    SVC_NUM = 4
    pcd_combined = o3d.geometry.PointCloud()
    for i in range(SVC_NUM):
        param = parse_camera_params(camera_params['sensors'][i])
        color = cv2.imread(os.path.join(data_dir, 'color_{:05d}.png'.format(i)))
        color = cv2.cvtColor(color, cv2.COLOR_BGR2RGB)
        depth = cv2.imread(os.path.join(data_dir, 'depth_{:05d}.png'.format(i)), -1)
        # depth = depth * 0.001 # factor to scale the depth image from carla
        o3d_color = o3d.geometry.Image(color)
        o3d_depth = o3d.geometry.Image(depth)
        rgbd_image = o3d.geometry.RGBDImage.create_from_tum_format(o3d_color, o3d_depth, False)
        h, w = depth.shape
        dfx, dfy, dcx, dcy = param['depth']['fx'], param['depth']['fy'], param['depth']['cx'], param['depth']['cy']
        intrinsic = o3d.camera.PinholeCameraIntrinsic(w, h, dfx, dfy, dcx, dcy)
        intrinsic.intrinsic_matrix = param['depth']['K']
        cam = o3d.camera.PinholeCameraParameters()
        cam.intrinsic = intrinsic
        cam.extrinsic = np.array(param['w2d_T'])
        pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, cam.intrinsic, cam.extrinsic)
        o3d.io.write_point_cloud("svc_{:05d}_v13.pcd".format(i), pcd)
        pcd_combined += pcd
    o3d.io.write_point_cloud("svc_global_v13.pcd", pcd_combined)
With the above code I am getting output for svc_global_v13.pcd like below.
As you can see, all the images are projected into the center. As indicated in the JSON file, I would like the images to be positioned as left, right, front and rear in the 3D point cloud.
May I know what I am missing here?
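For reference, a minimal sketch of one variant worth trying (not a verified fix), assuming M_world2sensor really is the world-to-sensor transform as the field name suggests: build each cloud in its camera frame with the default identity extrinsic, then place it in the world explicitly with the inverted matrix d2w_T that parse_camera_params already computes.

# Sketch: create each cloud in the camera frame (identity extrinsic),
# then move it to world coordinates with the sensor-to-world transform.
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, cam.intrinsic)
pcd.transform(np.array(param['d2w_T']))  # sensor -> world
pcd_combined += pcd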

Reducing the scale of the UV map for voxel faces via the Python API

I have a voxel object on which I map a texture. The problem is that it looks really weird, because the faces of the voxel cubes have more than one colour on them. I would like to reduce this down to a single colour for each voxel face. I found a way to do that within Blender, but since I want to run this headlessly I need to do the same thing at a lower level and run it via an API call.
Attaching the code below. Any and all help would be appreciated.
import bpy
import os
removeThese = bpy.context.copy()
removeThese['selected_objects'] = list(bpy.context.scene.objects)
bpy.ops.object.delete(removeThese)
sourceDirectory = "model_folder"
model_name = "model.obj"
path = os.path.join(sourceDirectory, model_name)
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl")
model_obj = bpy.context.scene.objects[model_name.split(".")[0]]
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = model_obj
model_obj.select_set(True)
sourceName = bpy.context.object.name
source = bpy.data.objects[sourceName]
#duplicating source model
bpy.ops.object.duplicate_move(OBJECT_OT_duplicate={"linked":False, "mode":'TRANSLATION'})
duplicate_obj = bpy.context.scene.objects[model_name.split(".")[0]+".001"]
bpy.ops.object.select_all(action='DESELECT')
bpy.context.view_layer.objects.active = duplicate_obj
duplicate_obj.select_set(True)
bpy.context.object.name = sourceName + "_Voxelized"
bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
bpy.ops.object.convert(target='MESH')
#source.hide_render = True
#source.hide_viewport = True
targetName = bpy.context.object.name
target = bpy.data.objects[targetName]
#converting to blocks
bpy.ops.object.modifier_add(type='REMESH')
bpy.context.object.modifiers["Remesh"].mode = 'BLOCKS'
bpy.context.object.modifiers["Remesh"].octree_depth = 7
bpy.context.object.modifiers["Remesh"].scale = 0.5
bpy.context.object.modifiers["Remesh"].use_remove_disconnected = True
bpy.ops.object.modifier_apply(modifier="Remesh")
#transferring UVs from source to target
bpy.ops.object.modifier_add(type='DATA_TRANSFER')
bpy.context.object.modifiers["DataTransfer"].use_loop_data = True
bpy.context.object.modifiers["DataTransfer"].data_types_loops = {'UV'}
bpy.context.object.modifiers["DataTransfer"].loop_mapping = 'POLYINTERP_NEAREST'
bpy.context.object.modifiers["DataTransfer"].object = source
bpy.ops.object.datalayout_transfer(modifier="DataTransfer")
bpy.ops.object.modifier_apply(modifier="DataTransfer")
#this is the chunk that reduces each voxel face to one colour
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_mode(type='FACE')
bpy.context.area.ui_type = 'UV'
bpy.context.scene.tool_settings.use_uv_select_sync = False
bpy.context.space_data.uv_editor.sticky_select_mode = 'DISABLED'
bpy.context.scene.tool_settings.uv_select_mode = 'FACE'
bpy.context.space_data.pivot_point = 'INDIVIDUAL_ORIGINS'
bpy.ops.mesh.select_all(action='DESELECT')
#singularizing colours on each voxel face
count = 0
while count < 100:
    bpy.ops.mesh.select_random(ratio=(count / 100) + 0.01, seed=count)
    bpy.ops.uv.select_all(action='SELECT')
    bpy.ops.transform.resize(value=(0.01, 0.01, 0.01))
    bpy.ops.mesh.hide(unselected=False)
    count += 1
#returning to previous context
bpy.context.area.ui_type = 'VIEW_3D'
bpy.ops.mesh.reveal()
bpy.ops.object.editmode_toggle()
bpy.context.area.ui_type = 'TEXT_EDITOR'
#deleting source and keeping target
bpy.ops.object.select_all(action='DESELECT')
source.select_set(True)
bpy.ops.object.delete()
#selecting target
target.select_set(True)
#exporting voxelized model
output_directory = sourceDirectory
vox_model_name = targetName + '.obj'
output_file = os.path.join(output_directory, vox_model_name)
print(output_file)
bpy.ops.export_scene.obj(filepath=output_file)
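As a side note for headless use: the "singularizing" chunk above relies on UI context (bpy.context.area, the UV editor, random face selection), which tends to break in background mode where no such area exists. A minimal context-free sketch of the same idea, assuming the voxelized object is active and already carries the transferred UV layer: collapse each face's UV loops to that face's UV centroid with bmesh, so every face samples a single texel.

import bpy
import bmesh
from mathutils import Vector

# Collapse each face's UVs to the face's UV centroid (one texel per face).
obj = bpy.context.view_layer.objects.active  # the voxelized target
bm = bmesh.new()
bm.from_mesh(obj.data)
uv_layer = bm.loops.layers.uv.active

for face in bm.faces:
    center = Vector((0.0, 0.0))
    for loop in face.loops:
        center += loop[uv_layer].uv
    center /= len(face.loops)
    for loop in face.loops:
        loop[uv_layer].uv = center

bm.to_mesh(obj.data)
bm.free()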

Is there a simple way to convert Tkinter to matplotlib?

I need to modify the following code to print a bar graph of the cluster populations. Briefly, it stores all values in numpy arrays and then prints the bar histogram (number of conformations in each cluster on Y, and some inherent value of the cluster (its energy) on X) using the Tkinter module, which does not seem to be a very practical solution.
r = Tkinter.Tk()
dataList = []
reverseList = []
rLctr = 0
confL = d.ch.conformations
e = d.clusterer.energy_used
#for l in mol.cluSEQ:
for l in d.clusterer.clustering_dict[cut_off]:
    dataList.append([l[0].energy, len(l)])
    reverseList.append(range(rLctr, rLctr + len(l)))
mol.elist = numpy.array(elist)
mol.r = [numpy.minimum.reduce(mol.elist),
         numpy.maximum.reduce(mol.elist)]
mol.nbins = Tkinter.IntVar()
mol.nbins.set(10)
mol.min = Tkinter.StringVar()
mol.min.set(str(mol.r[0]))
mol.max = Tkinter.StringVar()
mol.max.set(str(mol.r[1]))
r = (float(mol.min.get()), float(mol.max.get()))
mol.ehist = HistogramRI(mol.elist,mol.nbins.get(),range=r)
mol.ehist.createReverseIndex()
nodeList = mol.ehist.array
tstr = mol.name + ' histogram'
top = Tkinter.Toplevel()
top.title(tstr)
mol.ehist
#top = Tkinter.Toplevel()
xlabel = 'ENERGY'+ 'clusterized with ' + str(cut_off) + 'A'
mol.clustNB = InteractiveHistogramGraph(mol.name,
                                        master=top, nodeList=dataList, reverseIndex=reverseList,
                                        xlabel_text=xlabel,
                                        ylabel_text='#\nC\nO\nN\nF\nO\nR\nM\nA\nT\nI\nO\nN\nS')
mol.clustNB.draw.update()
mol.clustNB.draw.postscript({'file':outputfilename, 'colormode':'color'})
top.update_idletasks()
Could you suggest a simple way to convert it to matplotlib so that I can control all of the plotting options?
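A minimal sketch of the matplotlib equivalent, assuming dataList has been filled as in the code above (each entry is [cluster_energy, cluster_population]) and that cut_off and outputfilename are the same variables used in the Tkinter version:

import matplotlib.pyplot as plt

energies = [entry[0] for entry in dataList]      # cluster energy (X)
populations = [entry[1] for entry in dataList]   # conformations per cluster (Y)

plt.bar(energies, populations, width=0.1)        # bar width is arbitrary here
plt.xlabel('ENERGY, clusterized with ' + str(cut_off) + ' A')
plt.ylabel('# CONFORMATIONS')
plt.savefig(outputfilename)  # replaces the Tkinter postscript export
plt.show()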

Vertical positioning of nodes in Sankey diagram to avoid collision with links

I'm trying to make a Sankey plot using Plotly, which follows the filtering of certain documents into either being in scope or out of scope, i.e. one source and two targets; however, some documents are filtered during step 1, some during step 2, etc. This leads to the following Sankey plot:
Current output
Now what I would ideally like is for it to look something like this:
Ideal output
I've already tried to look through the documentation at https://plot.ly/python/reference/#sankey, but I fail to find what I'm looking for; ideally I would like to prevent the plot from overlapping nodes and links.
This is the code I'm using to generate the plot object:
def genSankeyPlotObject(df, cat_cols=[], value_cols='', visible=False):
    ### COLOR PALETTE TO USE (hex values need the leading '#' for plotly)
    colorPalette = ['#472d3c', '#5e3643', '#7a444a', '#a05b53', '#bf7958', '#eea160', '#f4cca1', '#b6d53c',
                    '#71aa34', '#397b44', '#3c5956', '#302c2e', '#5a5353', '#7d7071', '#a0938e', '#cfc6b8',
                    '#dff6f5', '#8aebf1', '#28ccdf', '#3978a8', '#394778', '#39314b', '#564064', '#8e478c',
                    '#cd6093', '#ffaeb6', '#f4b41b', '#f47e1b', '#e6482e', '#a93b3b', '#827094', '#4f546b']
    ### CREATES LABELLIST FROM DEFINED COLUMNS
    labelList = []
    for catCol in cat_cols:
        labelListTemp = list(set(df[catCol].values))
        labelList = labelList + labelListTemp
    labelList = list(dict.fromkeys(labelList))
    ### DEFINES THE NUMBER OF COLORS NEEDED FROM THE PALETTE
    colorNum = len(df[cat_cols[0]].unique()) + len(df[cat_cols[1]].unique()) + len(df[cat_cols[2]].unique())
    # repeat the palette enough times to cover colorNum entries
    TempcolorPallet = colorPalette * math.ceil(colorNum / len(colorPalette))
    shuffle(TempcolorPallet)
    colorList = TempcolorPallet[0:colorNum]
    ### TRANSFORMS DF INTO SOURCE -> TARGET PAIRS
    for i in range(len(cat_cols) - 1):
        if i == 0:
            sourceTargetDf = df[[cat_cols[i], cat_cols[i+1], value_cols]]
            sourceTargetDf.columns = ['source', 'target', 'count']
        else:
            tempDf = df[[cat_cols[i], cat_cols[i+1], value_cols]]
            tempDf.columns = ['source', 'target', 'count']
            sourceTargetDf = pd.concat([sourceTargetDf, tempDf])
        sourceTargetDf = sourceTargetDf.groupby(['source', 'target']).agg({'count': 'sum'}).reset_index()
    ### ADDS INDICES TO SOURCE -> TARGET PAIRS
    sourceTargetDf['sourceID'] = sourceTargetDf['source'].apply(lambda x: labelList.index(x))
    sourceTargetDf['targetID'] = sourceTargetDf['target'].apply(lambda x: labelList.index(x))
    ### CREATES THE SANKEY PLOT OBJECT
    data = go.Sankey(node=dict(pad=15,
                               thickness=20,
                               line=dict(color="black", width=0.5),
                               label=labelList,
                               color=colorList),
                     link=dict(source=sourceTargetDf['sourceID'],
                               target=sourceTargetDf['targetID'],
                               value=sourceTargetDf['count']),
                     valuesuffix=' ' + value_cols,
                     visible=visible)
    return data
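For pinning nodes specifically: go.Sankey does accept explicit node coordinates. Setting arrangement='snap' (or 'fixed') together with x and y lists in the node dict keeps nodes where you place them, which is one way to stop nodes and links from colliding. A minimal sketch; the coordinates below are hypothetical, normalized to [0, 1], one entry per label in labelList:

data = go.Sankey(arrangement='snap',  # honour the x/y hints below
                 node=dict(label=labelList,
                           # hypothetical normalized positions, one per node:
                           x=[0.0, 0.5, 0.5, 1.0],
                           y=[0.5, 0.2, 0.8, 0.5],
                           pad=15,
                           thickness=20),
                 link=dict(source=sourceTargetDf['sourceID'],
                           target=sourceTargetDf['targetID'],
                           value=sourceTargetDf['count']))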

How to use vispy to generate a rolling image?

Some context:
I was looking into the vispy module to plot, in real time (or as close to it as possible), data coming from an instrument. My attempt follows.
from vispy.plot import Fig
from vispy import app,scene
from vispy.visuals import TextVisual
import numpy as np
import Queue
FONT_SIZE = 14
MIN = 0
MAX = 1.1
w_size = 100
N = 5000
M = 2500
color_map = 'cubehelix'
q_size = 1000
Nb = 5
#generate (empty) initial data to fill the plot
data = np.zeros(N*M)
data = np.reshape(data, (N,M))
#setup the plot
fig = Fig(show = False,size = (16*w_size,9*w_size),bgcolor='black')
fig.title = 'my plot'
main_plot = fig[0,0].image(data = data,fg_color='w',cmap=color_map,clim=(MIN,MAX))
fig[0,0].view.camera.aspect = N/float(M) * 16./9.
title = scene.Label("someoutput", font_size=FONT_SIZE, color = 'w')
fig[0,0].grid.add_widget(title, row=0, col=4)
fig[0,0].grid[2,4].border_color = 'black'
fig[0,0].grid[2,4].bgcolor = 'black'
xlabel_title = scene.Label("x_axis [unit]", font_size=FONT_SIZE, color = 'w')
fig[0,0].grid.add_widget(xlabel_title, row=4, col=4)
ylabel_title = scene.Label("y_axis [unit]", font_size=FONT_SIZE,rotation=-90, color='w')
fig[0,0].grid.add_widget(ylabel_title, row=2, col=2)
scale = scene.ColorBarWidget(orientation='left',
                             cmap=color_map,
                             label='Some value',
                             clim=(MIN, MAX),
                             border_color='w',
                             border_width=1,
                             label_color='w')
fig[0,0].grid.add_widget(scale, row=2, col=6)
fig[0,0].cbar_right.width_max = \
    fig[0,0].cbar_right.width_min = 50
# fill a queue so as to exclude the generation time from the plotting time
q = Queue.Queue()
for i in range(q_size):
    new_data = (np.abs(0.5 * np.random.randn(Nb * M)[:])).astype('float32')
    new_data = np.reshape(new_data, (Nb, M))
    q.put(new_data[:])
# update function
def update(ev):
    global main_plot, q, data, Nb, M, fig, index
    # acquire
    new_data = q.get()
    # roll the plot data
    data[Nb:, :] = data[:-Nb, :]
    data[:Nb, :] = new_data
    # recycle the new data
    q.put(new_data)
    # update the plot
    main_plot.set_data(data)
    main_plot.update()
# setup timer
interv = 0.01
timer = app.Timer(interval = interv)
timer.connect(update)
timer.start(interval = interv)
if __name__ == '__main__':
    fig.show(run=True)
    app.run()
This code currently works, but it's much slower than the data rate. In the vispy gallery, as well as in some examples, I saw many more points being plotted and updated. I think the main problem is that I set the plot's entire data each time instead of shifting it and inserting only the new points.
I also had a look at this example:
https://github.com/vispy/vispy/blob/master/examples/demo/scene/oscilloscope.py
However, I don't know how to generalize the update function that rolls the data (I have no knowledge of OpenGL), and I cannot use the example as-is because I need a quantitative color scale (which seems well implemented in vispy.plot).
The question:
Is there a way to write a function that rolls the data of a plot generated with the vispy.plot class?
Thanks.
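As a possible starting point: one CPU-side trick that avoids shifting the whole array on every frame is a double-height buffer. Each incoming chunk is written at two offsets, N rows apart, so a contiguous N-row window always exists and set_data just receives a moving view. A minimal sketch under the question's variable names, assuming Nb divides N (as in the code above, where N = 5000 and Nb = 5):

buf = np.zeros((2 * N, M), dtype='float32')
ofs = N  # top row of the visible window

def update(ev):
    global ofs
    new_data = q.get()
    ofs -= Nb
    if ofs < 0:
        ofs += N  # wrap around; the mirrored writes keep the window consistent
    # write the chunk twice, N rows apart
    buf[ofs:ofs + Nb] = new_data
    buf[ofs + N:ofs + N + Nb] = new_data
    q.put(new_data)
    main_plot.set_data(buf[ofs:ofs + N])  # contiguous view, no full-array shift
    main_plot.update()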
