AttributeError: 'module' object has no attribute 'set_start_method' - python
The code below starts fine in PyCharm.
But by starting with the command line:
"python field_basket_design_uwr.py"
it gives error:
Traceback (most recent call last):
File "field_basket_design_uwr.py", line 677, in <module>
mp.set_start_method('spawn')
AttributeError: 'module' object has no attribute 'set_start_method'
Does somebody have an idea how to make the script start without this error?
#!/usr/bin/python3.5
import math
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk, Gdk as gdk, GLib, GObject as gobject
import string
import os
import subprocess
import glob
from datetime import datetime, timedelta
import time
import numpy as np
import matplotlib; matplotlib.use('Gtk3Agg')
import matplotlib.animation as animation
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
import matplotlib.pyplot as plt
import multiprocessing as mp
class Annotation3D(Annotation):
    '''A text annotation anchored to a 3D point, projected to 2D at draw time.'''

    def __init__(self, s, xyz, *args, **kwargs):
        # Use a placeholder 2D anchor; the real one is computed in draw().
        Annotation.__init__(self, s, xy=(0, 0), *args, **kwargs)
        self._verts3d = xyz

    def draw(self, renderer):
        # Project the stored 3D point into 2D display space, then draw as a
        # normal 2D Annotation at that position.
        x3, y3, z3 = self._verts3d
        x2, y2, _ = proj_transform(x3, y3, z3, renderer.M)
        self.xy = (x2, y2)
        Annotation.draw(self, renderer)
#
def annotate3D(ax, s, *args, **kwargs):
    '''Add annotation text *s* to the Axes3D *ax* at a 3D point.'''
    artist = Annotation3D(s, *args, **kwargs)
    ax.add_artist(artist)
#
def draw_basket(ax1, x, y, z, h, color='black'):
    '''Draw a wireframe basket (goal) on the ax1 figure.

    The basket is two concentric rings (radius 0.24 and 0.16) at its base,
    one outer ring at its top, and 16 slanted bars connecting inner-bottom
    to outer-top.

    Args:
        ax1: Axes3D to draw on.
        x, y, z: centre of the basket base.
        h: basket height.
        color: matplotlib color for all basket lines.
    '''
    t = np.linspace(0, np.pi * 2, 16)
    # Bottom outer ring, bottom inner ring, top outer ring.
    ax1.plot(x + 0.24 * np.cos(t), y + 0.24 * np.sin(t), z, linewidth=1, color=color)
    ax1.plot(x + 0.16 * np.cos(t), y + 0.16 * np.sin(t), z, linewidth=1, color=color)
    ax1.plot(x + 0.24 * np.cos(t), y + 0.24 * np.sin(t), z + h, linewidth=1, color=color)
    # 16 bars, one every 22.5 degrees.
    for A in range(16):
        angle = A * 22.5 * np.pi / 180
        xBar = [x + 0.16 * math.sin(angle), x + 0.24 * math.sin(angle)]
        yBar = [y + 0.16 * math.cos(angle), y + 0.24 * math.cos(angle)]
        # BUG FIX: bars previously ran from absolute height 0 to h, ignoring
        # the z parameter; they must span the basket's own height range.
        # (All current callers pass z=0., so their output is unchanged.)
        zBar = [z, z + h]
        ax1.plot(xBar, yBar, zBar, color=color)
def draw_halfsphere (ax1, x, y, z, sph_radius, color=(0,0,1,1)):
    '''Draw the free-distance surface: a downward wireframe half-sphere of
    radius *sph_radius* centred at (x, y, z) on Axes3D *ax1*.

    Returns the wireframe artist so the caller can remove() it later.
    '''
    azim, polar = np.mgrid[0:2 * np.pi:20j, 0:np.pi/2:10j]
    sphere_x = x + sph_radius * np.cos(azim) * np.sin(polar)
    sphere_y = y + sph_radius * np.sin(azim) * np.sin(polar)
    sphere_z = z - sph_radius * np.cos(polar)
    return ax1.plot_wireframe(sphere_x, sphere_y, sphere_z, color=color, alpha=0.3)
def OnClick(event):
    """Mouse-button handler: record the currently hovered data point and
    print the distance between the last two clicked points.

    Uses globals:
        selected_coord (np.array, shape (3,)): point under the cursor,
            kept up to date by annotatePlot().
        clicked_coord (np.array, shape (2, 3)): history of the last two
            clicked points (row 0 = previous, row 1 = latest).
    """
    global selected_coord
    global clicked_coord
    # Shift the previous click into slot 0, store the new click in slot 1.
    clicked_coord [0, 0] = clicked_coord [1, 0]
    clicked_coord [0, 1] = clicked_coord [1, 1]
    clicked_coord [0, 2] = clicked_coord [1, 2]
    clicked_coord [1, 0] = selected_coord[0]
    clicked_coord [1, 1] = selected_coord[1]
    clicked_coord [1, 2] = selected_coord[2]
    print ("selected position X: %5.2f Y: %5.2f Z: %5.2f" % (selected_coord[0], selected_coord[1],selected_coord[2]))
    # BUG FIX: the distance was passed to print() as a second argument
    # (comma) instead of being %-formatted into the string, so the raw
    # format string and a tuple were printed.
    print ("distance between selected points: %5.2f" % np.sqrt ((clicked_coord [0, 0] - clicked_coord [1, 0])**2
                                                                + (clicked_coord [0, 1]- clicked_coord [1, 1])**2
                                                                + (clicked_coord [0, 2] - clicked_coord [1, 2])**2))
def distance(point, event):
    """Return distance between mouse position and given data point.

    Args:
        point (np.array): np.array of shape (3,), with x,y,z in data coords
        event (MouseEvent): mouse event (which contains mouse position in .x and .xdata)
    Returns:
        distance (np.float64): distance (in screen coords) between mouse pos and data point

    Note: relies on the module-global Axes3D ``ax1``.
    """
    # CONSISTENCY FIX: use the global ax1 for the projection matrix instead
    # of plt.gca() -- the rest of this function (and file) works on ax1, and
    # plt.gca() could be a different axes if another figure is current.
    x2, y2, _ = proj_transform(point[0], point[1], point[2], ax1.get_proj())
    # Convert from data coords to screen (pixel) coords before measuring.
    x3, y3 = ax1.transData.transform((x2, y2))
    return np.sqrt ((x3 - event.x)**2 + (y3 - event.y)**2)
def calcClosestDatapoint(X, event):
    """Find which data point lies nearest the mouse position.

    Args:
        X (np.array): array of points, shape (numPoints, 3).
        event (MouseEvent): mouse event (containing mouse position).

    Returns:
        tuple: (index into X of the closest point, its screen distance).
    """
    screen_dists = [distance(row[0:3], event) for row in X]
    return np.argmin(screen_dists), np.amin(screen_dists)
def annotatePlot(X, index):
    """Create a popover label over one data point in the 3D chart.

    Args:
        X (np.array): array of points, shape (numPoints, 3).
        index (int): index (into X) of the point to label.

    Returns:
        None

    Side effects: updates the module-global ``selected_coord`` with the
    labelled point so OnClick() can measure distances between clicks.
    """
    # FIX: the docstring is now the first statement in the function body; it
    # previously followed the 'global' statement and was therefore only a
    # no-op string expression, invisible to help()/__doc__.
    global selected_coord
    # If we have previously displayed another label, remove it first.
    if hasattr(annotatePlot, 'label'):
        annotatePlot.label.remove()
    # Project the chosen 3D point to 2D display coordinates.
    x2, y2, _ = proj_transform(X[index, 0], X[index, 1], X[index, 2], ax1.get_proj())
    annotatePlot.label = plt.annotate( "Select %d" % (index+1),
        xy = (x2, y2), xytext = (-20, 20), textcoords = 'offset points', ha = 'right', va = 'bottom',
        bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
        arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
    # make coord from label available global for other function like distance measurement between points
    selected_coord[0]=X[index, 0]
    selected_coord[1]=X[index, 1]
    selected_coord[2]=X[index, 2]
    #
    fig.canvas.draw()
def onMouseMotion(event):
    """Event triggered when the mouse moves: show a text annotation over the
    data point (from either team) closest to the mouse position.

    Uses module globals pos_pb_now / pos_pw_now (blue / white team positions).
    """
    # FIX: docstring moved before the 'global' statement; it previously
    # followed it and was therefore only a no-op string expression.
    global pos_pb_now, pos_pw_now
    closestIndexW,LowestDistanceW = calcClosestDatapoint(pos_pw_now, event)
    closestIndexB,LowestDistanceB = calcClosestDatapoint(pos_pb_now, event)
    # Annotate whichever team's nearest player is closer on screen.
    if LowestDistanceW < LowestDistanceB:
        annotatePlot (pos_pw_now, closestIndexW)
    else:
        annotatePlot (pos_pb_now, closestIndexB)
#
def OneWindow(s_w_shared,s_d_shared,s_l_shared,el_w_shared,elevation_shared, azimut_shared, pb,
              pw, ball):
    '''Sub-processed plot viewer of the main window; copy/paste in one; it
    helps for a PC with 2 monitors. The main window remains the control
    window of the trainer; this window is the view window of the trained
    player.

    All parameters are multiprocessing shared objects written by the parent
    process: field sizes (s_w/s_d/s_l), exchange-lane width (el_w), camera
    angles (elevation/azimut) and position arrays for the blue players (pb),
    white players (pw) and the ball.
    '''
    # Runs in a 'spawn'ed child process: re-import locally what it needs.
    import numpy as np
    import matplotlib.pyplot as plt
    #
    def animate_one(i):
        # Pull the latest positions out of the shared-memory views and copy
        # the parent's camera orientation so both windows look alike.
        p_b_one._offsets3d = pos_pb_now_one[:, 0], pos_pb_now_one[:, 1], pos_pb_now_one[:, 2]
        p_w_one._offsets3d = pos_pw_now_one[:, 0], pos_pw_now_one[:, 1], pos_pw_now_one[:, 2]
        p_ball_one._offsets3d = pos_ball_now_one[:, 0], pos_ball_now_one[:, 1], pos_ball_now_one[:, 2]
        ax1_one.view_init(elev=elevation_shared.value, azim=azimut_shared.value)
    fig_one = plt.figure()
    ax1_one = fig_one.add_subplot(111,projection='3d')
    #
    # numpy views over the shared arrays (no copy): 6 blue players, 6 white
    # players, 1 ball; each row is (x, y, z) as float32.
    arrpb = np.frombuffer(pb.get_obj(), dtype='f')
    pos_pb_now_one = np.reshape(arrpb, (6, 3))
    #
    arrpw = np.frombuffer(pw.get_obj(), dtype='f')
    pos_pw_now_one = np.reshape(arrpw, (6, 3))
    #
    arrball = np.frombuffer(ball.get_obj(), dtype='f')
    pos_ball_now_one = np.reshape(arrball, (1, 3))
    # Wireframe of the game area (blue box).
    xG = [0,s_w_shared.value,s_w_shared.value,0,0, 0,s_w_shared.value,s_w_shared.value,s_w_shared.value,
          s_w_shared.value,s_w_shared.value, 0, 0,0, 0,s_w_shared.value]
    yG = [0, 0, 0,0,0,s_l_shared.value,s_l_shared.value, 0, 0,s_l_shared.value,s_l_shared.value,s_l_shared.value,
          s_l_shared.value,0,s_l_shared.value,s_l_shared.value]
    zG = [0, 0, s_d_shared.value,s_d_shared.value,0, 0, 0, 0, s_d_shared.value, s_d_shared.value, 0, 0,
          s_d_shared.value,s_d_shared.value, s_d_shared.value, s_d_shared.value]
    ax1_one.plot_wireframe (xG,yG,zG,colors= (0,0,1,1)) # blue line game area
    # Wireframe of the exchange (substitution) lane alongside the field.
    xW = [s_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value,
          s_w_shared.value,s_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,
          s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,
          s_w_shared.value,s_w_shared.value,s_w_shared.value,s_w_shared.value,s_w_shared.value+el_w_shared.value]
    yW = [0, 0, 0, 0, 0,s_l_shared.value,s_l_shared.value, 0, 0,s_l_shared.value,s_l_shared.value,s_l_shared.value,
          s_l_shared.value, 0,s_l_shared.value,s_l_shared.value]
    zW = [0, 0, s_d_shared.value, s_d_shared.value, 0, 0, 0, 0, s_d_shared.value, s_d_shared.value, 0, 0,
          s_d_shared.value, s_d_shared.value, s_d_shared.value, s_d_shared.value]
    ax1_one.plot_wireframe (xW,yW,zW,colors= (0,1,1,1)) # light blue line exchange area
    #
    ax1_one.set_xlabel('Wide')
    ax1_one.set_ylabel('Length')
    ax1_one.set_zlabel('Water')
    #
    # draw the 2 lines which show the depth
    xG1 = [0, s_w_shared.value]
    yG1 = [s_d_shared.value, s_d_shared.value]
    zG1 = [0, 0]
    ax1_one.plot_wireframe(xG1, yG1, zG1, colors=(0, 0, 1, 1),linestyle=':') # blue line
    xG2 = [0, s_w_shared.value]
    yG2 = [s_l_shared.value-s_d_shared.value, s_l_shared.value-s_d_shared.value]
    zG2 = [0, 0]
    ax1_one.plot_wireframe(xG2, yG2, zG2, colors=(0, 0, 1, 1),linestyle=':') # blue line
    #
    # put the axis fix (fixed limits so the view does not rescale)
    ax1_one.set_xlim3d(0, s_w_shared.value+el_w_shared.value)
    ax1_one.set_ylim3d(0, s_l_shared.value)
    ax1_one.set_zlim3d(0, s_d_shared.value)
    ax1_one.set_aspect(aspect=0.222)
    # Baskets (goals) at both ends of the field.
    draw_basket(ax1_one, s_w_shared.value / 2, 0.24, 0., 0.45)
    draw_basket(ax1_one, s_w_shared.value / 2, s_l_shared.value - 0.24, 0., 0.45)
    #
    # Scatter artists for the two teams and the ball; updated by animate_one.
    p_b_one = ax1_one.scatter(pos_pb_now_one[:, 0], pos_pb_now_one[:, 1], pos_pb_now_one[:, 2],
                              s=400, alpha = 0.5, c=(0, 0, 1, 1))
    p_w_one = ax1_one.scatter(pos_pw_now_one[:, 0], pos_pw_now_one[:, 1],
                              pos_pw_now_one[:, 2], s=400, alpha = 0.5, c="darkgrey")
    p_ball_one = ax1_one.scatter(pos_ball_now_one[:,0], pos_ball_now_one[:,1],
                                 pos_ball_now_one[:,2], s=100, alpha = 0.5, c="red")
    # Number the players 1..6 next to their markers.
    for j, xyz_ in enumerate(pos_pb_now_one):
        annotate3D(ax1_one, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
                   textcoords='offset points', ha='right',va='bottom')
    for j, xyz_ in enumerate(pos_pw_now_one):
        annotate3D(ax1_one, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
                   textcoords='offset points', ha='right', va='bottom')
    Frame = 10
    # Keep a reference to the animation so it is not garbage-collected.
    ani1_one = animation.FuncAnimation(fig_one, animate_one, frames=Frame, interval=600, blit=False, repeat=True,
                                       repeat_delay=500)
    #
    plt.pause(0.001)
    plt.show()
def animate(i):
    """One animation step of the main window (called by FuncAnimation).

    Advances every position by one 1/Frame interpolation step along its
    deltamove vector, mirrors the new positions into the shared-memory
    arrays for the child window, saves a PNG video frame, and at the end of
    each Frame-long cycle flips the movement direction and snaps positions
    back to their targets.
    """
    global pos_pb_now, pos_pb_now_shared, pos_pb_target, p_b, pos_pb_deltamove
    global pos_pw_now, pos_pw_now_shared, pos_pw_target, p_w, pos_pw_deltamove
    global pos_ball_now, pos_ball_now_shared, pos_ball_target, p_ball, pos_ball_deltamove
    global Frame
    global count_iter
    global video_page_iter
    global azimut_shared
    global elevation_shared
    global video_file_name
    # global EmitPosOneWin
    # global EmitPosFourWin
    global ax1
    global free_sphere
    #
    # Publish the current camera orientation for the child window to copy.
    azimut, elevation = ax1.azim, ax1.elev
    # print ("azimut from main",azimut)
    azimut_shared.value = azimut
    # print ("azimut_shared value from main",azimut_shared.value)
    elevation_shared.value = elevation
    # Advance the ball one interpolation step towards its target.
    pos_ball_now[0,0] += (1. / Frame) * pos_ball_deltamove[0,0]
    pos_ball_now[0,1] += (1. / Frame) * pos_ball_deltamove[0,1]
    pos_ball_now[0,2] += (1. / Frame) * pos_ball_deltamove[0,2]
    #
    # EmitPosOneWin.put(['bp', 0, pos_ball_now[0,0], pos_ball_now[0,1], pos_ball_now[0,2]])
    # EmitPosFourWin.put(['bp', 0, pos_ball_now[0,0], pos_ball_now[0,1], pos_ball_now[0,2]])
    # Mirror the ball position into shared memory for the child window.
    pos_ball_now_shared[0] = pos_ball_now[0, 0]
    pos_ball_now_shared[1] = pos_ball_now[0, 1]
    pos_ball_now_shared[2] = pos_ball_now[0, 2]
    # Advance all 12 players and mirror into shared memory (flat x,y,z triplets).
    for j in range(6):
        pos_pb_now[j, 0] += (1. / Frame) * pos_pb_deltamove[j, 0]
        pos_pb_now[j, 1] += (1. / Frame) * pos_pb_deltamove[j, 1]
        pos_pb_now[j, 2] += (1. / Frame) * pos_pb_deltamove[j, 2]
        pos_pw_now[j, 0] += (1. / Frame) * pos_pw_deltamove[j, 0]
        pos_pw_now[j, 1] += (1. / Frame) * pos_pw_deltamove[j, 1]
        pos_pw_now[j, 2] += (1. / Frame) * pos_pw_deltamove[j, 2]
        #
        # feed the queue; queue because that animation could be paused
        # EmitPosOneWin.put(['pb', j, pos_pb_now[j, 0], pos_pb_now[j, 1], pos_pb_now[j, 2]])
        # EmitPosOneWin.put(['pw', j, pos_pw_now[j, 0], pos_pw_now[j, 1], pos_pw_now[j, 2]])
        # EmitPosFourWin.put(['pb', j, pos_pb_now[j, 0], pos_pb_now[j, 1], pos_pb_now[j, 2]])
        # EmitPosFourWin.put(['pw', j, pos_pw_now[j, 0], pos_pw_now[j, 1], pos_pw_now[j, 2]])
        pos_pb_now_shared[j*3] = pos_pb_now[j,0]
        pos_pb_now_shared[j*3+1] = pos_pb_now[j,1]
        pos_pb_now_shared[j*3+2] = pos_pb_now[j,2]
        pos_pw_now_shared[j*3] = pos_pw_now[j,0]
        pos_pw_now_shared[j*3+1] = pos_pw_now[j,1]
        pos_pw_now_shared[j*3+2] = pos_pw_now[j,2]
    #
    # Update the main window's scatter artists with the new positions.
    p_b._offsets3d = pos_pb_now[:, 0], pos_pb_now[:, 1], pos_pb_now[:, 2]
    p_w._offsets3d = pos_pw_now[:, 0], pos_pw_now[:, 1], pos_pw_now[:, 2]
    p_ball._offsets3d = pos_ball_now[:,0],pos_ball_now[:,1],pos_ball_now[:,2]
    #
    # Save one PNG per step for later video assembly.
    video_page_iter = video_page_iter+1 # if video is on
    plt.savefig("/home/family/Bilder" + "/file%03d.png" % video_page_iter) # if video is on
    #
    # After 100 saved frames, assemble them into an mp4 with ffmpeg and
    # delete the PNGs.
    if video_page_iter==100: # or if command store video
        os.chdir("/home/family/Bilder")
        subprocess.call([
            'ffmpeg', '-framerate', '8', '-i', 'file%03d.png', '-r', '30', '-pix_fmt', 'yuv420p',
            # 'video_name.mp4'
            video_file_name
        ]) # add -y to overwrite test this
        for file_name in glob.glob("*.png"):
            os.remove(file_name)
        video_page_iter = 0
        # simulate the deletion of the free domain. Will be activated later by a GUI
        # NOTE(review): original indentation was lost in the paste; this call
        # is assumed to belong to the video-export branch (running it every
        # frame would raise once the sphere is already removed) -- confirm
        # against the original source.
        free_sphere.remove()
        # fig.canvas.draw()
    if i == (Frame - 1):
        # reset the deltamove to a clean zero for last position in case of rounding elements
        # or set to next step of dynamic move
        count_iter = count_iter+1
        # s alternates 0/1 each cycle: odd cycles move backwards and hide the
        # free-distance sphere, even cycles move forwards and redraw it.
        m, s = divmod(count_iter, 2)
        if s == 1:
            free_sphere.remove()
            fig.canvas.draw()
            pos_ball_deltamove[0,0] = -1.
            pos_ball_deltamove[0,1] = -1.
            pos_ball_deltamove[0,2] = -1.
            for k in range(6):
                pos_pb_deltamove[k, 0] = -1.
                pos_pb_deltamove[k, 1] = -1.
                pos_pb_deltamove[k, 2] = -1.
                pos_pw_deltamove[k, 0] = -1.
                pos_pw_deltamove[k, 1] = -1.
                pos_pw_deltamove[k, 2] = -1.
        else:
            free_sphere = draw_halfsphere(ax1, 5., 9., 4., 2.)
            pos_ball_deltamove[0,0] = 1.
            pos_ball_deltamove[0,1] = 1.
            pos_ball_deltamove[0,2] = 1.
            for k in range(6):
                pos_pb_deltamove[k, 0] = 1.
                pos_pb_deltamove[k, 1] = 1.
                pos_pb_deltamove[k, 2] = 1.
                pos_pw_deltamove[k, 0] = 1.
                pos_pw_deltamove[k, 1] = 1.
                pos_pw_deltamove[k, 2] = 1.
        # Snap current positions exactly onto the targets (removes any
        # accumulated float rounding) and mirror them into shared memory.
        pos_ball_now[0,0] = pos_ball_target[0,0]
        pos_ball_now[0,1] = pos_ball_target[0,1]
        pos_ball_now[0,2] = pos_ball_target[0,2]
        pos_ball_now_shared[0] = pos_ball_now[0, 0]
        pos_ball_now_shared[1] = pos_ball_now[0, 1]
        pos_ball_now_shared[2] = pos_ball_now[0, 2]
        for k in range(6):
            pos_pb_now[k, 0] = pos_pb_target[k, 0]
            pos_pb_now[k, 1] = pos_pb_target[k, 1]
            pos_pb_now[k, 2] = pos_pb_target[k, 2]
            pos_pw_now[k, 0] = pos_pw_target[k, 0]
            pos_pw_now[k, 1] = pos_pw_target[k, 1]
            pos_pw_now[k, 2] = pos_pw_target[k, 2]
            pos_pb_now_shared[k * 3] = pos_pb_now[k, 0]
            pos_pb_now_shared[k * 3 + 1] = pos_pb_now[k, 1]
            pos_pb_now_shared[k * 3 + 2] = pos_pb_now[k, 2]
            pos_pw_now_shared[k * 3] = pos_pw_now[k, 0]
            pos_pw_now_shared[k * 3 + 1] = pos_pw_now[k, 1]
            pos_pw_now_shared[k * 3 + 2] = pos_pw_now[k, 2]
#
if __name__=="__main__":
    #
    ######## define the queues for the 2 detached plot processes
    # 'spawn' starts the child plot process with a fresh interpreter.
    # NOTE: multiprocessing.set_start_method() exists only on Python >= 3.4;
    # running this file with an older 'python' raises AttributeError.
    mp.set_start_method('spawn')
    #
    # Field dimensions (metres). Each exists twice: a plain float for this
    # process and a multiprocessing shared Value ('f' = float32) read by the
    # child window process.
    s_w = 10.0  # field width
    # s_w_shared = Value('d', 10.0)
    s_w_shared = mp.Value('f', 10.0)
    #
    s_d = 4.0  # water depth
    s_d_shared = mp.Value('f', 4.0)
    #
    s_l = 18.0  # field length
    s_l_shared = mp.Value('f', 18.0)
    # exchange lane width
    el_w = 1.0 # normally 3
    el_w_shared = mp.Value('f', 1.0) # just 1m in order to show the side
    # ball radius
    # b_r = 0.53 / (2 * math.pi)
    # b_r_shared = Value('d', 0.53 / (2 * math.pi))
    #
    # Camera orientation, mirrored to the child window every animation step.
    elevation_shared = mp.Value('f', 10.)
    azimut_shared = mp.Value('f', 30.)
    #
    # define/initiate teams blue and white; array
    # (each *_shared is a flat float array of x,y,z triplets for the child)
    pos_pb_now = []
    pos_pb_now_shared = mp.Array('f',3*6)
    pos_pb_target = []
    pos_pw_now = []
    pos_pw_now_shared = mp.Array('f',3*6)
    pos_pw_target = []
    pos_pb_deltamove = []
    pos_pw_deltamove = []
    #
    pos_ball_now = []
    pos_ball_now_shared = mp.Array('f',3)
    pos_ball_target = []
    pos_ball_deltamove = []
    #
    clicked_coord = [] # matrix 2x3 for storing coord of clicked points for distance calculation
    clicked_coord.append([0., 0., 0.])
    clicked_coord.append([0., 0., 0.])
    #
    selected_coord = [0., 0., 0.]
    #
    numb_seq = 0
    video_page_iter = 0
    video_file_name = "test_video_name.mp4"
    #
    pos_ball_now.append([5.,9.,0.2]) # ball in the middle
    pos_ball_target.append([5.,9.,0.2])
    pos_ball_deltamove.append([0., 0., 0.])
    #
    for i in range(6):
        # distribute the players at the side with the same distance
        # at game start
        pos_pb_now.append([((s_w/6)/2)+i*(s_w/6),1.0, s_d])
        pos_pb_target.append([((s_w/6)/2)+i*(s_w/6),1.0, s_d])
        pos_pw_now.append([s_w - ((s_w / 6) / 2) - i * (s_w / 6), s_l - 1.0, s_d])
        pos_pw_target.append([s_w - ((s_w / 6) / 2) - i * (s_w / 6), s_l - 1.0, s_d])
        pos_pb_deltamove.append([0., 0., 0.])
        pos_pw_deltamove.append([0., 0., 0.])
    #
    # Define numpy array which is faster to work with
    pos_pb_now = np.array(pos_pb_now, dtype='f')
    pos_pb_target = np.array(pos_pb_target, dtype='f')
    pos_pw_now = np.array(pos_pw_now, dtype='f')
    pos_pw_target = np.array(pos_pw_target, dtype='f')
    pos_pb_deltamove = np.array(pos_pb_deltamove, dtype='f')
    pos_pw_deltamove = np.array(pos_pw_deltamove, dtype='f')
    #
    pos_ball_now = np.array(pos_ball_now, dtype='f')
    pos_ball_target = np.array(pos_ball_target, dtype='f')
    pos_ball_deltamove = np.array(pos_ball_deltamove, dtype='f')
    #
    clicked_coord = np.array(clicked_coord, dtype='f')
    selected_coord = np.array(selected_coord, dtype='f')
    #
    fig = plt.figure()
    ax1 = fig.add_subplot(111,projection='3d')
    # field (game-area wireframe, blue box)
    xG = [0,s_w,s_w,0,0, 0,s_w,s_w,s_w,s_w,s_w, 0, 0,0, 0,s_w]
    yG = [0, 0, 0,0,0,s_l,s_l, 0, 0,s_l,s_l,s_l,s_l,0,s_l,s_l]
    zG = [0, 0, s_d,s_d,0, 0, 0, 0, s_d, s_d, 0, 0, s_d,s_d, s_d, s_d]
    ax1.plot_wireframe (xG,yG,zG,colors= (0,0,1,1)) # blue line game area
    # exchange area (substitution lane alongside the field)
    xW = [s_w,s_w+el_w,s_w+el_w,s_w,s_w,s_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w,s_w,s_w,s_w,s_w+el_w]
    yW = [0, 0, 0, 0, 0,s_l,s_l, 0, 0,s_l,s_l,s_l,s_l, 0,s_l,s_l]
    zW = [0, 0, s_d, s_d, 0, 0, 0, 0, s_d, s_d, 0, 0, s_d, s_d, s_d, s_d]
    ax1.plot_wireframe (xW,yW,zW,colors= (0,1,1,1)) # light blue line exchange area
    #
    ax1.set_xlabel('Wide')
    ax1.set_ylabel('Length')
    ax1.set_zlabel('Water')
    #
    # draw the 2 lines which show the depth
    xG1 = [0, s_w]
    yG1 = [s_d, s_d]
    zG1 = [0, 0]
    ax1.plot_wireframe(xG1, yG1, zG1, colors=(0, 0, 1, 1),linestyle=':') # blue line
    xG2 = [0, s_w]
    yG2 = [s_l-s_d, s_l-s_d]
    zG2 = [0, 0]
    ax1.plot_wireframe(xG2, yG2, zG2, colors=(0, 0, 1, 1),linestyle=':') # blue line
    #
    # put the axis fix (fixed limits so the view does not rescale)
    ax1.set_xlim3d(0, s_w+el_w)
    ax1.set_ylim3d(0, s_l)
    ax1.set_zlim3d(0, s_d)
    ax1.set_aspect(aspect=0.15) # the best
    # Baskets (goals) at both ends, plus the free-distance half-sphere.
    draw_basket(ax1, s_w / 2, 0.24, 0., 0.45)
    draw_basket(ax1, s_w / 2, s_l - 0.24, 0., 0.45)
    free_sphere = draw_halfsphere(ax1, 5., 9., 4., 2.)
    # Scatter artists for the two teams and the ball; updated by animate().
    p_b = ax1.scatter(pos_pb_now[:, 0], pos_pb_now[:, 1], pos_pb_now[:, 2],
                      s=400, alpha = 0.5, c=(0, 0, 1, 1))
    p_w = ax1.scatter(pos_pw_now[:, 0], pos_pw_now[:, 1],
                      pos_pw_now[:, 2], s=400, alpha = 0.5, c="darkgrey")
    p_ball = ax1.scatter(pos_ball_now[:,0], pos_ball_now[:,1],
                         pos_ball_now[:,2], s=100, alpha = 0.5, c="red")
    # Number the players 1..6 next to their markers.
    for j, xyz_ in enumerate(pos_pb_now):
        annotate3D(ax1, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
                   textcoords='offset points', ha='right',va='bottom')
    for j, xyz_ in enumerate(pos_pw_now):
        annotate3D(ax1, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
                   textcoords='offset points', ha='right', va='bottom')
    Frame = 5
    # Initial movement direction: everything drifts by +1 per cycle.
    for j in range(6):
        pos_pb_deltamove[j, 0] = 1.
        pos_pb_deltamove[j, 1] = 1.
        pos_pb_deltamove[j, 2] = 1.
        pos_pw_deltamove[j, 0] = 1.
        pos_pw_deltamove[j, 1] = 1.
        pos_pw_deltamove[j, 2] = 1.
    pos_ball_deltamove[0,0] = 1.
    pos_ball_deltamove[0,1] = 1.
    pos_ball_deltamove[0,2] = 1.
    count_iter = 0
    # Keep a reference to the animation so it is not garbage-collected.
    ani1 = animation.FuncAnimation(fig, animate, frames=Frame, interval=1000, blit=False, repeat=True, repeat_delay=1000)
    plt.pause(0.001)
    # Launch the detached viewer window in a separate process; it reads the
    # shared Values/Arrays filled above.
    p1 = mp.Process(target=OneWindow, args=(s_w_shared, s_d_shared, s_l_shared, el_w_shared,elevation_shared,
                                            azimut_shared, pos_pb_now_shared, pos_pw_now_shared, pos_ball_now_shared))
    p1.start()
    # Hook up interactive hover/click handlers, then enter the GUI loop.
    fig.canvas.mpl_connect('motion_notify_event', onMouseMotion)
    fig.canvas.mpl_connect('button_press_event', OnClick)
    plt.show()
EDIT1:
"python3 field_basket_design_uwr.py" works.
An error is still appearing; it is perhaps the subject of a new thread (not disturbing for the moment); anyway, any comment on how to make it go away is welcome. Thanks.
/usr/lib/python3/dist-packages/matplotlib/backend_bases.py:2445: MatplotlibDeprecationWarning: Using default event loop until function specific to this GUI is implemented
warnings.warn(str, mplDeprecation)
/usr/lib/python3/dist-packages/cairocffi/surfaces.py:651: UserWarning: implicit cast from 'char *' to a different pointer type: will be forbidden in the future (check that the types are as you expect; use an explicit ffi.cast() if they are correct)
ffi.cast('char*', address), format, width, height, stride)
The set_start_method in multiprocessing was introduced in Python version 3.4
The error you are facing is due to the fact that you are using an older version of Python. Upgrading to Python 3.4 and above will fix the error.
For more information, refer to -
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method
I had the same issue, but it was not a version problem.
The problem was the file name which is multiprocessing.py in my own library.
When I import multiprocessing, it was importing the wrong file (my own file). So, I just changed the file name. I know it is a bit silly, but it may help others...
Edit: Here is an example. If you have multiprocessing.py file, and cat multiprocessing.py output is:
import multiprocessing
if __name__ == '__main__':
multiprocessing.set_start_method('fork')
you get this error. This is obviously because you include your own file instead of the real multiprocessing library. The solution is simply to change your file name to a different one.
Related
PyTorch: Constant loss value and output within linear neural network
I'm trying to make a neural network that calculates the right input angles for a rotation matrix. I'm having the classic linear network structure and at the last step the output is put into my function for the rotation, which returns a point in space as a list. Here's the code I wrote for it: import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable as V import torch.optim as opt import numpy as np import matplotlib.pyplot as plt cam_pos = np.array([500, 160, 1140, 1]) # with respect to vehicle coordinates img_res = (1280, 1080) aspect_ratio = img_res[0] / img_res[1] # in px cx = 636 / aspect_ratio cy = 548 / aspect_ratio fx = 241 / aspect_ratio fy = 238 / aspect_ratio u = 872 v = 423 D = 1900 # mm img_pt = np.array([u, v, 1, 1/D]).T camera_matrix = np.array([[fx, 0, cx, 0], [0, fy, cy, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) class Network(nn.Module): def __init__(self): super(Network, self).__init__() self.lin1 = nn.Linear(3,10) self.lin2 = nn.Linear(10,10) self.lin3 = nn.Linear(10,3) self.angle_list = [] def forward(self, x): x = F.relu(self.lin1(x)) x = F.relu(self.lin2(x)) x = self.lin3(x) self.angle_list.append(list(x.detach().numpy())) return torch.tensor(self.cam_function(x), requires_grad=True) def rot_x(self, alpha): return np.array([ [1, 0, 0, 0], [0, np.cos(alpha), -np.sin(alpha), 0], [0, np.sin(alpha), np.cos(alpha), 0], [0, 0, 0, 1] ]) def rot_y(self, beta): return np.array([ [np.cos(beta), 0, np.sin(beta), 0], [0, 1, 0, 0], [-np.sin(beta), 0, np.cos(beta), 0], [0, 0, 0, 1] ]) def rot_z(self, gamma): return np.array([ [np.cos(gamma), -np.sin(gamma), 0, 0], [np.sin(gamma), np.cos(gamma), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1] ]) def cam_function(self, net_output): net_output = net_output.detach().numpy() x = net_output[0] y = net_output[1] z = net_output[2] rot_m = np.dot(self.rot_z(z), np.dot(self.rot_y(y), self.rot_x(x))) extrinsic_matrix = np.array([ [rot_m[0][0], rot_m[0][1], rot_m[0][2], cam_pos[0]], [rot_m[1][0], 
rot_m[1][1], rot_m[1][2], cam_pos[1]], [rot_m[2][0], rot_m[2][1], rot_m[2][2], cam_pos[2]], [0, 0, 0, 1 ]]) cam_output = img_pt * D * np.linalg.inv(camera_matrix) * extrinsic_matrix / 1000 cam_output = [cam_output[0][0], cam_output[1][1], cam_output[2][2]] return cam_output model = Network() loss_function = nn.CrossEntropyLoss() optimizer = opt.SGD(model.parameters(), lr=1e-3) target = torch.tensor([1.636, 1.405, 0.262]).float() dummy_data = torch.tensor([0, 0, 0]).float() losses = [] for epoch in range(5000): model.train() prediction= model(dummy_data) loss = loss_function(prediction, target) losses.append(loss.item()) optimizer.zero_grad() loss.backward() optimizer.step() And with that I'm getting a constant value of the loss and the output as well. 7.3858967314779305 tensor([7.9938, 3.9272, 1.8514], dtype=torch.float64, requires_grad=True) 7.3858967314779305 tensor([7.9938, 3.9272, 1.8514], dtype=torch.float64, requires_grad=True) 7.3858967314779305 tensor([7.9938, 3.9272, 1.8514], dtype=torch.float64, requires_grad=True) Can someone help me please? If this works I would then extract the "angles" the NN used for the rotation matrix
Do not use numpy in cam_function, use torch.tensor. Using numpy, the gradient does not flow when using backward.
Compute Homography Direct - known camera location(s) - Image is... aliased?
I'm following the code presented here: Compute homography for a virtual camera with opencv As a note, I made a tiny modification to the code: in the translation matrix, I'm left-multiplying the first 3 rows of the last column by -R to get the translation in the global frame. I also changed the translation matrix definition to use "-dist" because in the global frame, movement toward the camera would be in the negative z direction. When I turn the X rotation to 0, I get a weird... aliased version of the loaded image that appears ABOVE the horizon line, where there should be nothing. My question: Why? Is this just a weird artifact of how the homography is calculated? How can I get rid of it? I know for x=0 (in the presented code) I can just ignore/erase anything above the horizon line, but my use case the x rotation might be -10 to 10 degrees or so - how can I calculate where the horizon line would be in those cases (so I can ignore image data above it) - or is there a mathematical solution the computing the homography that will bypass this problem all together? Thanks! 
EDIT: Adding in code/image in question: import cv2 import numpy as np rotXdeg = 90 rotYdeg = 90 rotZdeg = 90 f = 500 dist = 500 def onRotXChange(val): global rotXdeg rotXdeg = val def onRotYChange(val): global rotYdeg rotYdeg = val def onRotZChange(val): global rotZdeg rotZdeg = val def onFchange(val): global f f=val def onDistChange(val): global dist dist=val if __name__ == '__main__': #Read input image, and create output image src = cv2.imread('/path/to/image.jpg') dst = np.ndarray(shape=src.shape,dtype=src.dtype) #Create user interface with trackbars that will allow to modify the parameters of the transformation wndname1 = "Source:" wndname2 = "WarpPerspective: " cv2.namedWindow(wndname1, 1) cv2.namedWindow(wndname2, 1) cv2.createTrackbar("Rotation X", wndname2, rotXdeg, 180, onRotXChange) cv2.createTrackbar("Rotation Y", wndname2, rotYdeg, 180, onRotYChange) cv2.createTrackbar("Rotation Z", wndname2, rotZdeg, 180, onRotZChange) cv2.createTrackbar("f", wndname2, f, 2000, onFchange) cv2.createTrackbar("Distance", wndname2, dist, 2000, onDistChange) #Show original image cv2.imshow(wndname1, src) h , w = src.shape[:2] while True: rotX = (rotXdeg - 90)*np.pi/180 rotY = (rotYdeg - 90)*np.pi/180 rotZ = (rotZdeg - 90)*np.pi/180 #Projection 2D -> 3D matrix A1= np.matrix([[1, 0, -w/2], [0, 1, -h/2], [0, 0, 0 ], [0, 0, 1 ]]) # Rotation matrices around the X,Y,Z axis RX = np.matrix([[1, 0, 0, 0], [0,np.cos(rotX),-np.sin(rotX), 0], [0,np.sin(rotX),np.cos(rotX) , 0], [0, 0, 0, 1]]) RY = np.matrix([[ np.cos(rotY), 0, np.sin(rotY), 0], [ 0, 1, 0, 0], [ -np.sin(rotY), 0, np.cos(rotY), 0], [ 0, 0, 0, 1]]) RZ = np.matrix([[ np.cos(rotZ), -np.sin(rotZ), 0, 0], [ np.sin(rotZ), np.cos(rotZ), 0, 0], [ 0, 0, 1, 0], [ 0, 0, 0, 1]]) #Composed rotation matrix with (RX,RY,RZ) R = RX * RY * RZ #Translation matrix on the Z axis change dist will change the height T = np.matrix([[1,0,0,0], [0,1,0,0], [0,0,1,-dist], [0,0,0,1]]) extractT = T[:3, 3:4] solveT = -R[:3, :3]#extractT T[:3, 3:4] = 
solveT #Camera Intrisecs matrix 3D -> 2D A2= np.matrix([[f, 0, w/2,0], [0, f, h/2,0], [0, 0, 1,0]]) # Final and overall transformation matrix H = A2 * (T * (R * A1)) # Apply matrix transformation cv2.warpPerspective(src, H, (w, h), dst, cv2.INTER_CUBIC) #Show the image cv2.imshow(wndname2, dst) if (cv2.waitKey(1) == ord('q')): break Image:
Manim: creating objects behind existing objects (force z-index at creation time)
I'm using Manim CE 0.8.0, and I'm trying to fade in the axes behind the existing objects in the scene; I found no way to accomplish that. Here's a POC: from manim import * class FadeBehind(Scene): def construct(self): myDot = Dot( point = [0, 0, 0], radius = 3, color = RED, ) self.play( FadeIn(myDot), ) myLine = Line( start = [-5, 0, 0], end = [5, 0, 0], stroke_color = BLUE, stroke_width = 30, ) myLine.z_index = myDot.z_index - 1 self.play( FadeIn(myLine) # works as expected (the blue line is shown behind the dot) ) self.wait() ax = Axes( x_range=[-7, 7, 1], y_range=[-5, 5, 1], ) ax.z_index = myLine.z_index - 1 self.play( FadeIn(ax) # doesn't work as expected (the axes are overlayed on top of everything in the scene) )
The problem is, that the default z_index is 0: print(myDot.z_index) gives 0. And z_index have to be positive. Here is the script that works: class FadeBehind(Scene): def construct(self): myDot = Dot( point = [0, 0, 0], radius = 2, color = RED, ) self.play( FadeIn(myDot), ) myDot.z_index=1 myLine = Line( start = [-5, 0, 0], end = [5, 0, 0], stroke_color = BLUE, stroke_width = 30, ) myLine.z_index = 0 self.play( FadeIn(myLine) # works as expected (the blue line is shown behind the dot) ) ax = Axes( x_range=[-7, 7, 1], y_range=[-5, 5, 1], ) ax.z_index = 0 self.play( FadeIn(ax) # now works as expected since lower z-index )
H (observation) matrix in Kalman Filter when only measuring some of the state-space variables
I'm implementing a Kalman filter for an 2D tracked object. I'm measuring the position and the velocity of the object. For the moment, I assume I have all the data from the sensors at the same time, so my observation matrix H is H = eye(4,4), a 4x4 identity matrix. (See code below) However, in my final implementation I will have the data from the sensors at different times. So in some update loops I will have the velocity, and in others I will have the position. How would I write the H matrix in those cases? Is it okay to write [position loop] [1, 0, 0, 0 ] [0, 1, 0, 0 ] [0, 0, 0, 0 ] [0, 0, 0, 0 ] [velocity loop] [0, 0, 0, 0 ] [0, 0, 0, 0 ] [0, 0, 1, 0 ] [0, 0, 0, 1 ] Note that my state space variables are [x, y, vx, vy] I wonder if using those matrices does not imply that my observations are zero, or something like that. Can I leave the covariances matrices untouched? I guess not. #Implementation of 2D filter with FilterPy. import numpy as np from filterpy.kalman import KalmanFilter from filterpy.common import Q_discrete_white_noise import matplotlib.pyplot as plt # --------- PARAM ----------- dt = 0.1 v_dev = 0.3 pos_dev = 0.8 duration = 50 acceleration_noise = 0.3 # --------- MODEL ------------ transition_matrix = [[1,0,dt,0],[0,1,0,dt],[0,0,1,0],[0,0,0,1]] transition_covariance = np.array([ [ 0.25*pow(dt, 4), 0, 0.5* pow(dt, 3), 0 ], [ 0, 0.25*pow(dt, 4), 0, 0.5* pow(dt, 3)], [ 0.5* pow(dt, 3), 0, dt*dt, 0], [ 0, 0.5*dt*dt*dt, 0, dt*dt]]) * acceleration_noise *acceleration_noise # A large process noise favors the measurements. 
() #Transition matrix with acceleration componentn observation_matrix = np.eye(4, 4) initial_state = [0, 0, 0.5, 0.5] initial_state_covariance = [[ pos_dev*pos_dev, 0, 0 ,0],[0, pos_dev*pos_dev, 0, 0],[0, 0, v_dev * v_dev, 0 ],[0, 0, 0, v_dev * v_dev ]] observation_covariance = [[pos_dev * pos_dev , 0, 0 ,0],[0, pos_dev * pos_dev, 0, 0],[0, 0, v_dev * v_dev, 0 ],[0, 0, 0, v_dev * v_dev ]] #----------------------------- #---------- FAKE DATA --------- ind = np.array( range( round(duration/dt) ) ) time = ind * dt position = np.zeros( (2, len(ind)) ) position[0,:] = time position[1,:] = 3 * np.sin(time) noise = pos_dev * np.random.randn(2, len(ind)) noisy_pos = position + noise vel = position[:,1:len(ind)] - position[:,0:len(ind)-1] vel = vel / dt vel_ind = np.zeros( (2, len(ind) -1 ) ) vel_ind[0,:] = position[0,0:len(ind)-1] vel_ind[1,:] = position[1,0:len(ind)-1] vel_noise = v_dev * np.random.randn(2, len(ind) - 1 ) noisy_vel = vel + vel_noise observations = np.zeros((len(ind), 4)) observations[:,[0,1]] = np.transpose(noisy_pos) observations[1:len(ind),[2,3]] = np.transpose(noisy_vel) observations[0,[2,3]] = np.transpose(noisy_vel[[0,1],0] ) # KALMAN! 
filtered_state_means = np.zeros((len(time), 4)) filtered_state_covariances = np.zeros( ( len(time), 4, 4) ) kf = KalmanFilter( dim_x = 4, dim_z = 4) # state space: x, y, vx, vy, measuring all kf.x = np.array( initial_state ) kf.F = np.array( transition_matrix ) kf.H = np.array( observation_matrix ) kf.P = np.array( initial_state_covariance ) kf.Q = np.array( transition_covariance ) kf.R =np.array( observation_covariance ) #measurement covariance for i in range(0, len(time) ): # Ommitting some data points if( i > no_gps_start and i < no_gps_end): # No data from gps kf.H = np.array( ([0, 0, 0, 0],[0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]) ) else: kf.H = observation_matrix kf.predict() kf.update(observations[i]) filtered_state_means[i] = kf.x filtered_state_covariances[i] = kf.P # Plotting everything xmin = -2 xmax = 22 ymin = -4.3 ymax = 4.3 axisLimits = [xmin, xmax, ymin, ymax] plt.figure(1) plt.plot( position[0,:], position[1,:], linewidth=1 , color= '0.7') plt.plot( noisy_pos[0,:], noisy_pos[1,:], '.') plt.axis( axisLimits ) plt.figure(2) plt.plot( position[0,:], position[1,:], linewidth=1 , color= '0.7') plt.quiver( vel_ind[0,:], vel_ind[1,:], noisy_vel[0,:], noisy_vel[1,:], angles='xy', scale_units='xy', scale=10) plt.axis( axisLimits ) plt.figure(3) plt.plot( position[0,:], position[1,:], linewidth=1 , color= '0.7', zorder= 1) plt.plot( filtered_state_means[:,0], filtered_state_means[:,1], linewidth = 1, zorder= 2) plt.plot( noisy_pos[0,:], noisy_pos[1,:], '.', color = "#fd92f8", zorder= 0) plt.plot( no_gps_x, no_gps_y, 'ro') plt.show()
You are right, you are not allowed to modify the observation matrix in this way. In your case the best solution would be a sequential Kalman Filter, which was developed exactly for handling of missing measurements. The measurement vector is replaced through a sequence of separate scalar measurements. The filter can proceed them independently and is not corrupted if one or more measurements do not exist at some point in time. Have a look at Dan Simon's "Optimal State Estimation" Chapter 6.1 (you can try to find the book online). He derives alternative equations for the Kalman Filter, which are pretty easy to implement. The prediction step stays the same, you need to modify the update step. Pros: you don't need to compute the inverse matrix at all (nice for embedded systems) if your H matrix has a lot of zeros the equivalent sequential expressions are very short and computationally efficient Contras: the R matrix (measurement covariance) has to be diagonal
Matplotlib render all internal voxels (with alpha)
I want to render a volume in matplotlib. The volume is a simple 7x7x7 cube, and I want to be able to see all internal voxels (even though I know it will look like a mess). I've been able to render voxels with transparency, but any voxel not on the surface seems to never be drawn. Each 7x7 slice of the volume should look like this: I've thrown together a MWE The following code creates a 5x5x5 volume with a red,green,blue,yellow, and cyan 5x5 layers. The alpha of each layer is set to .5, so the whole thing should be see-through. Then I chang the colors of all non-surface voxels to black with alpha 1, so if they were showing we should be able to see a black box in the center. Rendering it by itself produces the figure on the left, but if we remove the fill from the cyan layer, we can see that the black box does indeed exist, it is just not being shown because it is 100% occluded even though those occluding voxels have alpha less than 1. import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # NOQA spatial_axes = [5, 5, 5] filled = np.ones(spatial_axes, dtype=np.bool) colors = np.empty(spatial_axes + [4], dtype=np.float32) alpha = .5 colors[0] = [1, 0, 0, alpha] colors[1] = [0, 1, 0, alpha] colors[2] = [0, 0, 1, alpha] colors[3] = [1, 1, 0, alpha] colors[4] = [0, 1, 1, alpha] # set all internal colors to black with alpha=1 colors[1:-1, 1:-1, 1:-1, 0:3] = 0 colors[1:-1, 1:-1, 1:-1, 3] = 1 fig = plt.figure() ax = fig.add_subplot('111', projection='3d') ax.voxels(filled, facecolors=colors, edgecolors='k') fig = plt.figure() ax = fig.add_subplot('111', projection='3d') filled[-1] = False ax.voxels(filled, facecolors=colors, edgecolors='k') Is there any way to render all occluded voxels?
To turn my comments above into an answer: You may always just plot all voxels as in Representing voxels with matplotlib 3D discrete heatmap in matplotlib The official example solves this problem by offsettingt the faces of the voxels by a bit, such they are all drawn. This matplotlib issue discusses the missing faces on internal cubes. There is a pull request which has some issues still and it hence not merged yet. Despite the small issues, you may monkey patch the current status of the pull request into your code: import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D, art3d # NOQA from matplotlib.cbook import _backports from collections import defaultdict import types def voxels(self, *args, **kwargs): if len(args) >= 3: # underscores indicate position only def voxels(__x, __y, __z, filled, **kwargs): return (__x, __y, __z), filled, kwargs else: def voxels(filled, **kwargs): return None, filled, kwargs xyz, filled, kwargs = voxels(*args, **kwargs) # check dimensions if filled.ndim != 3: raise ValueError("Argument filled must be 3-dimensional") size = np.array(filled.shape, dtype=np.intp) # check xyz coordinates, which are one larger than the filled shape coord_shape = tuple(size + 1) if xyz is None: x, y, z = np.indices(coord_shape) else: x, y, z = (_backports.broadcast_to(c, coord_shape) for c in xyz) def _broadcast_color_arg(color, name): if np.ndim(color) in (0, 1): # single color, like "red" or [1, 0, 0] return _backports.broadcast_to( color, filled.shape + np.shape(color)) elif np.ndim(color) in (3, 4): # 3D array of strings, or 4D array with last axis rgb if np.shape(color)[:3] != filled.shape: raise ValueError( "When multidimensional, {} must match the shape of " "filled".format(name)) return color else: raise ValueError("Invalid {} argument".format(name)) # intercept the facecolors, handling defaults and broacasting facecolors = kwargs.pop('facecolors', None) if facecolors is None: facecolors = 
self._get_patches_for_fill.get_next_color() facecolors = _broadcast_color_arg(facecolors, 'facecolors') # broadcast but no default on edgecolors edgecolors = kwargs.pop('edgecolors', None) edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors') # include possibly occluded internal faces or not internal_faces = kwargs.pop('internal_faces', False) # always scale to the full array, even if the data is only in the center self.auto_scale_xyz(x, y, z) # points lying on corners of a square square = np.array([ [0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0] ], dtype=np.intp) voxel_faces = defaultdict(list) def permutation_matrices(n): """ Generator of cyclic permutation matices """ mat = np.eye(n, dtype=np.intp) for i in range(n): yield mat mat = np.roll(mat, 1, axis=0) for permute in permutation_matrices(3): pc, qc, rc = permute.T.dot(size) pinds = np.arange(pc) qinds = np.arange(qc) rinds = np.arange(rc) square_rot = square.dot(permute.T) for p in pinds: for q in qinds: p0 = permute.dot([p, q, 0]) i0 = tuple(p0) if filled[i0]: voxel_faces[i0].append(p0 + square_rot) # draw middle faces for r1, r2 in zip(rinds[:-1], rinds[1:]): p1 = permute.dot([p, q, r1]) p2 = permute.dot([p, q, r2]) i1 = tuple(p1) i2 = tuple(p2) if filled[i1] and (internal_faces or not filled[i2]): voxel_faces[i1].append(p2 + square_rot) elif (internal_faces or not filled[i1]) and filled[i2]: voxel_faces[i2].append(p2 + square_rot) # draw upper faces pk = permute.dot([p, q, rc-1]) pk2 = permute.dot([p, q, rc]) ik = tuple(pk) if filled[ik]: voxel_faces[ik].append(pk2 + square_rot) # iterate over the faces, and generate a Poly3DCollection for each voxel polygons = {} for coord, faces_inds in voxel_faces.items(): # convert indices into 3D positions if xyz is None: faces = faces_inds else: faces = [] for face_inds in faces_inds: ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2] face = np.empty(face_inds.shape) face[:, 0] = x[ind] face[:, 1] = y[ind] face[:, 2] = z[ind] faces.append(face) poly = 
art3d.Poly3DCollection(faces, facecolors=facecolors[coord], edgecolors=edgecolors[coord], **kwargs ) self.add_collection3d(poly) polygons[coord] = poly return polygons spatial_axes = [5, 5, 5] filled = np.ones(spatial_axes, dtype=np.bool) colors = np.empty(spatial_axes + [4], dtype=np.float32) alpha = .5 colors[0] = [1, 0, 0, alpha] colors[1] = [0, 1, 0, alpha] colors[2] = [0, 0, 1, alpha] colors[3] = [1, 1, 0, alpha] colors[4] = [0, 1, 1, alpha] # set all internal colors to black with alpha=1 colors[1:-1, 1:-1, 1:-1, 0:3] = 0 colors[1:-1, 1:-1, 1:-1, 3] = 1 fig = plt.figure() ax = fig.add_subplot('111', projection='3d') ax.voxels = types.MethodType(voxels, ax) ax.voxels(filled, facecolors=colors, edgecolors='k',internal_faces=True) fig = plt.figure() ax = fig.add_subplot('111', projection='3d') ax.voxels = types.MethodType(voxels, ax) filled[-1] = False ax.voxels(filled, facecolors=colors, edgecolors='k',internal_faces=True) plt.show()