GetFEM: save plot as PNG? - python

I am using the Python bindings for GetFEM; to that end I wrote this script, following their tutorial:
import getfem as gf
import numpy as np
import math

# Build a conical mesher object and generate a mesh with element size h and degree K
center = [0.0, 0.0]
dir = [0.0, 1.0]
radius = 1.0
angle = 0.2 * math.pi
mo = gf.MesherObject("cone", center, dir, radius, angle)
h = 0.1
K = 2
mesh = gf.Mesh("generate", mo, h, K)

# Tag the outer boundary so the Dirichlet condition can be applied there
outer_faces = mesh.outer_faces()
OUTER_BOUND = 1
mesh.set_region(OUTER_BOUND, outer_faces)
sl = gf.Slice(("none",), mesh, 1)

# Scalar finite element space and integration method
mfu = gf.MeshFem(mesh, 1)
elements_degree = 2
mfu.set_classical_fem(elements_degree)
mim = gf.MeshIm(mesh, pow(elements_degree, 2))

# Poisson problem: Laplacian of u with source term F and u = 0 on the boundary
md = gf.Model("real")
md.add_fem_variable("u", mfu)
md.add_Laplacian_brick(mim, "u")
F = 1.0
md.add_fem_data("F", mfu)
md.set_variable("F", np.repeat(F, mfu.nbdof()))
md.add_source_term_brick(mim, "u", "F")
md.add_Dirichlet_condition_with_multipliers(mim, "u", elements_degree - 1, OUTER_BOUND)
md.solve()
U = md.variable("u")
sl.export_to_vtk("u.vtk", "ascii", mfu, U, "U")
This exports a VTK file. Somewhere, I found a way to display it in a Jupyter notebook:
import pyvista as pv
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1280, 1024))
display.start()
p = pv.Plotter()
m = pv.read("u.vtk")
contours = m.contour()
p.add_mesh(m, show_edges=False)
p.add_mesh(contours, color="black", line_width=1)
p.add_mesh(m.contour(8).extract_largest(), opacity=0.1)
pts = m.points
p.show(window_size=[384, 384], cpos="xy")
display.stop()
The result looks awfully compressed for some reason. I am trying to save it as a PNG instead.
Does anyone know how to convert the VTK file into a PNG?
ParaView is deprecated on modern systems for all intents and purposes, so that's off the table.

You can generate a screenshot without displaying it with:
import pyvista as pv
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1280, 1024))
display.start()
p = pv.Plotter(off_screen=True)
m = pv.read("u.vtk")
contours = m.contour()
p.add_mesh(m, show_edges=False)
p.add_mesh(contours, color="black", line_width=1)
p.add_mesh(m.contour(8).extract_largest(), opacity=0.1)
pts = m.points
p.show(cpos="xy", screenshot='screenshot.png')
display.stop()
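If all you need is the PNG, PyVista can also manage the virtual framebuffer itself and write the image directly. This is a minimal sketch, assuming a Linux machine with Xvfb installed (pyvista.start_xvfb() wraps it); the window_size values are just an example, and choosing them explicitly also avoids the squashed aspect ratio:
import pyvista as pv
pv.start_xvfb()  # replaces pyvirtualdisplay; assumes Xvfb is installed (Linux only)
m = pv.read("u.vtk")
p = pv.Plotter(off_screen=True, window_size=[1024, 768])  # pick your aspect ratio
p.add_mesh(m, show_edges=False)
p.add_mesh(m.contour(), color="black", line_width=1)
p.screenshot("u.png")  # renders off-screen and writes the PNG directly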

Related

Why does rasterizing in Python using GDAL not work?

I am reading a shapefile that contains data ranging from 0 to 100 in Python using GDAL. Unfortunately, while it does not raise errors, the result is not correct (compared with QGIS). I have tried different NoDataValue settings, but have not found the right result.
Here is the code:
from osgeo import gdal
from osgeo import ogr
import matplotlib.pyplot as plt
import numpy as np
import glob
import numpy.ma as ma
import os
def Feature_to_Raster(input_shp, output_tiff, cellsize, field_name=True, NoData_value=-9999):
    # Input
    inp_driver = ogr.GetDriverByName('ESRI Shapefile')
    inp_source = inp_driver.Open(input_shp, 0)
    inp_lyr = inp_source.GetLayer(0)
    inp_srs = inp_lyr.GetSpatialRef()
    # Extent
    x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
    x_ncells = int((x_max - x_min) / cellsize)
    y_ncells = int((y_max - y_min) / cellsize)
    # Output
    out_driver = gdal.GetDriverByName('GTiff')
    if os.path.exists(output_tiff):
        out_driver.Delete(output_tiff)
    out_source = out_driver.Create(output_tiff, x_ncells, y_ncells, 1, gdal.GDT_Float32)
    out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
    out_source.SetProjection(inp_srs.ExportToWkt())
    out_lyr = out_source.GetRasterBand(1)
    out_lyr.SetNoDataValue(NoData_value)
    # Rasterize
    # print(inp_lyr)
    if field_name:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, options=["ATTRIBUTE=CT"])
    else:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
    # Save and/or close the data sources
    inp_source = None
    out_source = None
    ds = gdal.Open('name.tif')
    ndv = ds.GetRasterBand(1).GetNoDataValue()
    bnd1 = ds.GetRasterBand(1).ReadAsArray()
    bnd1[bnd1 == ndv] = np.nan
    tt = ma.masked_outside(bnd1, 1, 100)
    plt.imshow(tt, cmap='jet')
    plt.colorbar()
    plt.xlabel('Column #')
    plt.ylabel('Row #')
    plt.show()
    # Return
    return output_tiff

output_tiff = 'D:/myfolder/name.tif'
input_shp = 'D:/myfolder/cis_SGRDAMID_20101201.shp'
Feature_to_Raster(input_shp, output_tiff, cellsize, field_name=True, NoData_value=-9999)
I've had more success with the gdal.Rasterize function.
See if this solves your problem. You can replace this:
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
    out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells, 1, gdal.GDT_Float32)
out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
out_source.SetProjection(inp_srs.ExportToWkt())
out_lyr = out_source.GetRasterBand(1)
out_lyr.SetNoDataValue(NoData_value)
# Rasterize
# print(inp_lyr)
if field_name:
    gdal.RasterizeLayer(out_source, [1], inp_lyr, options=["ATTRIBUTE=CT"])
else:
    gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
with this:
if field_name:
    # This will rasterize your shapefile according to the specified attribute field
    rasDs = gdal.Rasterize(output_tiff, input_shp,
                           xRes=cellsize, yRes=cellsize,
                           outputBounds=[x_min, y_min, x_max, y_max],
                           noData=NoData_value,
                           outputType=gdal.GDT_Float32,
                           attribute='CT',  # or whatever your attribute field name is
                           allTouched=True)
else:
    # This will just give 255 where there are vector data, since no attribute is defined
    rasDs = gdal.Rasterize(output_tiff, input_shp,
                           xRes=cellsize, yRes=cellsize,
                           outputBounds=[x_min, y_min, x_max, y_max],
                           noData=NoData_value,
                           outputType=gdal.GDT_Float32,
                           allTouched=True)
rasDs = inp_source = None
And always remember to keep your cell size consistent with your coordinate system, e.g. don't specify it in meters when the shapefile's projection is WGS84 (degrees).
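As a quick sanity check on the result (a minimal sketch; it only assumes the output_tiff path from above), you can read the raster back and force exact band statistics to confirm the attribute values actually landed in the band:
from osgeo import gdal
ds = gdal.Open(output_tiff)
band = ds.GetRasterBand(1)
# force=True computes exact statistics; NoData pixels are excluded
print("min/max/mean/std:", band.GetStatistics(False, True))
ds = None  # close the dataset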

How to project PyBullet simulation coordinates to rendered frame pixel coordinates using OpenCV?

How can I convert an object's position in PyBullet to pixel coordinates and draw a line onto the frame using PyBullet and OpenCV?
We would like to do this because PyBullet's native addUserDebugLine() function is not available in DIRECT mode.
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
def capture_frame(base_pos=[0,0,0], _cam_dist=3, _cam_yaw=45, _cam_pitch=-45):
    _render_width, _render_height = VIDEO_RESOLUTION
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=90, aspect=float(_render_width) / _render_height,
        nearVal=0.01, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=_render_width, height=_render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # ER_BULLET_HARDWARE_OPENGL
    rgb_array = np.array(px, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array, view_matrix, proj_matrix

def render():
    frame, vmat, pmat = capture_frame()
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    frame, view_matrix, proj_matrix = capture_frame()
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}
    # reshape matrices
    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
    vmat = np.array(view_matrix).reshape((4,4), order=my_order)
    fmat = vmat.T @ pmat.T
    # compute origin from origin point in simulation
    origin = np.array([0,0,0,1])
    frame_origin = (fmat @ origin)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
    # define unit vectors
    unit_vectors = [np.array([1,0,0,1]),
                    np.array([0,1,0,1]),
                    np.array([0,0,1,1])]
    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640,360), (int(cur_point[0]), int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1, p2)

if __name__ == '__main__':
    physicsClient = p.connect(p.DIRECT)  # or p.GUI for the graphical version
    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optionally
    p.setGravity(0,0,-10)
    planeId = p.loadURDF("plane.urdf")
    startPos = [1,0,0.2]
    startOrientation = p.getQuaternionFromEuler([0,0,0])
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    startPos = [0,2,0.2]
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame):
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range(2400):
        if i == 2399:
            render()
        p.stepSimulation()
    p.disconnect()
The expected output would be the following frame, but with the origin coordinate frame drawn correctly, i.e. the X, Y, and Z axes colored red, blue, and green respectively.
Since the two R2D2 robots are positioned at [1,0,0] and [0,1,0] respectively, we can see that the coordinate frame is off. (See image below.)
We tried the following:
transposing the matrices
not transposing the matrices
changing the order in which we compute fmat, e.g. pmat @ vmat instead of vmat @ pmat, etc.
Any help is appreciated.
After a lot of fiddling, I came to a solution.
Playing with it for a while, I reached a point where it looked almost OK, except for a rotation of the axes given by the yaw angle. So I made a second call to computeViewMatrixFromYawPitchRoll, but with the opposite yaw, in order to compute the transformation for the axes.
Unfortunately, I'm not sure why this works... but it works!
Note: base_pos, _cam_dist, _cam_yaw and _cam_pitch have been moved into render(). Note also: the up direction has been reversed (don't ask why... :-) ). A pretty messy explanation, I must admit...
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
import os
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
K=np.array([[1280,0,0],[0,720,0],[0,0,1]])
def capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch):
    _render_width, _render_height = VIDEO_RESOLUTION
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=90, aspect=float(_render_width) / _render_height,
        nearVal=0.01, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=_render_width, height=_render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # ER_BULLET_HARDWARE_OPENGL
    rgb_array = np.array(px, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array, view_matrix, proj_matrix

def render():
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    base_pos = [0,0,0]
    _cam_dist = 3
    _cam_yaw = 45
    _cam_pitch = -30
    frame, view_matrix, proj_matrix = capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch)
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}
    # inverse transform: same camera, but with the opposite yaw
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=-_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
    vmat = np.array(view_matrix).reshape((4,4), order=my_order)
    fmat = pmat @ vmat.T
    # compute origin from origin point in simulation
    origin = np.array([0,0,0,1])
    frame_origin = (fmat @ origin)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
    # define unit vectors (up direction reversed)
    unit_vectors = [np.array([1,0,0,1]),
                    np.array([0,1,0,1]),
                    np.array([0,0,-1,1])]
    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640,360), (int(cur_point[0]), int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1, p2)

if __name__ == '__main__':
    physicsClient = p.connect(p.DIRECT)  # non-graphical version
    #physicsClient = p.connect(p.GUI)  # graphical version
    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optionally
    p.setGravity(0,0,-10)
    planeId = p.loadURDF("plane.urdf")
    #arrows = p.loadURDF("arrows.urdf")
    startPos = [1,0,0.2]
    startOrientation = p.getQuaternionFromEuler([0,0,0])
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    startPos = [0,2,0.2]
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame):
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range(2400):
        if i == 2399:
            render()
        p.stepSimulation()
    p.disconnect()
Here is the result:
Best regards.
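For reference, the textbook world-to-pixel pipeline (clip space, then perspective divide, then viewport mapping) avoids the opposite-yaw trick entirely. This is only a sketch: the helper name world_to_pixel is ours, and it assumes PyBullet's flat 16-element matrices are in column-major (OpenGL) order:
import numpy as np
def world_to_pixel(point_xyz, view_matrix, proj_matrix, width, height):
    # assumed: PyBullet returns flat 16-tuples in column-major (OpenGL) order
    V = np.array(view_matrix).reshape(4, 4, order='F')
    P = np.array(proj_matrix).reshape(4, 4, order='F')
    clip = P @ V @ np.array([*point_xyz, 1.0])
    ndc = clip[:3] / clip[3]                    # perspective divide
    u = (ndc[0] * 0.5 + 0.5) * width            # NDC x in [-1, 1] -> pixel column
    v = (1.0 - (ndc[1] * 0.5 + 0.5)) * height   # flip y: NDC up vs. image down
    return int(u), int(v)
Each axis line then becomes a cv2.line from world_to_pixel([0,0,0], view_matrix, proj_matrix, 1280, 720) to world_to_pixel of the axis endpoint, with no per-axis scaling constants.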

Nothing Happens When Running Script in Spyder

I am running this code and, oddly, nothing happens. There is no error, nor does it freeze; it simply runs without storing variables, nothing is printed out, and it doesn't open the window that is supposed to show the plot. It is very odd. This worked only a few minutes ago, and I did not change anything about it. I did make sure that the variable explorer is displaying all the definitions in the script. I intentionally removed the plotting section at the end, since it just made the code longer and the same issue persists without it.
Code:
#Import libraries
import numpy as np
from scipy.integrate import odeint
#from scipy.integrate import solve_ivp
from time import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import grid
from mpl_toolkits.mplot3d import Axes3D
import numpy, scipy.io
from matplotlib.patches import Circle
'''
import sympy as sy
import random as rand
from scipy import interpolate
'''
'''
Initiate Timer
'''
TimeStart = time()
'''
#User defined inputs
'''
TStep = (17.8E-13)
TFinal = (17.8E-10)
R0 = 0.02
V0X = 1E7
ParticleCount = 1 #No. of particles to generate energies for energy generation
BInput = 0.64 #Magnitude of B field near pole of magnet in experiment
ScaleV0Z = 1
'''
#Defining constants based on user input and nature (Cleared of all errors!)
'''
#Defining Space and Particle Density based on Pressure PV = NkT
k = 1.38E-23 #Boltzmann constant
#Natural Constants
Q_e = -1.602E-19 #Charge of electron
M_e = 9.11E-31 #Mass of electron
JToEv = 6.24E+18 #Joules to eV conversion
EpNaut = 8.854187E-12
u0 = 1.256E-6
k = 1/(4*np.pi*EpNaut)
QeMe = Q_e/M_e
'''
Create zeros matrices to populate later (Cannot create TimeIndex array!)
'''
TimeSpan = np.linspace(0,TFinal,num=round((TFinal/TStep)))
TimeIndex = np.linspace(0,TimeSpan.size,num=TimeSpan.size)
ParticleTrajectoryMat = np.zeros([91,TimeSpan.size,6])
BFieldTracking = np.zeros([TimeSpan.size,3])
InputAngle = np.array([np.linspace(0,90,91)])
OutputAngle = np.zeros([InputAngle.size,1])
OutputRadial = np.zeros([InputAngle.size,1])
'''
Define B-Field
'''
def BField(x,y,z):
    InputCoord = np.array([x,y,z])
    VolMag = 3.218E-6 #Volume of magnet in experiment in m^3
    BR = np.sqrt(InputCoord[0]**2 + InputCoord[1]**2 + InputCoord[2]**2)
    MagMoment = np.array([0,0,(BInput*VolMag)/u0])
    BDipole = (u0/(4*np.pi))*(((3*InputCoord*np.dot(MagMoment,InputCoord))/BR**5)-(MagMoment/BR**3))
    #BVec = np.array([BDipole[0],BDipole[1],BDipole[2]])
    #print(BDipole[0],BDipole[1],BDipole[2])
    return (BDipole[0],BDipole[1],BDipole[2])
'''
Lorentz Force Differential Equations Definition
'''
def LorentzForce(PosVel,t,Constants):
    X,Y,Z,VX,VY,VZ = PosVel
    Bx,By,Bz,QeMe = Constants
    BFInput = np.array([Bx,By,Bz])
    VelInput = np.array([VX,VY,VZ])
    Accel = QeMe * (np.cross(VelInput, BFInput))
    LFEqs = np.concatenate((VelInput, Accel), axis = 0)
    return LFEqs
'''
Cartesian to spherical coordinates converter function. Returns: [Radius (m), Theta (rad), Phi (rad)]
'''
def Cart2Sphere(xIn,yIn,zIn):
    P = np.sqrt(xIn**2 + yIn**2 + zIn**2)
    if xIn == 0:
        Theta = np.pi/2
    else:
        Theta = np.arctan(yIn/xIn)
    Phi = np.arccos(zIn/np.sqrt(xIn**2 + yIn**2 + zIn**2))
    SphereVector = np.array([P,Theta,Phi])
    return SphereVector
'''
Main Loop
'''
for angletrack in range(0,InputAngle.size):
    MirrorAngle = InputAngle[0,angletrack]
    MirrorAngleRad = MirrorAngle*(np.pi/180)
    V0Z = np.abs(V0X/np.sin(MirrorAngleRad))*np.sqrt(1-(np.sin(MirrorAngleRad))**2)
    V0Z = V0Z*ScaleV0Z
    #Define initial conditions
    V0 = np.array([[V0X,0,V0Z]])
    S0 = np.array([[0,R0,0]])
    ParticleTrajectoryMat[0,:] = np.concatenate((S0,V0),axis=None)
    for timeplace in range(0,TimeIndex.size-1):
        ICs = np.concatenate((S0,V0),axis=None)
        Bx,By,Bz = BField(S0[0,0],S0[0,1],S0[0,2])
        BFieldTracking[timeplace,:] = np.array([Bx,By,Bz])
        AllConstantInputs = [Bx,By,Bz,QeMe]
        t = np.array([TimeSpan[timeplace],TimeSpan[timeplace+1]])
        ODESolution = odeint(LorentzForce,ICs,t,args=(AllConstantInputs,))
        ParticleTrajectoryMat[angletrack,timeplace+1,:] = ODESolution[1,:]
        S0[0,0:3] = ODESolution[1,0:3]
        V0[0,0:3] = ODESolution[1,3:6]
    MatSize = np.array([ParticleTrajectoryMat.shape])
    RowNum = MatSize[0,1]
    SphereMat = np.zeros([RowNum,3])
    SphereMatDeg = np.zeros([RowNum,3])
    for cart2sphereplace in range(0,RowNum):
        SphereMat[cart2sphereplace,:] = Cart2Sphere(ParticleTrajectoryMat[angletrack,cart2sphereplace,0],ParticleTrajectoryMat[angletrack,cart2sphereplace,1],ParticleTrajectoryMat[angletrack,cart2sphereplace,2])
    for rad2deg in range(0,RowNum):
        SphereMatDeg[rad2deg,:] = np.array([SphereMat[rad2deg,0],(180/np.pi)*SphereMat[rad2deg,1],(180/np.pi)*SphereMat[rad2deg,2]])
    PhiDegVec = np.array([SphereMatDeg[:,2]])
    RVec = np.array([SphereMatDeg[:,0]])
    MinPhi = np.amin(PhiDegVec)
    MinPhiLocationTuple = np.where(PhiDegVec == np.amin(PhiDegVec))
    MinPhiLocation = int(MinPhiLocationTuple[1])
    RAtMinPhi = RVec[0,MinPhiLocation]
    OutputAngle[angletrack,0] = MinPhi
    OutputRadial[angletrack,0] = RAtMinPhi
    print('Mirror Angle Input (In deg): ',InputAngle[0,angletrack])
    print('Mirror Angle Output (In deg): ',MinPhi)
    print('R Value at minimum Phi (m): ',RAtMinPhi)
InputAngleTrans = np.matrix.transpose(InputAngle)
CompareMat = np.concatenate((InputAngleTrans,OutputAngle),axis=1)

After drawing something with Vispy, I get only a white image

As the title says, only a white image is produced.
Below is my code:
import numpy as np
from vispy import io, scene
c = scene.SceneCanvas(keys='interactive', bgcolor='w', dpi=96)
view = c.central_widget.add_view()
xx, yy = np.arange(-1,1,.02),np.arange(-1,1,.02)
X,Y = np.meshgrid(xx,yy)
R = np.sqrt(X**2+Y**2)
Z = lambda t : 0.1*np.sin(10*R-2*np.pi*t)
surf = scene.visuals.SurfacePlot(xx, yy, Z(0), color=[0.5, 0.5, 0.5], shading='smooth')
view.add(surf)
img = c.render()
io.write_png("vispytest.png", img)
And I get vispytest.png
I'm using Xvfb on Linux.
Xvfb :1 -screen 0 2500x1500x24 -auth localhost
Thank you.
The problem is the camera: you must set one explicitly, with something similar to the following:
view.camera = scene.TurntableCamera(up='z', fov=60)
Complete code:
import numpy as np
from vispy import io, scene
c = scene.SceneCanvas(keys='interactive', bgcolor='w', dpi=96)
view = c.central_widget.add_view()
view.camera = scene.TurntableCamera(up='z', fov=60)
xx, yy = np.arange(-1,1,.02),np.arange(-1,1,.02)
X,Y = np.meshgrid(xx,yy)
R = np.sqrt(X**2+Y**2)
Z = lambda t : 0.1*np.sin(10*R-2*np.pi*t)
surf = scene.visuals.SurfacePlot(xx, yy, Z(0), color=[0.5, 0.5, 0.5], shading='smooth')
view.add(surf)
img = c.render()
io.write_png("vispytest.png", img)
vispytest.png
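Since Z is already parameterized by t, the same off-screen setup can render an animation frame by frame. A small sketch, assuming SurfacePlot.set_data accepts a z array (it does in current vispy):
for i, t in enumerate(np.linspace(0, 1, 10)):
    surf.set_data(z=Z(t))  # update the surface heights in place
    io.write_png("vispytest_%02d.png" % i, c.render())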

Dynamically updating a Matplotlib 3D plot from a ROS callback

I am trying to update a 3D plot using matplotlib. I am collecting data using ROS, and I want to update the plot as the data comes in. I have looked around and found this:
Dynamically updating plot in matplotlib
but I cannot get it to work. I am very new to Python and do not fully understand how it works yet. I apologize if my code is disgusting.
I keep getting this error:
[ERROR] [WallTime: 1435801577.604410] bad callback: <function usbl_move at 0x7f1e45c4c5f0>
Traceback (most recent call last):
  File "/opt/ros/indigo/lib/python2.7/dist-packages/rospy/topics.py", line 709, in _invoke_callback
    cb(msg, cb_args)
  File "/home/nathaniel/simulation/src/move_videoray/src/move_filtered.py", line 63, in usbl_move
    if filter(pos.pose.position.x,pos.pose.position.y,current.position.z):
  File "/home/nathaniel/simulation/src/move_videoray/src/move_filtered.py", line 127, in filter
    plt.draw()
  File "/usr/lib/pymodules/python2.7/matplotlib/pyplot.py", line 555, in draw
    get_current_fig_manager().canvas.draw()
  File "/usr/lib/pymodules/python2.7/matplotlib/backends/backend_tkagg.py", line 349, in draw
    tkagg.blit(self._tkphoto, self.renderer._renderer, colormode=2)
  File "/usr/lib/pymodules/python2.7/matplotlib/backends/tkagg.py", line 13, in blit
    tk.call("PyAggImagePhoto", photoimage, id(aggimage), colormode, id(bbox_array))
RuntimeError: main thread is not in main loop
This is the code I am trying to run:
#!/usr/bin/env python
'''
This program moves the videoray model in rviz using
data from the /usbl_pose node,
based on the "Using urdf with robot_state_publisher" tutorial
'''
import rospy
import roslib
import math
import tf
#import outlier_filter
from geometry_msgs.msg import Twist, Vector3, Pose, PoseStamped, TransformStamped
from matplotlib import matplotlib_fname
from mpl_toolkits.mplot3d import Axes3D
import sys
from matplotlib.pyplot import plot
from numpy import mean, std
import matplotlib as mpl
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
#plt.ion()
import matplotlib
mpl.rc("savefig", dpi=150)
import matplotlib.animation as animation
import time
#filter stuff
#window size
n = 10
#make some starting values
#random distance
md =[random.random() for _ in range(0, n)]
#random points
x_list = [random.random() for _ in range(0, n)]
y_list =[random.random() for _ in range(0, n)]
#set up graph
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
#ax.scatter(filt_x,filt_y,filt_depth,color='b')
#ax.scatter(outlier_x,outlier_y,outlier_depth,color='r')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_title('XY Outlier rejection \n with Mahalanobis distance and rolling mean3')
#set the robot at the center
#move the videoray using the data from the /usbl_pose node
def usbl_move(pos, current):
    broadcaster = tf.TransformBroadcaster()
    if filter(pos.pose.position.x, pos.pose.position.y, current.position.z):
        current.position.x = pos.pose.position.x
        current.position.y = pos.pose.position.y
    broadcaster.sendTransform((current.position.x, current.position.y, current.position.z),
                              (current.orientation.x, current.orientation.y, current.orientation.z, current.orientation.w),
                              rospy.Time.now(), "odom", "body")

#move the videoray using the data from the /pose_only node
def pose_move(pos, current):
    #pos.position.z is in kPa, has to be converted to depth
    # P = P0 + rho*g*z ----> pos.position.z = P0 + rho*g*z_real
    z_real = -1*(pos.position.z - 101.325)/9.81
    #update the movement
    broadcaster = tf.TransformBroadcaster()
    current.orientation.x = pos.orientation.x
    current.orientation.y = pos.orientation.y
    current.orientation.z = pos.orientation.z
    current.orientation.w = pos.orientation.w
    current.position.z = z_real
    broadcaster.sendTransform((current.position.x, current.position.y, current.position.z),
                              (current.orientation.x, current.orientation.y, current.orientation.z, current.orientation.w),
                              rospy.Time.now(), "odom", "body")

#filter the data
def filter(x, y, z):
    # update the window
    is_good = False
    x_list.append(x)
    y_list.append(y)
    x_list.pop(0)
    y_list.pop(0)
    #get the covariance matrix
    v = np.linalg.inv(np.cov(x_list, y_list, rowvar=0))
    #get the mean vector
    r_mean = mean(x_list), mean(y_list)
    #subtract the mean vector from the points
    x_diff = np.array([i - r_mean[0] for i in x_list])
    y_diff = np.array([i - r_mean[1] for i in y_list])
    #combine and transpose the x,y diff matrix
    diff_xy = np.transpose([x_diff, y_diff])
    # calculate the Mahalanobis distance
    dis = np.sqrt(np.dot(np.dot(np.transpose(diff_xy[n-1]), v), diff_xy[n-1]))
    # update the window
    md.append(dis)
    md.pop(0)
    #find the mean and standard deviation of the distance list
    mu = np.mean(md)
    sigma = np.std(md)
    #threshold to decide whether it is an outlier
    if dis < mu + 1.5*sigma:
        #filt_x.append(x)
        #filt_y.append(y)
        #filt_depth.append(z)
        ax.scatter(x, y, z, color='b')
        is_good = True
    else:
        ax.scatter(x, y, z, color='r')
    plt.draw()
    return is_good

if __name__ == '__main__':
    #set up the node
    rospy.init_node('move_unfiltered', anonymous=True)
    #make a broadcaster for the tf frame
    broadcaster = tf.TransformBroadcaster()
    #make initial values
    current = Pose()
    current.position.x = 0
    current.position.y = 0
    current.position.z = 0
    current.orientation.x = 0
    current.orientation.y = 0
    current.orientation.z = 0
    current.orientation.w = 0
    #send the tf frame
    broadcaster.sendTransform((current.position.x, current.position.y, current.position.z),
                              (current.orientation.x, current.orientation.y, current.orientation.z, current.orientation.w),
                              rospy.Time.now(), "odom", "body")
    #listen for information
    rospy.Subscriber("/usbl_pose", PoseStamped, usbl_move, current)
    rospy.Subscriber("/pose_only", Pose, pose_move, current)
    rospy.spin()
Since this is an old post that still seems to be active in the community, I am going to provide an example of how to do real-time plotting in general. Here I use matplotlib's FuncAnimation function.
import matplotlib.pyplot as plt
import rospy
import tf
from nav_msgs.msg import Odometry
from tf.transformations import quaternion_matrix
import numpy as np
from matplotlib.animation import FuncAnimation

class Visualiser:
    def __init__(self):
        self.fig, self.ax = plt.subplots()
        self.ln, = plt.plot([], [], 'ro')
        self.x_data, self.y_data = [], []

    def plot_init(self):
        self.ax.set_xlim(0, 10000)
        self.ax.set_ylim(-7, 7)
        return self.ln

    def getYaw(self, pose):
        quaternion = (pose.orientation.x, pose.orientation.y, pose.orientation.z,
                      pose.orientation.w)
        euler = tf.transformations.euler_from_quaternion(quaternion)
        yaw = euler[2]
        return yaw

    def odom_callback(self, msg):
        yaw_angle = self.getYaw(msg.pose.pose)
        self.y_data.append(yaw_angle)
        x_index = len(self.x_data)
        self.x_data.append(x_index+1)

    def update_plot(self, frame):
        self.ln.set_data(self.x_data, self.y_data)
        return self.ln

rospy.init_node('lidar_visual_node')
vis = Visualiser()
sub = rospy.Subscriber('/dji_sdk/odometry', Odometry, vis.odom_callback)
ani = FuncAnimation(vis.fig, vis.update_plot, init_func=vis.plot_init)
plt.show(block=True)
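A note on why this pattern avoids the RuntimeError above: the ROS subscriber callback only appends to plain Python lists, while all drawing happens on the main thread inside plt.show() via FuncAnimation, so the Tk backend is never touched from the callback thread.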
