I am using the Python bindings for GetFEM; to that end I wrote this script, following their tutorial:
import getfem as gf
import numpy as np
import math
center = [0.0, 0.0]
dir = [0.0, 1.0]
radius = 1.0
angle = 0.2 * math.pi
mo = gf.MesherObject("cone", center, dir, radius, angle)
h = 0.1
K = 2
mesh = gf.Mesh("generate", mo, h, K)
outer_faces = mesh.outer_faces()
OUTER_BOUND = 1
mesh.set_region(OUTER_BOUND, outer_faces)
sl = gf.Slice(("none",), mesh, 1)
mfu = gf.MeshFem(mesh, 1)
elements_degree = 2
mfu.set_classical_fem(elements_degree)
mim = gf.MeshIm(mesh, pow(elements_degree, 2))
md = gf.Model("real")
md.add_fem_variable("u", mfu)
md.add_Laplacian_brick(mim, "u")
F = 1.0
md.add_fem_data("F", mfu)
md.set_variable("F", np.repeat(F, mfu.nbdof()))
md.add_source_term_brick(mim, "u", "F")
md.add_Dirichlet_condition_with_multipliers(mim, "u", elements_degree - 1, OUTER_BOUND)
md.solve()
U = md.variable("u")
sl.export_to_vtk("u.vtk", "ascii", mfu, U, "U")
This exports a VTK file. Somewhere I found a way to display it in a Jupyter notebook:
import pyvista as pv
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1280, 1024))
display.start()
p = pv.Plotter()
m = pv.read("u.vtk")
contours = m.contour()
p.add_mesh(m, show_edges=False)
p.add_mesh(contours, color="black", line_width=1)
p.add_mesh(m.contour(8).extract_largest(), opacity=0.1)
pts = m.points
p.show(window_size=[384, 384], cpos="xy")
display.stop()
It looks awfully compressed for some reason, so I am trying to save it as a PNG instead.
Does anyone know how to convert the VTK into a PNG?
ParaView is deprecated on modern systems for all intents and purposes, so that's off the table.
You can generate a screenshot without displaying it with:
import pyvista as pv
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1280, 1024))
display.start()
p = pv.Plotter(off_screen=True)
m = pv.read("u.vtk")
contours = m.contour()
p.add_mesh(m, show_edges=False)
p.add_mesh(contours, color="black", line_width=1)
p.add_mesh(m.contour(8).extract_largest(), opacity=0.1)
pts = m.points
p.show(cpos="xy", screenshot='screenshot.png')
display.stop()
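If your PyVista version provides it, you can also drop pyvirtualdisplay entirely and use PyVista's built-in Xvfb helper. A minimal sketch, assuming a Linux machine with xvfb installed:
import pyvista as pv
pv.start_xvfb()  # start a virtual framebuffer (Linux + xvfb only)
p = pv.Plotter(off_screen=True)
p.add_mesh(pv.read("u.vtk"))
p.screenshot("screenshot.png")  # render off-screen and write the PNG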
I am reading a shapefile that contains data ranging from 0 to 100 in Python using GDAL. Unfortunately, while it runs without errors, the result is not correct (compared with QGIS). I have tried different NoDataValue settings, but have not found the right result.
Here is the code:
from osgeo import gdal
from osgeo import ogr
import matplotlib.pyplot as plt
import numpy as np
import glob
import numpy.ma as ma
import os  # needed for os.path.exists below
def Feature_to_Raster(input_shp, output_tiff, cellsize, field_name=True, NoData_value=-9999):
    # Input
    inp_driver = ogr.GetDriverByName('ESRI Shapefile')
    inp_source = inp_driver.Open(input_shp, 0)
    inp_lyr = inp_source.GetLayer(0)
    inp_srs = inp_lyr.GetSpatialRef()
    # Extent
    x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
    x_ncells = int((x_max - x_min) / cellsize)
    y_ncells = int((y_max - y_min) / cellsize)
    # Output
    out_driver = gdal.GetDriverByName('GTiff')
    if os.path.exists(output_tiff):
        out_driver.Delete(output_tiff)
    out_source = out_driver.Create(output_tiff, x_ncells, y_ncells, 1, gdal.GDT_Float32)
    out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
    out_source.SetProjection(inp_srs.ExportToWkt())
    out_lyr = out_source.GetRasterBand(1)
    out_lyr.SetNoDataValue(NoData_value)
    # Rasterize
    # print(inp_lyr)
    if field_name:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, options=["ATTRIBUTE=CT"])
    else:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
    # Save and/or close the data sources
    inp_source = None
    out_source = None
    ds = gdal.Open('name.tif')
    ndv = ds.GetRasterBand(1).GetNoDataValue()
    bnd1 = ds.GetRasterBand(1).ReadAsArray()
    bnd1[bnd1 == ndv] = np.nan
    tt = ma.masked_outside(bnd1, 1, 100)
    plt.imshow(tt, cmap='jet')
    plt.colorbar()
    plt.xlabel('Column #')
    plt.ylabel('Row #')
    plt.show()
    # Return
    return output_tiff
cellsize = 100  # placeholder; the original post never defines cellsize
output_tiff = 'D:/myfolder/name.tif'
input_shp = 'D:/myfolder/cis_SGRDAMID_20101201.shp'
Feature_to_Raster(input_shp, output_tiff, cellsize, field_name=True, NoData_value=-9999)
I've had more success with the gdal.Rasterize function; see if this solves your problem.
You can replace this:
    # Output
    out_driver = gdal.GetDriverByName('GTiff')
    if os.path.exists(output_tiff):
        out_driver.Delete(output_tiff)
    out_source = out_driver.Create(output_tiff, x_ncells, y_ncells, 1, gdal.GDT_Float32)
    out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
    out_source.SetProjection(inp_srs.ExportToWkt())
    out_lyr = out_source.GetRasterBand(1)
    out_lyr.SetNoDataValue(NoData_value)
    # Rasterize
    # print(inp_lyr)
    if field_name:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, options=["ATTRIBUTE=CT"])
    else:
        gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
with this:
    if field_name:
        # This will rasterize your shapefile according to the specified attribute field
        rasDs = gdal.Rasterize(output_tiff, input_shp,
                               xRes=cellsize, yRes=cellsize,
                               outputBounds=[x_min, y_min, x_max, y_max],
                               noData=NoData_value,
                               outputType=gdal.GDT_Float32,
                               attribute='CT',  # or whatever your attribute field name is
                               allTouched=True)
    else:
        # This will just burn 255 where there are vector data, since no attribute is defined
        rasDs = gdal.Rasterize(output_tiff, input_shp,
                               xRes=cellsize, yRes=cellsize,
                               outputBounds=[x_min, y_min, x_max, y_max],
                               noData=NoData_value,
                               outputType=gdal.GDT_Float32,
                               allTouched=True)
rasDs = inp_source = None
And always remember to keep your cell size consistent with your coordinate system, e.g. don't specify it in meters when the shapefile's projection is WGS84, which uses degrees...
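For instance (illustrative values only; pick units matching the layer's CRS):
cellsize = 100      # projected CRS in meters (e.g. UTM)
cellsize = 0.001    # geographic CRS in degrees (WGS84); roughly 100 m near the equator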
How can I convert an object's position in PyBullet to pixel coordinates and draw a line onto the frame using PyBullet and OpenCV?
We would like to do this because PyBullet's native addUserDebugLine() function is not available in DIRECT mode.
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
def capture_frame(base_pos=[0, 0, 0], _cam_dist=3, _cam_yaw=45, _cam_pitch=-45):
    _render_width, _render_height = VIDEO_RESOLUTION
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=90, aspect=float(_render_width) / _render_height,
        nearVal=0.01, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=_render_width, height=_render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # ER_BULLET_HARDWARE_OPENGL
    rgb_array = np.array(px, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array, view_matrix, proj_matrix
def render():
    frame, vmat, pmat = capture_frame()
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    frame, view_matrix, proj_matrix = capture_frame()
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}
    # reshape matrices
    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4, 4), order=my_order)
    vmat = np.array(view_matrix).reshape((4, 4), order=my_order)
    fmat = vmat.T @ pmat.T
    # compute origin from origin point in simulation
    origin = np.array([0, 0, 0, 1])
    frame_origin = (fmat @ origin)[:3] * np.array([1280, 640, 0]) + np.array([640, 360, 0])
    # define unit vectors
    unit_vectors = [np.array([1, 0, 0, 1]),
                    np.array([0, 1, 0, 1]),
                    np.array([0, 0, 1, 1])]
    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3] * np.array([1280, 640, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640, 360), (int(cur_point[0]), int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1, p2)
if __name__ == '__main__':
    physicsClient = p.connect(p.DIRECT)  # or p.GUI for the graphical version
    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optionally
    p.setGravity(0, 0, -10)
    planeId = p.loadURDF("plane.urdf")
    startPos = [1, 0, 0.2]
    startOrientation = p.getQuaternionFromEuler([0, 0, 0])
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    startPos = [0, 2, 0.2]
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame):
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range(2400):
        if i == 2399:
            render()
        p.stepSimulation()
    p.disconnect()
The expected output would be the following frame, but with the origin coordinate frame drawn correctly, e.g. the X, Y, and Z axes colored red, blue, and green respectively.
Since the two R2D2 robots are positioned at [1, 0, 0.2] and [0, 2, 0.2] respectively, we can see that the coordinate frame is off. (See image below.)
We tried the following:
- transposing the matrices
- not transposing the matrices
- changing the order in which we compute fmat, e.g. pmat @ vmat instead of vmat @ pmat, etc.
Any help is appreciated.
After a lot of fiddling, I came to a solution.
Playing with it for a while, I reached a point where it looked almost OK, except for a rotation of the axes given by the yaw angle. So I made a second call to computeViewMatrixFromYawPitchRoll, but with the opposite yaw, in order to compute the transformation for the axes.
Unfortunately, I'm not sure why this works... but it works!
Note: base_pos, _cam_dist, _cam_yaw and _cam_pitch have been moved into render(). Note also that the up direction has been reversed (don't ask why... :-) ). A pretty messy explanation, I must admit...
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
import os
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
K=np.array([[1280,0,0],[0,720,0],[0,0,1]])
def capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch):
    _render_width, _render_height = VIDEO_RESOLUTION
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    proj_matrix = p.computeProjectionMatrixFOV(
        fov=90, aspect=float(_render_width) / _render_height,
        nearVal=0.01, farVal=100.0)
    (_, _, px, _, _) = p.getCameraImage(
        width=_render_width, height=_render_height, viewMatrix=view_matrix,
        projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER)  # ER_BULLET_HARDWARE_OPENGL
    rgb_array = np.array(px, dtype=np.uint8)
    rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
    rgb_array = rgb_array[:, :, :3]
    return rgb_array, view_matrix, proj_matrix
def render():
    p1, cubeOrn = p.getBasePositionAndOrientation(1)
    p2, cubeOrn = p.getBasePositionAndOrientation(2)
    base_pos = [0, 0, 0]
    _cam_dist = 3
    _cam_yaw = 45
    _cam_pitch = -30
    frame, view_matrix, proj_matrix = capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch)
    frame = cv2.resize(frame, VIDEO_RESOLUTION)
    points = {}
    # inverse transform
    view_matrix = p.computeViewMatrixFromYawPitchRoll(
        cameraTargetPosition=base_pos,
        distance=_cam_dist,
        yaw=-_cam_yaw,
        pitch=_cam_pitch,
        roll=0,
        upAxisIndex=2)
    my_order = 'C'
    pmat = np.array(proj_matrix).reshape((4, 4), order=my_order)
    vmat = np.array(view_matrix).reshape((4, 4), order=my_order)
    fmat = pmat @ vmat.T
    # compute origin from origin point in simulation
    origin = np.array([0, 0, 0, 1])
    frame_origin = (fmat @ origin)[:3] * np.array([1280, 720, 0]) + np.array([640, 360, 0])
    # define unit vectors
    unit_vectors = [np.array([1, 0, 0, 1]),
                    np.array([0, 1, 0, 1]),
                    np.array([0, 0, -1, 1])]
    for col_id, unit_vector in enumerate(unit_vectors):
        cur_point = (fmat @ unit_vector)[:3] * np.array([1280, 720, 0]) + np.array([640, 360, 0])
        cv2.line(frame, (640, 360), (int(cur_point[0]), int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
    cv2.imwrite("my_rendering.jpg", frame)
    print(p1, p2)
if __name__ == '__main__':
    physicsClient = p.connect(p.DIRECT)  # or p.GUI for the graphical version
    #physicsClient = p.connect(p.GUI)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())  # optionally
    p.setGravity(0, 0, -10)
    planeId = p.loadURDF("plane.urdf")
    #arrows = p.loadURDF("arrows.urdf")
    startPos = [1, 0, 0.2]
    startOrientation = p.getQuaternionFromEuler([0, 0, 0])
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    startPos = [0, 2, 0.2]
    boxId = p.loadURDF("r2d2.urdf", startPos, startOrientation)
    # set the center of mass frame (loadURDF sets base link frame):
    # p.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
    for i in range(2400):
        if i == 2399:
            render()
        p.stepSimulation()
    p.disconnect()
Here is the result:
Best regards.
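As an addendum, the conventional way to map a world-space point to pixel coordinates is the full projection-view transform followed by a perspective divide. A minimal sketch, assuming PyBullet's column-major (OpenGL-style) matrix layout; the helper name world_to_pixel is my own, not part of the PyBullet API:
import numpy as np
def world_to_pixel(point, view_matrix, proj_matrix, width=1280, height=720):
    # PyBullet returns both matrices as 16-tuples in column-major order
    vmat = np.array(view_matrix).reshape((4, 4), order='F')
    pmat = np.array(proj_matrix).reshape((4, 4), order='F')
    world = np.append(np.asarray(point, dtype=float), 1.0)
    clip = pmat @ vmat @ world       # world -> clip space
    ndc = clip[:3] / clip[3]         # perspective divide -> [-1, 1] cube
    x = int((ndc[0] + 1.0) / 2.0 * width)
    y = int((1.0 - ndc[1]) / 2.0 * height)  # flip y: image origin is top-left
    return x, y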
I am attempting to find a way to visualize the separate regions/phases of the Madden-Julian Oscillation (MJO). I believe one way to do so would be to plot the longitude lines that separate each phase region (at roughly 60E, 80E, 100E, 120E, 140E, 160E, and 180), but I am unsure whether it is possible to add them to my existing plots.
I am using GRID-Sat B1 data from NCEI. Here is what my current code looks like:
import matplotlib.pyplot as plt
from metpy.plots import declarative, colortables
import cartopy.crs as ccrs
import xarray as xr
file = "GRIDSAT-B1.2003.11.23.00.v02r01.nc"
dataset = xr.open_dataset(file)
vtime = dataset.time.values.astype('datetime64[s]').astype('O')
date_long = vtime[0]
date = date_long.strftime("%d-%b-%Y-%HZ")
# Create water vapor image
img = declarative.ImagePlot()
img.data = dataset
img.field = 'irwvp'
img.colormap = 'WVCIMSS_r'
img.image_range = (180, 280)
panel = declarative.MapPanel()
panel.layers = ['coastline', 'borders']
panel.title = f'GridSat-B1 (Water Vapor Imagery): {date}'
panel.projection = (ccrs.Mollweide(central_longitude=-240))
panel.area = ([-370, -140, -30, 30])
panel.layout = (2, 1, 2)
panel.plots = [img]
# Create the IR image
img2 = declarative.ImagePlot()
img2.data = dataset
img2.field = 'irwin_cdr'
img2.colormap = 'turbo_r' #maybe use cubehelix instead?
img2.image_range = (180, 300)
panel2 = declarative.MapPanel()
panel2.layers = ['coastline', 'borders']
panel2.title = f'GridSat-B1 (Infrared Imagery): {date}'
panel2.projection = (ccrs.Mollweide(central_longitude=-240))
panel2.area = ([-370, -140, -30, 30])
panel2.layout = (2, 1, 1)
panel2.plots = [img2]
# Plot both panels in one figure
pc = declarative.PanelContainer()
pc.size = (20, 14)
pc.panels = [panel, panel2]
pc.show()
Here is the current output when I run the script:
Nov03.png
Any help/suggestions are appreciated - thanks in advance!
There's nothing built into MetPy's declarative interface, but fortunately the MapPanel objects expose a .ax attribute that gives you a Matplotlib Axes object and all its plotting methods:
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import metpy.plots as mpplots
import numpy as np
import xarray as xr
file = "/Users/rmay/Downloads/GRIDSAT-B1.2003.11.23.00.v02r01.nc"
dataset = xr.open_dataset(file)
vtime = dataset.time.values.astype('datetime64[s]').astype('O')
date_long = vtime[0]
date = date_long.strftime("%d-%b-%Y-%HZ")
# Create water vapor image
img = mpplots.ImagePlot()
img.data = dataset
img.field = 'irwvp'
img.colormap = 'WVCIMSS_r'
img.image_range = (180, 280)
panel = mpplots.MapPanel()
panel.layers = ['coastline', 'borders']
panel.title = f'GridSat-B1 (Water Vapor Imagery): {date}'
panel.projection = ccrs.Mollweide(central_longitude=-240)
panel.area = (-370, -140, -30, 30)
panel.layout = (2, 1, 2)
panel.plots = [img]
# Create the IR image
img2 = mpplots.ImagePlot()
img2.data = dataset
img2.field = 'irwin_cdr'
img2.colormap = 'turbo_r' #maybe use cubehelix instead?
img2.image_range = (180, 300)
panel2 = mpplots.MapPanel()
panel2.layers = ['coastline', 'borders']
panel2.title = f'GridSat-B1 (Infrared Imagery): {date}'
panel2.projection = ccrs.Mollweide(central_longitude=-240)
panel2.area = (-370, -140, -30, 30)
panel2.layout = (2, 1, 1)
panel2.plots = [img2]
# Plot both panels in one figure
pc = mpplots.PanelContainer()
pc.size = (20, 14)
pc.panels = [panel, panel2]
lons = np.array([60, 80, 100, 120, 140, 160, 180]).reshape(1, -1)
lats = np.linspace(-90, 90).reshape(-1, 1)
# Match up the arrays into 2xN arrays fit to plot in call
lons, lats = np.broadcast_arrays(lons, lats)
# Needs to be *after* the panels are assigned to a PanelContainer
# Using Geodetic gives lines interpolated on the curved globe
panel.ax.plot(lons, lats, transform=ccrs.Geodetic(), color='black', linewidth=3)
panel2.ax.plot(lons, lats, transform=ccrs.Geodetic(), color='black', linewidth=3)
pc.show()
(Note: it's not recommended to import from MetPy's declarative module directly, since that's an implementation detail subject to change; just get things from metpy.plots.) So this is using Matplotlib's standard plot call to draw the lines. Another option would be to use CartoPy's Gridliner, as in the sketch below.
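For example, a minimal Gridliner sketch (assuming the same panel objects as above; only the chosen meridians are drawn, with no parallels or labels):
import cartopy.crs as ccrs
gl = panel.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
                        xlocs=[60, 80, 100, 120, 140, 160, 180], ylocs=[],
                        color='black', linewidth=3)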
This is my first post on StackOverflow.
I am writing a Mayavi Python program. Could anybody tell me how to update/modify the color of a point interactively? For example, in points3d(), changing the color of a point in real time when I interactively modify its position.
I tried to do something under @on_trait_change, but it doesn't work: the color cannot be changed.
The following is my code:
import mayavi
import mayavi.mlab
from numpy import arange, pi, cos, sin
from traits.api import HasTraits, Range, Instance, \
on_trait_change
from traitsui.api import View, Item, HGroup
from mayavi.core.api import PipelineBase
from mayavi.core.ui.api import MayaviScene, SceneEditor, \
MlabSceneModel
def luc_func(x, y, z):
    return x + y + z
class Visualization(HasTraits):
    x1 = Range(1, 30, 5)
    z1 = Range(1, 30, 5)
    scene = Instance(MlabSceneModel, ())

    def __init__(self):
        # Do not forget to call the parent's __init__
        HasTraits.__init__(self)
        z = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
        y = [1,1,1,1,1,2,2,2,2,2,3,3,3,3,3,4,4,4,4,4,5,5,5,5,5]
        x = [1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5,1,2,3,4,5]
        self.plot = self.scene.mlab.points3d(x, y, z, luc_func, scale_mode='none')
        #self.plot2 = self.scene.mlab.points3d(z, x, y, color=(0, 0, 1))

    @on_trait_change('x1,z1')
    def update_plot(self):
        x = [1,2,3,4,self.x1,1,2,3,4,self.x1,1,2,3,4,self.x1,1,2,3,4,self.x1,1,2,3,4,self.x1]
        z = [1,1,1,1,self.z1,1,1,1,1,self.z1,1,1,1,1,self.z1,1,1,1,1,self.z1,1,1,1,1,self.z1]
        luc_func = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,self.z1]
        self.plot.mlab_source.reset(x=x, z=z, luc_func=luc_func)
        #self.plot2.mlab_source.set(y=y, z=z)

    # the layout of the dialog created
    view = View(Item('scene', editor=SceneEditor(scene_class=MayaviScene),
                     height=250, width=300, show_label=False),
                HGroup(
                    '_', 'x1', 'z1',
                ),
                )

visualization = Visualization()
visualization.configure_traits()
Thanks for your help!
I have noticed a bug in the interactivity of points3d very similar to what you are describing here. I don't know exactly what the origin of this bug is, but I regularly use the following workaround. The basic idea is to avoid mlab.points3d and instead call mlab.pipeline.glyph directly, as in:
import numpy as np
from mayavi import mlab

def virtual_points3d(coords, figure=None, scale_factor=None, color=None,
                     name=None):
    c = np.array(coords)
    source = mlab.pipeline.scalar_scatter(c[:, 0], c[:, 1], c[:, 2],
                                          figure=figure)
    return mlab.pipeline.glyph(source, scale_mode='none',
                               scale_factor=scale_factor,
                               mode='sphere', figure=figure, color=color,
                               name=name)
Later you can change the colors by referring to the VTK object directly, rather than the Mayavi trait that isn't connected properly:
glyph = virtual_points3d(coords)
glyph.mlab_source.dataset.point_data.scalars = new_values
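Usage might look like this (a sketch; the random data and the mlab.draw() call to force a redraw are my additions, not from the original):
import numpy as np
from mayavi import mlab
coords = np.random.rand(25, 3)
glyph = virtual_points3d(coords, scale_factor=0.05)
# assign one scalar per point; the glyph colors follow the scalars
glyph.mlab_source.dataset.point_data.scalars = np.linspace(0.0, 1.0, len(coords))
mlab.draw()   # force a redraw so the new colors appear
mlab.show()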