How to plot in real time with VisPy library? - python

I wrote a script that models the evolution of a pandemic (with graphs and scatter plots).
I tried several libraries to display the results in real time (8 countries x 500 particles):
Matplotlib (not fast enough)
PyQtGraph (better, but still not fast enough)
OpenGL (good, but I did not find an efficient way to use it in 2D with subplots, titles, legends...)
Bokeh (good, but the scatter plots "blink" each time their particles change color. Code is here if you are interested)
That is why I am now turning to VisPy.
I am using a Visualizer class to display the results, with app.Timer().connect handling the real-time side. The Pandemic code is here.
import sys
import numpy as np  # used below; Pandemic may already provide these
from Pandemic import *
from vispy.plot import Fig
from vispy import app


class Visualizer:
    def __init__(self, world):
        self.fig = Fig()
        self.world = world
        self.traces = {}
        # Scatter plots
        for idx, c in world.countries.items():
            pos_x = idx % self.world.nb_cols
            pos_y = idx // self.world.nb_cols
            subplot = self.fig[pos_y, pos_x]
            data = np.array([c.x_coord, c.y_coord]).reshape(-1, 2)
            self.traces[idx] = subplot.plot(data, symbol='o', width=0, face_color=c.p_colors, title='Country {}'.format(idx+1))

    def display(self):
        for idx, c in self.world.countries.items():
            data = np.array([c.x_coord, c.y_coord]).reshape(-1, 2)
            self.traces[idx].set_data(data, face_color=c.p_colors)

    def update(self, event):
        self.world.update(quarantine=False)
        self.display()

    def animation(self):
        self.timer = app.Timer()
        self.timer.connect(self.update)
        self.timer.start(0)
        self.start()

    def start(self):
        if (sys.flags.interactive != 1):
            self.status = app.run()


if __name__ == '__main__':
    w = World(move=0.001)
    for i in range(8):
        w.add_country(nb_S=500)
    v = Visualizer(w)
    v.animation()
The scatter plots "blink" each time their particles change color, just as with Bokeh. Am I doing something wrong?
Is there a more efficient way to display in real time, maybe using vispy.gloo or vispy.scene? (For the moment it is slower than pyqtgraph.opengl.)

We can plot efficiently in real time by using the vispy.gloo module to leverage the power of the GPU. Here is one way of doing it:
1) Build a class that inherits from vispy.app.Canvas.
2) Create an OpenGL Program whose inputs are shaders. This object lets us link our data to shader variables; each dot on the canvas is drawn from these variable values (its coordinates, color, etc.). The drawback is that displaying text (titles, labels, etc.) is much harder than with the Matplotlib library. Here is a deeper explanation of the process.
3) Set a timer connected to the function we want to call repeatedly (the real-time side).
The vispy.scene module, dedicated to high-level visualization interfaces for scientists, is still experimental; maybe that is why my first code had some bugs.
Here is my new code.
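The full code is linked above and not reproduced here, but a minimal sketch of the three steps (a single scatter of points, with random positions standing in for the simulation update; all names are illustrative, not the original code) could look like this:
import numpy as np
from vispy import app, gloo

VERT_SHADER = """
attribute vec2 a_position;
attribute vec3 a_color;
varying vec3 v_color;
void main() {
    gl_Position = vec4(a_position, 0.0, 1.0);
    gl_PointSize = 6.0;
    v_color = a_color;
}
"""

FRAG_SHADER = """
varying vec3 v_color;
void main() {
    gl_FragColor = vec4(v_color, 1.0);
}
"""


class ScatterCanvas(app.Canvas):
    def __init__(self, n=500):
        # 1) Inherit from vispy.app.Canvas
        app.Canvas.__init__(self, size=(600, 600), keys='interactive')
        self.n = n
        # 2) OpenGL Program linking our data to the shader variables
        self.program = gloo.Program(VERT_SHADER, FRAG_SHADER)
        self.program['a_position'] = np.random.uniform(-1, 1, (n, 2)).astype(np.float32)
        self.program['a_color'] = np.ones((n, 3), dtype=np.float32)
        gloo.set_viewport(0, 0, *self.physical_size)
        # 3) Timer connected to the function we want to call repeatedly
        self.timer = app.Timer('auto', connect=self.on_timer, start=True)
        self.show()

    def on_resize(self, event):
        gloo.set_viewport(0, 0, *event.physical_size)

    def on_timer(self, event):
        # Here you would run one simulation step (e.g. world.update())
        # and upload the new positions/colors to the GPU buffers.
        self.program['a_position'] = np.random.uniform(-1, 1, (self.n, 2)).astype(np.float32)
        self.update()

    def on_draw(self, event):
        gloo.clear(color='black')
        self.program.draw('points')


if __name__ == '__main__':
    canvas = ScatterCanvas()
    app.run()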

Related

How to plot many 3D cubes with different colors and opacities

Goal
I want to plot a large number of cubes (arranged in a 3D grid) with different colors and opacities.
Current State and question
I have come up with a solution using vispy, but the performance is very poor: drawing takes a very long time and the window is very unresponsive. There also seem to be some glitches in the visualization, but I could live with those.
Is there a more efficient/elegant way to implement this? I am open to using other packages (I tried open3d but found it difficult to specify colors and opacities; the documentation is not very verbose). However, I need to use Python.
What I did so far
The first problem I had to solve with vispy was that I was unable to create cubes at custom positions. I therefore wrote a subclass that can do that:
import vispy.visuals
import vispy.scene  # needed for create_visual_node below
from vispy.geometry import create_box


class PositionedCubeVisual(vispy.visuals.BoxVisual):
    def __init__(self, size=1, position=(0, 0, 0), width_segments=1,
                 height_segments=1, depth_segments=1, planes=None,
                 vertex_colors=None, face_colors=None,
                 color=(0.5, 0.5, 1, 1), edge_color=None, **kwargs):
        vertices, filled_indices, outline_indices = create_box(
            size, size, size, width_segments, height_segments,
            depth_segments, planes)
        # Shift the unit cube to the requested position
        for column, pos in zip(vertices['position'].T, position):
            column += pos
        self._mesh = vispy.visuals.MeshVisual(vertices['position'], filled_indices,
                                              vertex_colors, face_colors, color)
        if edge_color:
            self._border = vispy.visuals.MeshVisual(vertices['position'], outline_indices,
                                                    color=edge_color, mode='lines')
        else:
            self._border = vispy.visuals.MeshVisual()
        vispy.visuals.CompoundVisual.__init__(self, [self._mesh, self._border], **kwargs)
        self.mesh.set_gl_state(polygon_offset_fill=True,
                               polygon_offset=(1, 1), depth_test=True)


PositionedCube = vispy.scene.visuals.create_visual_node(PositionedCubeVisual)
I then plot the cubes as follows:
import numpy as np
import vispy.scene
from matplotlib import cm  # used by get_color_array below


def plot_grid_cubes(x, y, z, c=None, size=1, alpha=0.1, edge_color="black",
                    cmap="viridis", bgcolor="#FFFFFF"):
    canvas = vispy.scene.SceneCanvas(keys='interactive', show=True)
    view = canvas.central_widget.add_view()
    view.bgcolor = bgcolor
    view.camera = 'turntable'
    c = get_color_array(c, alpha, min(len(x), len(y), len(z)), cmap)
    for xx, yy, zz, cc in zip(x, y, z, c):
        cube = PositionedCube(size, (xx, yy, zz), color=cc, edge_color=edge_color, parent=view.scene)
    canvas.app.run()


def get_color_array(c, alpha, size, cmap):
    if c is not None:
        cmap = cm.get_cmap(cmap)
        if hasattr(c, "__iter__"):
            c = np.array(c, copy=True, dtype=float)
        c -= c.min()
        c *= 255 / c.max()
        return cmap(c.astype(int), alpha)
    else:
        color = np.ones((size, 4))
        color[:, 3] = alpha
        return color
This can then be applied as follows:
plot_grid_cubes([0, 1], [0, 1], [0, 1], c=[0.3, 0.5], alpha=[0.3, 0.8])
The example above works great, but performance becomes very poor if I plot thousands of cubes.
Regarding performance on vispy, you may want to read this:
Each Visual object in VisPy is an OpenGL Program consisting of at least a vertex shader and a fragment shader (see Modern OpenGL). In general, except for some very specific cases, OpenGL Programs can only be executed one at a time by a single OpenGL context. This means that in your VisPy visualization each Visual object you tell VisPy to draw will extend how long each update (draw) takes. When frames per second (FPS) or responsiveness are a concern, this means each Visual you add reduces the performance of your visualization.
While VisPy is constantly striving to improve performance, there are things that you can do in the mean time (depending on your particular case). The most important change that you can make is to lower the number of Visual objects you have. For a lot of Visuals it is possible to combine them into one by putting a little extra work into the data you provide them. For example, instead of creating 10 separate LineVisuals, create 1 LineVisual that draws 10 lines. While this is a simple example, the same concept applies to other types of Visuals in VisPy and more complex use cases. As a last resort for the most complex cases, a custom Visual (custom shader code) may be necessary. Before writing a custom Visual, check with VisPy maintainers by asking a question on gitter or creating a question as a GitHub issue.
Now for the BoxVisual this is a little difficult because as far as I can tell this "convenience" class doesn't allow you to make many boxes with a single BoxVisual instance. Since you are already comfortable making a Visual subclass I would recommend making the MeshVisuals yourself and providing the vertices for each box as one single position array.
As for not being able to specify position, this won't apply to your custom Visual class that will use the all-in-one array since you'll be providing each position at the beginning, but I thought I should still describe it. It is unfortunate that the BoxVisual is trying to be so convenient that it isn't helpful in this case since other Visuals allow you to pass your vertex positions on creation. In other cases or when you only want to make small modifications, typically what is done in VisPy is to use one or more "transforms" added to the Visual to shift (transform) the positions passed to the Visual. For example:
from vispy.visuals.transforms import STTransform

cube = ...  # create a cube visual as above
cube.transform = STTransform(scale=(1.0, 1.0, 1.0), translate=(0.0, 0.0, 0.0))
where you change the scale and translate values as needed to affect the (X, Y, Z) coordinate values. After this, modifying the property directly (cube.transform.translate = (new_x, new_y, new_z), or .scale, or using another transform class) has the benefit of only updating that property on the GPU, without recomputing and resending the vertex positions (better performance).
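To illustrate the recommendation above, here is a minimal sketch (not from the original answer; the function name and per-face color handling are assumptions) of packing all cubes into a single Mesh visual by concatenating the vertices and faces produced by create_box:
import numpy as np
import vispy.scene
from vispy.geometry import create_box
from vispy.scene.visuals import Mesh


def merged_cube_mesh(positions, colors, size=1.0, parent=None):
    # positions: (N, 3) cube centers; colors: (N, 4) RGBA, one per cube
    all_verts, all_faces, all_face_colors = [], [], []
    offset = 0
    for pos, rgba in zip(positions, colors):
        vertices, filled_indices, _ = create_box(size, size, size)
        verts = vertices['position'] + np.asarray(pos, dtype=np.float32)
        all_verts.append(verts)
        all_faces.append(filled_indices + offset)  # re-index into the merged vertex array
        all_face_colors.append(np.tile(rgba, (len(filled_indices), 1)))
        offset += len(verts)
    # One visual (one OpenGL program) for every cube in the grid
    return Mesh(np.concatenate(all_verts), np.concatenate(all_faces),
                face_colors=np.concatenate(all_face_colors), parent=parent)


canvas = vispy.scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
view.camera = 'turntable'
pos = [(0, 0, 0), (1, 0, 0), (0, 1, 0)]
col = [(1, 0, 0, 0.5), (0, 1, 0, 0.5), (0, 0, 1, 0.5)]
merged_cube_mesh(pos, col, parent=view.scene)
canvas.app.run()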

Fast, Real-time plotting of points using pyqtgraph and a LiDAR

I want to create a real-time point-plotting GUI. I am using the Scanse Sweep LiDAR, and at each sweep of this LiDAR (running between 1 and 10 Hz) I receive approximately 1000 (x, y) points describing the LiDAR's surroundings. This is a 2D LiDAR.
I have looked everywhere and tried countless code snippets for pyqtgraph, but either it crashes, is super slow, or doesn't work at all.
Is there a straight-forward way of creating a plotter window and upon each new scan/data delivery, push those points to the plotter window?
Thankful for any kind of help
It is unclear to me what exactly you want to do, so I assume that you want to make a scatter plot with 1000 points that is refreshed 10 times a second. Next time please include your code so that we can reproduce your issues and see what you want to achieve.
In my experience PyQtGraph is the fastest option in Python. It can easily plot 1000 points at 10 Hz. See the example below.
#!/usr/bin/env python
from PyQt5 import QtCore, QtWidgets
import pyqtgraph as pg
import numpy as np


class MyWidget(pg.GraphicsWindow):

    def __init__(self, parent=None):
        super().__init__(parent=parent)

        self.mainLayout = QtWidgets.QVBoxLayout()
        self.setLayout(self.mainLayout)

        self.timer = QtCore.QTimer(self)
        self.timer.setInterval(100)  # in milliseconds
        self.timer.start()
        self.timer.timeout.connect(self.onNewData)

        self.plotItem = self.addPlot(title="Lidar points")

        self.plotDataItem = self.plotItem.plot([], pen=None,
            symbolBrush=(255, 0, 0), symbolSize=5, symbolPen=None)

    def setData(self, x, y):
        self.plotDataItem.setData(x, y)

    def onNewData(self):
        numPoints = 1000
        x = np.random.normal(size=numPoints)
        y = np.random.normal(size=numPoints)
        self.setData(x, y)


def main():
    app = QtWidgets.QApplication([])
    pg.setConfigOptions(antialias=False)  # True seems to work as well
    win = MyWidget()
    win.show()
    win.resize(800, 600)
    win.raise_()
    app.exec_()


if __name__ == "__main__":
    main()
The way it works is as follows. By plotting an empty list a PlotDataItem is created. This represents a collection of points. When new data points arrive, the setData method is used to set them as the data of the PlotDataItem, which removes the old points.

Live data plotting lags after a while

I've been writing a program for workstation automation in a laboratory. One of the instruments I communicate with is called a beam profiler; it basically reads light inputs from two orthogonal directions (x, y). Once the input is read, I need to convert it to a 2D image; for that I use numpy's meshgrid and I'm able to obtain my desired output.
For clarity, see the image below: the two Gaussian lines on the x and y axes are my raw input, and the colored figure is the result after processing with meshgrid.
I divide my software into two parts for this. First I create another Qt thread that initializes my device and runs in a loop, getting the data and processing it. This thread then sends a signal with the values to the main thread.
In the main thread I receive the values, plot the graph and update the GUI screen.
It is already working; the problem is that when I start the beam profiler readings, the software gets slower as time passes. At first I thought it was because of the data processing, but that doesn't make sense because it runs in the second thread, and when I first start the device there is no lag.
It seems as if it were "saving" the data in memory and getting slower, which is weird since I'm using the set_data and draw methods for plotting.
Note: if I stop the device readings inside my software the lag stops, and if I start them again, they start fine but then lag as time passes.
Any help is much appreciated!
Data acquisition thread code:
class ThreadGraph(QtCore.QThread):
    _signalValues = QtCore.pyqtSignal(float, float, float, float, float, float, float, float)
    _signalGraph = QtCore.pyqtSignal(np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray)
    _signalError = QtCore.pyqtSignal(str)
    BEAMstatus = QtCore.pyqtSignal(str)

    def __init__(self, parent=None):
        super(ThreadGraph, self).__init__(parent)
        self.slit = 0
        self.state = False

    # Thread starts
    def run(self):
        self.init()  # Device initialization (not relevant, therefore omitted)
        time.sleep(0.1)
        while self.state == True:  # Thread loop (data acquisition)
            self.emitValues()  # Function to get the data and emit it
            time.sleep(0.016)
            self.emitGraph()  # Process data into 2D and emit
        try:  # When the while loop is over, terminate the thread
            self.beam.close(self.session)
        except RuntimeError as err:
            print err
        self.quit()

    def emitGraph(self):  # Use the acquired data to generate the 2D image and emit it
        xx, yy = np.meshgrid(self.slit_data_int[self.slit][0::10], self.slit_data_int[self.slit+1][0::10])
        zz = xx * yy
        self._signalGraph.emit(
            self.slit_data_pos[self.slit][0::10],
            self.slit_data_int[self.slit][0::10],
            self.slit_data_pos[self.slit + 1][0::10],
            self.slit_data_int[self.slit + 1][0::10],
            zz
        )

    def emitValues(self):
        try:  # Try to get data from the device (data is stored in calculation_result)
            self.slit_data_pos, self.slit_data_int, self.calculation_result, self.power, self.power_saturation, self.power_intensities = self.beam.get_slit_scan_data(self.session)
        except RuntimeError as err:
            self._signalError.emit(str(err))
            return
        else:  # emit data to the GUI main thread
            self._signalValues.emit(
                self.calculation_result[self.slit].peakPosition,
                self.calculation_result[self.slit + 1].peakPosition,
                self.calculation_result[self.slit].peakIntensity,
                self.calculation_result[self.slit + 1].peakIntensity,
                self.calculation_result[self.slit].centroidPosition,
                self.calculation_result[self.slit + 1].centroidPosition,
                self.calculation_result[self.slit].gaussianFitDiameter,
                self.calculation_result[self.slit + 1].gaussianFitDiameter
            )
Main GUI code:
class BP209_class(QtGui.QWidget):
    def __init__(self, vbox, slit25, slit5, peakposx, peakposy, peakintx, peakinty, centroidposx, centroidposy, mfdx, mfdy):
        QtGui.QWidget.__init__(self)
        # Initialize a bunch of gui variables
        self.matplotlibWidget = MatplotlibWidget('2d')
        self.vboxBeam = vbox
        self.vboxBeam.addWidget(self.matplotlibWidget)
        self.vboxBeam.addWidget(self.matplotlibWidget.canvastoolbar)
        # Create the thread and connects
        self.thread = ThreadGraph(self)
        self.thread._signalError.connect(self.Error_Handling)
        self.thread._signalValues.connect(self.values_update)
        self.thread._signalGraph.connect(self.graph_update)
        self.thread.BEAMstatus.connect(self.Status)
        # Initialize variables for plots
        self.zz = zeros([750, 750])
        self.im = self.matplotlibWidget.axis.imshow(self.zz, cmap=cm.jet, origin='upper', vmin=0, vmax=1, aspect='auto', extent=[-5000, 5000, -5000, 5000])
        self.pv, = self.matplotlibWidget.axis.plot(np.zeros(750), np.zeros(750), color="white", alpha=0.6, lw=2)
        self.ph, = self.matplotlibWidget.axis.plot(np.zeros(750), np.zeros(750), color="white", alpha=0.6, lw=2)
        self.matplotlibWidget.figure.subplots_adjust(left=0.00, bottom=0.01, right=0.99, top=1, wspace=None, hspace=None)
        self.matplotlibWidget.axis.set_xlim([-5000, 5000])
        self.matplotlibWidget.axis.set_ylim([-5000, 5000])

    def __del__(self):  # stop thread
        self.thread.state = False
        self.thread.wait()

    def start(self):  # start thread
        if self.thread.state == False:
            self.thread.state = True
            self.thread.start()
        else:
            self.thread.state = False
            self.thread.wait()

    # Slot that receives data from the device and plots it
    def graph_update(self, slit_samples_positionsX, slit_samples_intensitiesX, slit_samples_positionsY, slit_samples_intensitiesY, zz):
        self.pv.set_data(np.divide(slit_samples_intensitiesX, 15) - 5000, slit_samples_positionsX)
        self.ph.set_data(slit_samples_positionsY, np.divide(slit_samples_intensitiesY, 15) - 5000)
        self.im.set_data(zz)
        self.im.autoscale()
        self.matplotlibWidget.canvas.draw()
Edit: I also have a camera attached to my system and I display it in the GUI as well, using OpenCV. I noticed that if I start the camera, the beam profiler's FPS drops to almost half. So maybe a Qt paint optimization would be the way to go?
Calls to canvas.draw() are expensive. You are likely acquiring data faster than drawing commands can complete. This will cause paint events to get queued up and your plot will appear to lag. This blog post details a method that avoids calling canvas.draw() and can be used to speed up matplotlib realtime plotting.
If this is still not fast enough you may have to lower the acquisition rate, implement some form of frame skipping mechanism or use a different plotting library better optimised for speed.
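For reference, here is a minimal sketch of that idea (blitting: cache the static background once, then for each frame restore it, redraw only the changed artists and blit; the names are illustrative, not the blog post's code):
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(-5000, 5000)
ax.set_ylim(-5000, 5000)
(line,) = ax.plot([], [], color="red", lw=2, animated=True)

fig.canvas.draw()                                 # one full draw
background = fig.canvas.copy_from_bbox(ax.bbox)   # cache the static content


def update_frame(x, y):
    fig.canvas.restore_region(background)         # repaint the cached background
    line.set_data(x, y)
    ax.draw_artist(line)                          # redraw only the changed artist
    fig.canvas.blit(ax.bbox)                      # push just that region to the screen
    fig.canvas.flush_events()

# e.g. call update_frame(...) from the slot instead of canvas.draw()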

Plotting real time data with PyQt

So I'm trying to plot real-time data with PyQt. I have it working in a sense, but matplotlib seems to be slowing it down a lot. If I reduce the number of plots I can get the sample rate I want. I have two timer events, one that gathers data and another that plots, with a ratio of 10 to 1.
Searching for a fix, I found out about blitting with Matplotlib on SO and was led to tutorials like this. The problem I'm seeing is that they only deal with the plotting part. Every attempt I have made at sampling and plotting a portion of the data I've gathered ends in a crash.
So an outline of what I'd like to do would be this
class graph(ParentMplCanvas):
    def __init__(self, *args, **kwargs):
        self.axes = fig.add_subplot(111)
        self.x = range(1000)
        self.data = np.zeros(1000)
        self.i = 0
        # set timer for data to be sampled once every 10 ms
        self.updateData()
        self.line, = self.axes.plot(self.x, self.data, animated=True)
        # Update the plot every second
        self.gTimer = fig.canvas.new_timer(interval=1000)
        self.gTimer.add_callback(self.update_figure)
        self.gTimer.start()

    def updateData(self):
        self.i += 1
        # append with 0's if self.i > 1000
        self.data[self.i] = self.funcToGrabCurrentValFromDevice()
        self.updateTimer()

    def updateTimer(self):
        self.dTimer = Timer(0.01, self.updateData)
        self.dTimer.start()


class ApplicationWindow(gui.QMainWindow):
    # some stuff to create docked windows and put the above graph in a docked window;
    # see [how I did it here][2]
    ...
Maybe I am just not understanding the blitting, but in every example I see, all the data already exists. Any time I've tried to access only a portion of the data, it seems to crash the program. I'm trying to plot a 100-sample region at a time and have it continuously update.
Where I am lost:
How do I properly write update_figure so that I can plot the last 100 (or n) data points that were sampled?
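For reference, a minimal sketch of what such an update_figure could look like (an assumption-laden sketch, not an accepted answer: it assumes self.background was cached with copy_from_bbox after one full draw, and that self.i is the index of the newest sample written by updateData):
def update_figure(self):
    n = 100
    end = self.i
    start = max(0, end - n)
    # show only the most recent n samples
    self.line.set_data(self.x[start:end], self.data[start:end])
    canvas = self.axes.figure.canvas
    canvas.restore_region(self.background)   # repaint the cached static background
    self.axes.draw_artist(self.line)         # redraw only the line
    canvas.blit(self.axes.bbox)              # push the updated region to the screen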

pyqtgraph/chaco/guiqwt: Fast scrolling timetrace demo

I want to implement a fast scrolling timetrace tool in Python. The timetrace data is already all in memory in a numpy array and is big (>1e6 samples). I need a tool for quick visual inspection.
I already tried using Matplotlib+PySide, but the update speed is not fast enough.
Can you reproduce the Matplotlib+PySide demo in another toolkit like pyqtgraph/chaco/guiqwt? I don't know any of them and I'm willing to learn the one that performs better for this application.
To be useful in my workflow, the chosen framework should allow running the plot from an interactive ipython session and should be fast and extensible (eventually I will need several plots scrolling in sync in the same window). In principle pyqtgraph, guiqwt or chaco all seem like good candidates. But let's judge on a real example.
Thanks.
Here's the pyqtgraph version. I tried to keep the code as similar as I could to the original demo. On my system, pyqtgraph only runs about 5x faster than matplotlib, and is still pretty slow (~1 fps) when all of the data is visible. The major performance differences between matplotlib and pyqtgraph are in throughput--how rapidly new data can be plotted.
For better performance, I'd recommend looking at some of the GPU-based plotting libraries like visvis or galry. Pyqtgraph will be adding GPU support in the future, but it's not there yet. There are some efforts to bring matplotlib to the GPU as well, but I haven't seen any results from that yet.
## adapted from http://stackoverflow.com/questions/16824718/python-matplotlib-pyside-fast-timetrace-scrolling

from PySide import QtGui, QtCore
import numpy as np
import pyqtgraph as pg

N_SAMPLES = int(1e6)


def test_plot():
    time = np.arange(N_SAMPLES)*1e-3
    sample = np.random.randn(N_SAMPLES)
    plt = pg.PlotWidget(title="Use the slider to scroll and the spin-box to set the width")
    plt.addLegend()
    plt.plot(time, sample, name="Gaussian noise")
    q = ScrollingToolQT(plt)
    return q   # WARNING: it's important to return this object otherwise
               # python will delete the reference and the GUI will not respond!


class ScrollingToolQT(object):
    def __init__(self, fig):
        # Set up data range variables for scrolling
        self.fig = fig
        self.xmin, self.xmax = fig.plotItem.vb.childrenBounds()[0]
        self.step = 1        # axis units
        self.scale = 1e3     # conversion between scrolling units and axis units

        # Retrieve the QMainWindow used by the current figure and add a toolbar
        # to host the new widgets
        self.win = QtGui.QMainWindow()
        self.win.show()
        self.win.resize(800, 600)
        self.win.setCentralWidget(fig)
        self.toolbar = QtGui.QToolBar()
        self.win.addToolBar(QtCore.Qt.BottomToolBarArea, self.toolbar)

        # Create the slider and spinbox for x-axis scrolling in the toolbar
        self.set_slider(self.toolbar)
        self.set_spinbox(self.toolbar)

        # Set the initial xlimits coherently with the values in the slider and spinbox
        self.set_xlim = self.fig.setXRange
        self.set_xlim(0, self.step)

    def set_slider(self, parent):
        # Sliders only support integer ranges so use ms as the base unit
        smin, smax = self.xmin*self.scale, self.xmax*self.scale
        self.slider = QtGui.QSlider(QtCore.Qt.Horizontal, parent=parent)
        self.slider.setTickPosition(QtGui.QSlider.TicksAbove)
        self.slider.setTickInterval((smax-smin)/10.)
        self.slider.setMinimum(smin)
        self.slider.setMaximum(smax-self.step*self.scale)
        self.slider.setSingleStep(self.step*self.scale/5.)
        self.slider.setPageStep(self.step*self.scale)
        self.slider.setValue(0)  # set the initial position
        self.slider.valueChanged.connect(self.xpos_changed)
        parent.addWidget(self.slider)

    def set_spinbox(self, parent):
        self.spinb = QtGui.QDoubleSpinBox(parent=parent)
        self.spinb.setDecimals(3)
        self.spinb.setRange(0.001, 3600.)
        self.spinb.setSuffix(" s")
        self.spinb.setValue(self.step)  # set the initial width
        self.spinb.valueChanged.connect(self.xwidth_changed)
        parent.addWidget(self.spinb)

    def xpos_changed(self, pos):
        #pprint("Position (in scroll units) %f\n" % pos)
        # self.pos = pos/self.scale
        pos /= self.scale
        self.set_xlim(pos, pos + self.step, padding=0)

    def xwidth_changed(self, xwidth):
        #pprint("Width (axis units) %f\n" % step)
        if xwidth <= 0:
            return
        self.step = xwidth
        self.slider.setSingleStep(self.step*self.scale/5.)
        self.slider.setPageStep(self.step*self.scale)
        old_xlim = self.fig.plotItem.vb.viewRange()[0]
        self.xpos_changed(old_xlim[0] * self.scale)


if __name__ == "__main__":
    app = pg.mkQApp()
    q = test_plot()
    app.exec_()
