Storages in OpenCV with Python

I want to find contours in an image and process them further, e.g. by drawing them on the image.
To do that I have two functions running in different threads:
storage = cv.CreateMemStorage(0)
contours = cv.FindContours(inData.content, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
and
while contours:
    bound_rect = cv.BoundingRect(list(contours))
    contours = contours.h_next()
    pt1 = (bound_rect[0], bound_rect[1])
    pt2 = (bound_rect[0] + bound_rect[2], bound_rect[1] + bound_rect[3])
    cv.Rectangle(inImg.content, pt1, pt2, cv.CV_RGB(255, 0, 0), 1)
Each function runs in a loop processing one image after the other.
When a function is done it puts the image in a buffer from which the other function can get it.
This works, except that in the result the contours are drawn onto an image one or two frames before the image they were actually found in.
I think this has something to do with the storage of OpenCV, but I don't understand why the storage is needed and what it does.
EDIT: Here is some more code:
My program is meant to be node-based image analysis software.
This is what the node graph of my current code looks like:
                      |---------|    |--------|
|-----|    |-----|--->|Threshold|--->|Contours|--->|-------------|    |------|
|Input|--->|Split|    |---------|    |--------|    |Draw Contours|--->|Output|
|-----|    |-----|--------------------------------->|-------------|    |------|
This is the class from which all nodes derive:
from Buffer import Buffer
from threading import Thread
from Data import Data

class Node(Thread):
    def __init__(self, inputbuffers, outputbuffers):
        Thread.__init__(self)
        self.inputbuffers = inputbuffers
        self.outputbuffers = outputbuffers

    def getInputBuffer(self, index):
        return self.inputbuffers[index]

    def getOutputBuffer(self, index):
        return self.outputbuffers[index]

    def _getContents(self, bufferArray):
        out = []
        for bufferToGet in bufferArray:
            if bufferToGet and bufferToGet.data:
                out.append(bufferToGet.data)
        for bufferToGet in bufferArray:
            bufferToGet.data = None
        return out

    def _allInputsPresent(self):
        for bufferToChk in self.inputbuffers:
            if not bufferToChk.data:
                return False
        return True

    def _allOutputsEmpty(self):
        for bufferToChk in self.outputbuffers:
            if bufferToChk.data != None:
                return False
        return True

    def _applyOutputs(self, output):
        for i in range(len(output)):
            if self.outputbuffers[i]:
                self.outputbuffers[i].setData(output[i])

    def run(self):
        #Thread loop <------------------------------------
        while True:
            while not self._allInputsPresent(): pass
            inputs = self._getContents(self.inputbuffers)
            output = [None]*len(self.outputbuffers)
            self.process(inputs, output)
            while not self._allOutputsEmpty(): pass
            self._applyOutputs(output)

    def process(self, inputs, outputs):
        '''
        inputs: array of Data objects
        outputs: array of Data objects
        '''
        pass
The nodes pass around these Data objects:
class Data(object):
    def __init__(self, content=None, time=None, error=None, number=-1):
        self.content = content  # Here the actual data is stored. Mostly images
        self.time = time        # Not used yet
        self.error = error      # Not used yet
        self.number = number    # Used to see if the correct data is put together
These are the nodes:
from Node import Node
from Data import Data
import copy
import cv

class TemplateNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(type(self), self).__init__(inputbuffers, outputbuffers)

    def process(self, inputs, outputs):
        inData = inputs[0]
        #Do something with the content e.g.
        #cv.Smooth(inData.content, inData.content, cv.CV_GAUSSIAN, 11, 11)
        outputs[0] = inData

class InputNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(InputNode, self).__init__(inputbuffers, outputbuffers)
        self.capture = cv.CaptureFromFile("video.avi")
        self.counter = 0

    def process(self, inputs, outputs):
        image = cv.QueryFrame(self.capture)
        if image:
            font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
            x = 30
            y = 50
            cv.PutText(image, str(self.counter), (x, y), font, 255)
            outputs[0] = Data(image, None, None, self.counter)
            self.counter = self.counter + 1

class OutputNode(Node):
    def __init__(self, inputbuffers, outputbuffers, name):
        super(type(self), self).__init__(inputbuffers, outputbuffers)
        self.name = name

    def process(self, inputs, outputs):
        if type(inputs[0].content) == cv.iplimage:
            cv.ShowImage(self.name, inputs[0].content)
            cv.WaitKey()

class ThresholdNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(type(self), self).__init__(inputbuffers, outputbuffers)

    def process(self, inputs, outputs):
        inData = inputs[0]
        inimg = cv.CreateImage(cv.GetSize(inData.content), cv.IPL_DEPTH_8U, 1)
        cv.CvtColor(inData.content, inimg, cv.CV_BGR2GRAY)
        outImg = cv.CreateImage(cv.GetSize(inimg), cv.IPL_DEPTH_8U, 1)
        cv.Threshold(inimg, outImg, 70, 255, cv.CV_THRESH_BINARY_INV)
        inData.content = outImg
        outputs[0] = inData

class SplitNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(type(self), self).__init__(inputbuffers, outputbuffers)

    def process(self, inputs, outputs):
        inData = inputs[0]
        if type(inData.content) == cv.iplimage:
            imagecpy = cv.CloneImage(inData.content)
            outputs[1] = Data(imagecpy, copy.copy(inData.time), copy.copy(inData.error), copy.copy(inData.number))
        else:
            outputs[1] = copy.deepcopy(inData)
        print

class ContoursNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(type(self), self).__init__(inputbuffers, outputbuffers)

    def process(self, inputs, outputs):
        inData = inputs[0]
        storage = cv.CreateMemStorage(0)
        contours = cv.FindContours(inData.content, storage, cv.CV_RETR_EXTERNAL, cv.CV_CHAIN_APPROX_SIMPLE)
        contoursArr = []
        while contours:
            points = []
            for (x, y) in contours:
                points.append((x, y))
            contoursArr.append(points)
            contours = contours.h_next()
        outputs[0] = Data(contoursArr, inData.time, inData.error, inData.number)

class DrawContoursNode(Node):
    def __init__(self, inputbuffers, outputbuffers):
        super(type(self), self).__init__(inputbuffers, outputbuffers)

    def process(self, inputs, outputs):
        inImg = inputs[0]
        contours = inputs[1].content
        print "Image start"
        for cont in contours:
            for (x, y) in cont:
                cv.Circle(inImg.content, (x, y), 2, cv.CV_RGB(255, 0, 0))
        print "Image end"
        outputs[0] = inImg
This is the main function. Here all the nodes and buffers are created.
from NodeImpls import *
from Buffer import Buffer
buffer1 = Buffer()
buffer2 = Buffer()
buffer3 = Buffer()
buffer4 = Buffer()
buffer5 = Buffer()
buffer6 = Buffer()
innode = InputNode([], [buffer1])
split = SplitNode([buffer1], [buffer2, buffer3])
thresh = ThresholdNode([buffer3], [buffer4])
contours = ContoursNode([buffer4], [buffer5])
drawc = DrawContoursNode([buffer2, buffer5],[buffer6])
outnode = OutputNode([buffer6], [], "out1")
innode.start()
split.start()
thresh.start()
contours.start()
drawc.start()
outnode.start()
while True:
    pass
The buffer:
class Buffer(object):
    def __init__(self):
        self.data = None

    def setData(self, data):
        self.data = data

    def getData(self):
        return self.data

I think this has something to do with the storage of OpenCV but I don't understand why the storage is needed and what it does
Storage is just a place to keep the results. OpenCV is a C++ library and relies on manual memory allocation, C++ style. The old Python bindings are just a thin wrapper around it and are not very pythonic; that's why you have to allocate storage manually, just as you would in C or C++.
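As a side note, the newer cv2 bindings handle this allocation internally, so no storage object is needed there. A minimal sketch, assuming you can switch to the cv2 API (the file name is just a placeholder):
import cv2

# cv2.findContours allocates its own memory; no CreateMemStorage is needed.
img = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)  # placeholder input frame
_, thresh = cv2.threshold(img, 70, 255, cv2.THRESH_BINARY_INV)

# Depending on the OpenCV version, findContours returns 2 or 3 values.
found = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = found[0] if len(found) == 2 else found[1]

color = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(color, (x, y), (x + w, y + h), (0, 0, 255), 1)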
I have two functions running in different threads
...
This works except that in the result the contours are drawn in the image one or two images before their corresponding image.
I assume your threads are not properly synchronized. This problem is most likely not related to OpenCV, but to which functions you have, what data they use and pass around, and how you share that data between them.
In short, please post the code where you create the threads and call these functions, as well as where inImg, inData, contour, contours and storage are accessed or modified.
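As a sketch of what that synchronization could look like (this is an assumption about your design, not a drop-in fix): if the single-slot Buffer is replaced by a blocking queue, a producer cannot overwrite a frame before the consumer has taken it, so the contours stay paired with the frame they were computed from.
from Queue import Queue  # Python 2; on Python 3 use: from queue import Queue

class Buffer(object):
    """Blocking, thread-safe single-slot buffer."""
    def __init__(self):
        self._queue = Queue(maxsize=1)

    def setData(self, data):
        # Blocks until the consumer has taken the previous item.
        self._queue.put(data)

    def getData(self):
        # Blocks until a producer has put an item.
        return self._queue.get()
Node.run would then call getData()/setData() instead of polling the .data attribute in a busy loop.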

Related

How to retrieve the attributes of a saved model class to initialize it

I have the following class. I'd like to save this class in one script and load it in another one. However, I don't know how to correctly initialize the attributes of the class when I load it somewhere else.
class Dataset(nn.Module):
    def __init__(
            self,
            observation_spec,
            action_spec,
            size,
    ):
        super(Dataset, self).__init__()
        self._size = size
        obs_shape = list(observation_spec.shape)
        obs_type = observation_spec.dtype
        action_shape = list(action_spec.shape)
        action_type = action_spec.dtype
        self._s1 = self._zeros([size] + obs_shape, obs_type)
        self._s2 = self._zeros([size] + obs_shape, obs_type)
        self._a1 = self._zeros([size] + action_shape, action_type)
        self._a2 = self._zeros([size] + action_shape, action_type)
        self._discount = self._zeros([size], torch.float32)
        self._reward = self._zeros([size], torch.float32)
        self._data = Transition(
            s1=self._s1, s2=self._s2, a1=self._a1, a2=self._a2,
            discount=self._discount, reward=self._reward)
        self._current_size = torch.autograd.Variable(torch.tensor(0), requires_grad=False)
        self._current_idx = torch.autograd.Variable(torch.tensor(0), requires_grad=False)
        self._capacity = torch.autograd.Variable(torch.tensor(self._size))
        self._config = collections.OrderedDict(
            observation_spec=observation_spec,
            action_spec=action_spec,
            size=size)

    @property
    def config(self):
        return self._config

    @property
    def data(self):
        return self._data

    @property
    def capacity(self):
        return self._size

    @property
    def size(self):
        return self._current_size.numpy()

    def _zeros(self, shape, dtype):
        """Create a variable initialized with zeros."""
        return torch.autograd.Variable(torch.zeros(shape, dtype=dtype))

# save the model/class
assert data.size == data.capacity
data_ckpt_name = os.path.join(log_dir, 'data_{}.pt'.format(env_name))
torch.save([data.capacity, data.state_dict()], data_ckpt_name)
whole_data_ckpt_name = os.path.join(log_dir, 'data_{}.pth'.format(env_name))
with open(whole_data_ckpt_name, 'wb') as filehandler:
    pickle.dump(data, filehandler)
When I try to load this class, together with its attributes, inside another script (based on this answer):
dm_env = gym.spec(env_name).make()
env = alf_gym_wrapper.AlfGymWrapper(dm_env)
observation_spec = env.observation_spec()
action_spec = env.action_spec()

# Prepare data.
logging.info('Loading data from %s ...', data_file)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_ckpt_name = os.path.join(data_file, 'data_{}.pt'.format(env_name))
whole_data_ckpt_name = os.path.join(data_file, 'data_{}.pth'.format(env_name))
data_size, state = torch.load(data_ckpt_name, map_location=device)
full_data = dc.Dataset(observation_spec, action_spec, data_size)
open(whole_data_ckpt_name, 'a').close()
scores = {}
try:
    with open(whole_data_ckpt_name, "rb") as file:
        unpickler = pickle.Unpickler(file)
        scores = unpickler.load()
        if not isinstance(scores, dict):
            scores = {}
except EOFError:
    return {}
print(f"loaded data {scores}")
full_data.load_state_dict(state)
print(f"loaded data : {full_data.size}")
It seems the code breaks down here.
I am wondering how I can extract the size attribute of the class from the saved model when I load it, in order to properly initialize this attribute?
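One pattern that would make this explicit, sketched under the assumption that observation_spec and action_spec are picklable: save the constructor arguments (which the config property already collects) next to the state dict, then rebuild the Dataset from that config before restoring the tensors.
# Saving (data_ckpt_name as in the question)
ckpt = {
    'config': dict(data.config),  # observation_spec, action_spec, size
    'state_dict': data.state_dict(),
}
torch.save(ckpt, data_ckpt_name)

# Loading: rebuild the object from its own config, then restore the tensors.
ckpt = torch.load(data_ckpt_name, map_location=device)
full_data = Dataset(**ckpt['config'])
full_data.load_state_dict(ckpt['state_dict'])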

Pythonic way to pass a method to another function

I'm unsure of the best way to do the following. That is, I'm not sure whether I should have a parent class UniSamplingStrategy with child classes UniformSampling and RandomSampling, or whether I should just have UniSamplingStrategy and implement the types of sampling as methods. For example, this is what I did:
import numpy as np

## make a base class w/ child classes instead?
class UniSamplingStrategy():
    def __init__(self,
                 left=0,
                 right=0,
                 num_samples=0,
                 cluster_center=None,
                 defined_array=[0]
                 ):
        self._left = left
        self._right = right
        self._num_samples = num_samples
        self._cluster_center = cluster_center
        self._defined_array = defined_array

    # uniform sampling
    def uniform_sampling(self):
        return np.linspace(start=self._left,
                           stop=self._right,
                           num=self._num_samples,
                           endpoint=True,
                           dtype=np.float32)

    # random spacing
    def clustered_sampling(self):
        return np.random.normal(loc=self._cluster_center,
                                scale=(self._right - self._left) / 4,
                                size=self._num_samples)
What I want to do with this class (or perhaps classes, if I need to rewrite for good python) is pass a sampling strategy to my data_generation method.
def data_generation(noise_scale,
                    sampling_strategy,
                    test_func,
                    noise_type
                    ):
    x_samples = sampling_strategy
    y_samples = test_func(x=x_samples)
    if noise_type is not None:
        _, y_samples_noise = noise_type(x=x_samples, scale=noise_scale)
        y_samples = y_samples + y_samples_noise
    return x_samples, y_samples

def test_func(x):
    return (np.cos(x))**2 / ((x/6)**2 + 1)

def hmskd_noise(x, scale):
    scales = scale
    return scales, np.random.normal(scale=scale, size=x.shape[0])
So that ideally, I could try different test functions, noise, and sampling schemes. Where I could write function calls like:
x_true, y_true = data_generation(sampling_strategy=uniform_sampling(left=0, right=10, num_samples=1000),
                                 test_func=test_func,
                                 noise_type=None,
                                 noise_scale=0)

x_obs, y_obs = data_generation(sampling_strategy=clustered_sampling(cluster_center=5, left=0, right=10, num_samples=20),
                               test_func=test_func,
                               noise_type=hmskd_noise,
                               noise_scale=0.2)
Essentially, I'm interested in the best way to pass a sampling strategy to data_generation when each method can have different parameters to pass (e.g., see uniform_sampling and clustered_sampling parameters).
Thanks for your time :)
For example, you can have a set of classes with a __call__ method, like:
import numpy as np

class UniformSampling:
    def __init__(self,
                 left=0,
                 right=0,
                 num_samples=0,
                 cluster_center=None,
                 defined_array=[0]
                 ):
        self._left = left
        self._right = right
        self._num_samples = num_samples
        self._cluster_center = cluster_center
        self._defined_array = defined_array

    def __call__(self):
        return np.linspace(start=self._left,
                           stop=self._right,
                           num=self._num_samples,
                           endpoint=True,
                           dtype=np.float32)
Then you can pass an instantiated object to data_generation as:
x_true, y_true = data_generation(sampling_strategy=UniformSampling(left=0, right=10, num_samples=1000),
                                 test_func=test_func,
                                 noise_type=None,
                                 noise_scale=0)
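Note that with callable strategy objects, data_generation has to invoke the strategy rather than use it as a value; a minimal adjustment of the function from the question:
def data_generation(noise_scale, sampling_strategy, test_func, noise_type):
    x_samples = sampling_strategy()  # call the strategy object
    y_samples = test_func(x=x_samples)
    if noise_type is not None:
        _, y_samples_noise = noise_type(x=x_samples, scale=noise_scale)
        y_samples = y_samples + y_samples_noise
    return x_samples, y_samples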

Kivy camera2 api get buffer

I'm looking for a way to get a buffer from the camera2 API in Kivy, like with camera_android.
I am using the code from the colour-blind camera project: https://github.com/inclement/colour-blind-camera/blob/master/camera2/camera2.py
Thanks.
camera_android
from jnius import autoclass, PythonJavaClass, java_method
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.graphics import Fbo, Callback, Rectangle
from kivy.core.camera import CameraBase
import threading
Camera = autoclass('android.hardware.Camera')
SurfaceTexture = autoclass('android.graphics.SurfaceTexture')
GL_TEXTURE_EXTERNAL_OES = autoclass(
'android.opengl.GLES11Ext').GL_TEXTURE_EXTERNAL_OES
ImageFormat = autoclass('android.graphics.ImageFormat')
class PreviewCallback(PythonJavaClass):
"""
Interface used to get back the preview frame of the Android Camera
"""
__javainterfaces__ = ('android.hardware.Camera$PreviewCallback', )
def __init__(self, callback):
super(PreviewCallback, self).__init__()
self._callback = callback
@java_method('([BLandroid/hardware/Camera;)V')
def onPreviewFrame(self, data, camera):
self._callback(data, camera)
class CameraAndroid(CameraBase):
"""
Implementation of CameraBase using Android API
"""
_update_ev = None
def __init__(self, **kwargs):
self._android_camera = None
self._preview_cb = PreviewCallback(self._on_preview_frame)
self._buflock = threading.Lock()
super(CameraAndroid, self).__init__(**kwargs)
def __del__(self):
self._release_camera()
def init_camera(self):
self._release_camera()
self._android_camera = Camera.open(self._index)
params = self._android_camera.getParameters()
width, height = self._resolution
params.setPreviewSize(width, height)
params.setFocusMode('continuous-picture')
self._android_camera.setParameters(params)
# self._android_camera.setDisplayOrientation()
self.fps = 30.
pf = params.getPreviewFormat()
assert(pf == ImageFormat.NV21) # default format is NV21
self._bufsize = int(ImageFormat.getBitsPerPixel(pf) / 8. *
width * height)
self._camera_texture = Texture(width=width, height=height,
target=GL_TEXTURE_EXTERNAL_OES,
colorfmt='rgba')
self._surface_texture = SurfaceTexture(int(self._camera_texture.id))
self._android_camera.setPreviewTexture(self._surface_texture)
self._fbo = Fbo(size=self._resolution)
self._fbo['resolution'] = (float(width), float(height))
self._fbo.shader.fs = '''
#extension GL_OES_EGL_image_external : require
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
uniform samplerExternalOES texture1;
uniform vec2 resolution;
void main()
{
vec2 coord = vec2(tex_coord0.y * (
resolution.y / resolution.x), 1. -tex_coord0.x);
gl_FragColor = texture2D(texture1, tex_coord0);
}
'''
with self._fbo:
self._texture_cb = Callback(lambda instr:
self._camera_texture.bind)
Rectangle(size=self._resolution)
def _release_camera(self):
if self._android_camera is None:
return
self.stop()
self._android_camera.release()
self._android_camera = None
# clear texture and it'll be reset in `_update` pointing to new FBO
self._texture = None
del self._fbo, self._surface_texture, self._camera_texture
def _on_preview_frame(self, data, camera):
with self._buflock:
if self._buffer is not None:
# add buffer back for reuse
self._android_camera.addCallbackBuffer(self._buffer)
self._buffer = data
# check if frame grabbing works
# print self._buffer, len(self.frame_data)
def _refresh_fbo(self):
self._texture_cb.ask_update()
self._fbo.draw()
def start(self):
super(CameraAndroid, self).start()
with self._buflock:
self._buffer = None
for k in range(2): # double buffer
buf = b'\x00' * self._bufsize
self._android_camera.addCallbackBuffer(buf)
self._android_camera.setPreviewCallbackWithBuffer(self._preview_cb)
self._android_camera.startPreview()
if self._update_ev is not None:
self._update_ev.cancel()
self._update_ev = Clock.schedule_interval(self._update, 1 / self.fps)
def stop(self):
super(CameraAndroid, self).stop()
if self._update_ev is not None:
self._update_ev.cancel()
self._update_ev = None
self._android_camera.stopPreview()
self._android_camera.setPreviewCallbackWithBuffer(None)
# buffer queue cleared as well, to be recreated on next start
with self._buflock:
self._buffer = None
def _update(self, dt):
self._surface_texture.updateTexImage()
self._refresh_fbo()
if self._texture is None:
self._texture = self._fbo.texture
self.dispatch('on_load')
self._copy_to_gpu()
def _copy_to_gpu(self):
"""
A dummy placeholder (the image is already in GPU) to be consistent
with other providers.
"""
self.dispatch('on_texture')
def grab_frame(self):
"""
Grab current frame (thread-safe, minimal overhead)
"""
with self._buflock:
if self._buffer is None:
return None
buf = self._buffer.tostring()
return buf
def decode_frame(self, buf):
"""
Decode image data from grabbed frame.
This method depends on OpenCV and NumPy - however it is only used for
fetching the current frame as a NumPy array, and not required when
this :class:`CameraAndroid` provider is simply used by a
:class:`~kivy.uix.camera.Camera` widget.
"""
import numpy as np
from cv2 import cvtColor
w, h = self._resolution
arr = np.fromstring(buf, 'uint8').reshape((h + h / 2, w))
arr = cvtColor(arr, 93) # NV21 -> BGR
return arr
def read_frame(self):
"""
Grab and decode frame in one call
"""
return self.decode_frame(self.grab_frame())
@staticmethod
def get_camera_count():
"""
Get the number of available cameras.
"""
return Camera.getNumberOfCameras()
color blind camera
from kivy.event import EventDispatcher
from kivy.graphics.texture import Texture
from kivy.graphics import Fbo, Callback, Rectangle
from kivy.properties import (BooleanProperty, StringProperty, ObjectProperty, OptionProperty, ListProperty)
from kivy.clock import Clock
from jnius import autoclass, cast, PythonJavaClass, java_method, JavaClass, MetaJavaClass, JavaMethod
import logging
from enum import Enum
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
CameraManager = autoclass("android.hardware.camera2.CameraManager")
PythonActivity = autoclass("org.kivy.android.PythonActivity")
Context = autoclass("android.content.Context")
context = cast("android.content.Context", PythonActivity.mActivity)
CameraDevice = autoclass("android.hardware.camera2.CameraDevice")
CaptureRequest = autoclass("android.hardware.camera2.CaptureRequest")
CameraCharacteristics = autoclass("android.hardware.camera2.CameraCharacteristics")
ArrayList = autoclass('java.util.ArrayList')
JavaArray = autoclass('java.lang.reflect.Array')
SurfaceTexture = autoclass('android.graphics.SurfaceTexture')
Surface = autoclass('android.view.Surface')
GL_TEXTURE_EXTERNAL_OES = autoclass(
'android.opengl.GLES11Ext').GL_TEXTURE_EXTERNAL_OES
ImageFormat = autoclass('android.graphics.ImageFormat')
Handler = autoclass("android.os.Handler")
Looper = autoclass("android.os.Looper")
MyStateCallback = autoclass("net.inclem.camera2.MyStateCallback")
CameraActions = autoclass("net.inclem.camera2.MyStateCallback$CameraActions")
# MyStateCallback = autoclass("org.kivy.android.MyStateCallback")
MyCaptureSessionCallback = autoclass("net.inclem.camera2.MyCaptureSessionCallback")
CameraCaptureEvents = autoclass("net.inclem.camera2.MyCaptureSessionCallback$CameraCaptureEvents")
_global_handler = Handler(Looper.getMainLooper())
class LensFacing(Enum):
"""Values copied from CameraCharacteristics api doc, as pyjnius
lookup doesn't work on some devices.
"""
LENS_FACING_FRONT = 0
LENS_FACING_BACK = 1
LENS_FACING_EXTERNAL = 2
class ControlAfMode(Enum):
CONTROL_AF_MODE_CONTINUOUS_PICTURE = 4
class ControlAeMode(Enum):
CONTROL_AE_MODE_ON = 1
class Runnable(PythonJavaClass):
__javainterfaces__ = ['java/lang/Runnable']
def __init__(self, func):
super(Runnable, self).__init__()
self.func = func
@java_method('()V')
def run(self):
try:
self.func()
except:
import traceback
traceback.print_exc()
class PyCameraInterface(EventDispatcher):
"""
Provides an API for querying details of the cameras available on Android.
"""
camera_ids = []
cameras = ListProperty()
java_camera_characteristics = {}
java_camera_manager = ObjectProperty()
def __init__(self):
super().__init__()
logger.info("Starting camera interface init")
self.java_camera_manager = cast("android.hardware.camera2.CameraManager",
context.getSystemService(Context.CAMERA_SERVICE))
self.camera_ids = self.java_camera_manager.getCameraIdList()
characteristics_dict = self.java_camera_characteristics
camera_manager = self.java_camera_manager
logger.info("Got basic java objects")
for camera_id in self.camera_ids:
logger.info(f"Getting data for camera {camera_id}")
characteristics_dict[camera_id] = camera_manager.getCameraCharacteristics(camera_id)
logger.info("Got characteristics dict")
self.cameras.append(PyCameraDevice(
camera_id=camera_id,
java_camera_manager=camera_manager,
java_camera_characteristics=characteristics_dict[camera_id],
))
logger.info(f"Finished interpreting camera {camera_id}")
def select_cameras(self, **conditions):
options = self.cameras
outputs = []
for camera in cameras:
for key, value in conditions.items():
if getattr(camera, key) != value:
break
else:
outputs.append(camera)
return outputs
class PyCameraDevice(EventDispatcher):
camera_id = StringProperty()
output_texture = ObjectProperty(None, allownone=True)
preview_active = BooleanProperty(False)
preview_texture = ObjectProperty(None, allownone=True)
preview_resolution = ListProperty()
preview_fbo = ObjectProperty(None, allownone=True)
java_preview_surface_texture = ObjectProperty(None)
java_preview_surface = ObjectProperty(None)
java_capture_request = ObjectProperty(None)
java_surface_list = ObjectProperty(None)
java_capture_session = ObjectProperty(None)
connected = BooleanProperty(False)
supported_resolutions = ListProperty()
# TODO: populate this
facing = OptionProperty("UNKNOWN", options=["UNKNOWN", "FRONT", "BACK", "EXTERNAL"])
java_camera_characteristics = ObjectProperty()
java_camera_manager = ObjectProperty()
java_camera_device = ObjectProperty()
java_stream_configuration_map = ObjectProperty()
_open_callback = ObjectProperty(None, allownone=True)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.register_event_type("on_opened")
self.register_event_type("on_closed")
self.register_event_type("on_disconnected")
self.register_event_type("on_error")
self._java_state_callback_runnable = Runnable(self._java_state_callback)
self._java_state_java_callback = MyStateCallback(self._java_state_callback_runnable)
self._java_capture_session_callback_runnable = Runnable(self._java_capture_session_callback)
self._java_capture_session_java_callback = MyCaptureSessionCallback(
self._java_capture_session_callback_runnable)
self._populate_camera_characteristics()
def on_opened(self, instance):
pass
def on_closed(self, instance):
pass
def on_disconnected(self, instance):
pass
def on_error(self, instance, error):
pass
def close(self):
self.java_camera_device.close()
def _populate_camera_characteristics(self):
logger.info("Populating camera characteristics")
self.java_stream_configuration_map = self.java_camera_characteristics.get(
CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
logger.info("Got stream configuration map")
self.supported_resolutions = [
(size.getWidth(), size.getHeight()) for size in
self.java_stream_configuration_map.getOutputSizes(SurfaceTexture(0).getClass())]
logger.info("Got supported resolutions")
facing = self.java_camera_characteristics.get(
CameraCharacteristics.LENS_FACING)
logger.info(f"Got facing: {facing}")
if facing == LensFacing.LENS_FACING_BACK.value: # CameraCharacteristics.LENS_FACING_BACK:
self.facing = "BACK"
elif facing == LensFacing.LENS_FACING_FRONT.value: # CameraCharacteristics.LENS_FACING_FRONT:
self.facing = "FRONT"
elif facing == LensFacing.LENS_FACING_EXTERNAL.value: # CameraCharacteristics.LENS_FACING_EXTERNAL:
self.facing = "EXTERNAL"
else:
raise ValueError("Camera id {} LENS_FACING is unknown value {}".format(self.camera_id, facing))
logger.info(f"Finished initing camera {self.camera_id}")
def __str__(self):
return "<PyCameraDevice facing={}>".format(self.facing)
def __repr__(self):
return str(self)
def open(self, callback=None):
self._open_callback = callback
self.java_camera_manager.openCamera(
self.camera_id,
self._java_state_java_callback,
_global_handler
)
def _java_state_callback(self, *args, **kwargs):
action = MyStateCallback.camera_action.toString()
camera_device = MyStateCallback.camera_device
self.java_camera_device = camera_device
logger.info("CALLBACK: camera event {}".format(action))
if action == "OPENED":
self.dispatch("on_opened", self)
self.connected = True
elif action == "DISCONNECTED":
self.dispatch("on_disconnected", self)
self.connected = False
elif action == "CLOSED":
self.dispatch("on_closed", self)
self.connected = False
elif action == "ERROR":
error = MyStateCallback.camera_error
self.dispatch("on_error", self, error)
self.connected = False
elif action == "UNKNOWN":
print("UNKNOWN camera state callback item")
self.connected = False
else:
raise ValueError("Received unknown camera action {}".format(action))
if self._open_callback is not None:
self._open_callback(self, action)
def start_preview(self, resolution):
if self.java_camera_device is None:
raise ValueError("Camera device not yet opened, cannot create preview stream")
if resolution not in self.supported_resolutions:
raise ValueError(
"Tried to open preview with resolution {}, not in supported resolutions {}".format(
resolution, self.supported_resolutions))
if self.preview_active:
raise ValueError("Preview already active, can't start again without stopping first")
logger.info("Creating capture stream with resolution {}".format(resolution))
self.preview_resolution = resolution
self._prepare_preview_fbo(resolution)
self.preview_texture = Texture(
width=resolution[0], height=resolution[1], target=GL_TEXTURE_EXTERNAL_OES, colorfmt="rgba")
logger.info("Texture id is {}".format(self.preview_texture.id))
self.java_preview_surface_texture = SurfaceTexture(int(self.preview_texture.id))
self.java_preview_surface_texture.setDefaultBufferSize(*resolution)
self.java_preview_surface = Surface(self.java_preview_surface_texture)
self.java_capture_request = self.java_camera_device.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
self.java_capture_request.addTarget(self.java_preview_surface)
self.java_capture_request.set(
CaptureRequest.CONTROL_AF_MODE, ControlAfMode.CONTROL_AF_MODE_CONTINUOUS_PICTURE.value) # CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE)
self.java_capture_request.set(
CaptureRequest.CONTROL_AE_MODE, ControlAeMode.CONTROL_AE_MODE_ON.value) # CaptureRequest.CONTROL_AE_MODE_ON)
self.java_surface_list = ArrayList()
self.java_surface_list.add(self.java_preview_surface)
self.java_camera_device.createCaptureSession(
self.java_surface_list,
self._java_capture_session_java_callback,
_global_handler,
)
return self.preview_fbo.texture
def _prepare_preview_fbo(self, resolution):
self.preview_fbo = Fbo(size=resolution)
self.preview_fbo['resolution'] = [float(f) for f in resolution]
self.preview_fbo.shader.fs = """
#extension GL_OES_EGL_image_external : require
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
uniform samplerExternalOES texture1;
uniform vec2 resolution;
void main()
{
gl_FragColor = texture2D(texture1, tex_coord0);
}
"""
with self.preview_fbo:
Rectangle(size=resolution)
def _java_capture_session_callback(self, *args, **kwargs):
event = MyCaptureSessionCallback.camera_capture_event.toString()
logger.info("CALLBACK: capture event {}".format(event))
self.java_capture_session = MyCaptureSessionCallback.camera_capture_session
if event == "READY":
logger.info("Doing READY actions")
self.java_capture_session.setRepeatingRequest(self.java_capture_request.build(), None, None)
Clock.schedule_interval(self._update_preview, 0.)
def _update_preview(self, dt):
self.java_preview_surface_texture.updateTexImage()
self.preview_fbo.ask_update()
self.preview_fbo.draw()
self.output_texture = self.preview_fbo.texture
The code I want to bring over from camera_android to the colour-blind camera:
def grab_frame(self):
    """
    Grab current frame (thread-safe, minimal overhead)
    """
    with self._buflock:
        if self._buffer is None:
            return None
        buf = self._buffer.tostring()
        return buf
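There is no buffer callback in the posted camera2 code because the preview frames go straight into a SurfaceTexture on the GPU. One possible direction (a rough, untested sketch and only an assumption about how it could be wired with pyjnius): add an android.media.ImageReader as an extra capture target and read the planes from its images; extracting the bytes from the java.nio.ByteBuffer is left as a comment because it depends on the pyjnius version.
from jnius import autoclass, PythonJavaClass, java_method

ImageFormat = autoclass('android.graphics.ImageFormat')
ImageReader = autoclass('android.media.ImageReader')

class ImageAvailableListener(PythonJavaClass):
    __javainterfaces__ = ['android/media/ImageReader$OnImageAvailableListener']

    def __init__(self, callback):
        super(ImageAvailableListener, self).__init__()
        self.callback = callback

    @java_method('(Landroid/media/ImageReader;)V')
    def onImageAvailable(self, reader):
        image = reader.acquireLatestImage()
        if image is None:
            return
        try:
            planes = image.getPlanes()
            # planes[i].getBuffer() is a java.nio.ByteBuffer; copying it into
            # Python bytes depends on your pyjnius version, so it is omitted here.
            self.callback(planes)
        finally:
            image.close()

# Hypothetical wiring inside PyCameraDevice.start_preview, before
# createCaptureSession: create the reader, listen on it, and add its surface
# as an additional target of the capture request / surface list.
# reader = ImageReader.newInstance(resolution[0], resolution[1], ImageFormat.YUV_420_888, 2)
# reader.setOnImageAvailableListener(ImageAvailableListener(my_frame_callback), _global_handler)
# self.java_capture_request.addTarget(reader.getSurface())
# self.java_surface_list.add(reader.getSurface())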

Attempt to pickle unknown type while creating a deepcopy

I have the following two classes:
class QPolygonModel(QtGui.QPolygon):
_idx = None
_selected = None
def __init__(self, idx, polygon: QtGui.QPolygon = None):
# Call default constructor
if polygon is None:
super().__init__()
# Call copy constructor
else:
super().__init__(polygon)
self._idx = idx
self._selected = False
@property
def idx(self):
return self._idx
@property
def is_selected(self):
return self._selected
@is_selected.setter
def is_selected(self, flag):
self._selected = flag
def get_points(self):
res = []
for i in range(0, self.size()):
res.append(self.point(i))
return res
This is a custom polygon class that inherits from QPolygon. Objects of this class are stored in a list in the "Scene" class:
class ImageAnnotatorState:
points = None
radius = None
image = None
polygons = None
_finished = None
multiselect = None
def __init__(self, image):
super().__init__()
self.points = QtGui.QPolygon()
self.radius = 8
self.image = image
self.polygons = self._init_polygons()
self.is_finished = False
self.multiselect = False
def _init_polygons(self):
result = []
for annotation in self.image.annotations:
polyline = QPolygonModel(annotation.get_id())
for point in annotation.points:
q_point = QPoint(point.x, point.y)
polyline.append(q_point)
result.append(polyline)
return result
@property
def is_finished(self):
return self._finished
@is_finished.setter
def is_finished(self, flag):
self._finished = flag
Now for the purpose of creating an undo function, I need to create a deepcopy of this scene class so I can store the state that was active before a scene change was made.
So in a QDialog form, I try to do the following:
class ImageAnnotator(QDialog):
_state = None
_previous_state = None
def __init__(self, image):
super().__init__()
self._state = ImageAnnotatorState(image)
self._previous_state = copy.deepcopy(self._state)
self.show()
The deepcopy call here fails with the following exception:
SystemError: attempt to pickle unknown type 'QPolygonModel'
What am I doing wrong?
EDIT:
Reproducible example:
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QApplication
import copy
import sys

class Test(QtGui.QPolygon):
    idx = None

    def __init__(self, z=None):
        if z is None:
            super().__init__()
        else:
            super().__init__(z)

class State:
    test = None

    def __init__(self):
        self.test = [Test(), Test()]
        print(self.test)

class Main(QDialog):
    state = None
    prev = None

    def __init__(self):
        super().__init__()
        self.state = State()
        prev = copy.deepcopy(self.state)
        print(prev)

app = QApplication(sys.argv)
Main()
It seems that it is a bug similar to the one that ekhumoro points out in this answer. A workaround is to implement the __deepcopy__ method.
On the other hand if you want to set the default value in the case of QPolygon do not use None but an empty QPolygon.
With the above, I have implemented the following:
import copy
import random

from PyQt5 import QtCore, QtGui

class QPolygonModel(QtGui.QPolygon):
    def __init__(self, idx, polygon=QtGui.QPolygon()):
        super().__init__(polygon)
        self._idx = idx
        self._selected = False

    @property
    def idx(self):
        return self._idx

    @property
    def is_selected(self):
        return self._selected

    @is_selected.setter
    def is_selected(self, flag):
        self._selected = flag

    def get_points(self):
        res = []
        for i in range(0, self.size()):
            res.append(self.point(i))
        return res

    # https://stackoverflow.com/a/10622689
    def __deepcopy__(self, memo):
        o = QPolygonModel(self.idx)
        o.__dict__.update(self.__dict__)
        ba = QtCore.QByteArray()
        stream_w = QtCore.QDataStream(ba, QtCore.QIODevice.WriteOnly)
        stream_w << self
        stream_r = QtCore.QDataStream(ba, QtCore.QIODevice.ReadOnly)
        stream_r >> o
        return o

class State:
    def __init__(self):
        self.polylines = []
        for _ in range(4):
            poly = QtGui.QPolygon(
                [QtCore.QPoint(*random.sample(range(10), 2)) for _ in range(4)]
            )
            polyline = QPolygonModel(random.randint(0, 10), poly)
            self.polylines.append(polyline)

if __name__ == "__main__":
    curr = State()
    prev = copy.deepcopy(curr)
    assert len(curr.polylines) == len(prev.polylines)
    for polyline1, polyline2 in zip(curr.polylines, prev.polylines):
        assert id(polyline1) != id(polyline2)
        assert polyline1.size() == polyline2.size()
        assert polyline1.is_selected == polyline2.is_selected
        assert polyline1.idx == polyline2.idx
        for i, j in zip(range(polyline1.size()), range(polyline2.size())):
            assert polyline1.point(i) == polyline2.point(j)

Would I be able to separate variables into another class but keep the usage the same?

I'm trying to rewrite a script and I'm stuck on making it easy to use. Basically it's an assembly script (like the reverse of destruction), where you input a load of variables such as location, whether the location is absolute or relative, scale, rotation, visibility, random offset, etc, to create an animation. The first version was very non user friendly, so I'm trying to get it working nicely from the start this time.
I've thought of how I'd like it to work, and I've managed to keep it clean, but there is a flaw. As you can see below, it would be possible to call something like SetGroup.frame[i].save(), which I don't want (and I don't want to scatter checks for name is None throughout the class).
Here is the code I have:
class SetGroup(object):
def __init__(self, name=None, _frame_only=False):
if name is None and not _frame_only:
raise TypeError('name of group must be provided')
self.selection = None
self.origin = None
self.start = None
self.end = None
self.offset = 0
self.distance = None
self.random = 0
self.location = None
self.rotation = None
self.scale = None
self.visibility = None
if not _frame_only:
self.frame = defaultdict(lambda: SetGroup(_frame_only=True))
def save(self):
self.load()
#do a bit of error checking here
self.data[self.name] = {'ObjectSelection': self.selection,
'ObjectOrigin': self.origin,
'FrameStart': self.start,
'FrameEnd': self.end,
'FrameOffset': self.offset,
'FrameDistance': self.distance,
'FrameRandom': self.random,
'StartLocation': self.location,
'StartRotation': self.rotation,
'StartScale': self.scale,
'StartVisibility': self.visibility,
'ExtraFrames': self.frame}
pm.fileInfo['AssemblyScript'] = StoreData().save(self.data)
def load(self):
try:
self.data = StoreData().load(pm.fileInfo['AssemblyScript'])
except KeyError:
pm.fileInfo['AssemblyScript'] = StoreData().save({})
The way I'd like it to work is like this:
a = SetGroup('test')
a.location = ((0, 0, 0), True)
a.start = 0
a.end = 10
a.frame[5].location = ((10, 10, 10), False)
a.frame[5].scale = ((2, 1, 1), True)
a.save()
Unless anyone can think of a way which would make it more friendly to use, how would I separate location, rotation, scale, and visibility into another class and link them up again, so that they still work at the core level of the class, but also work for the frame dictionary too?
Edit - Got it working to a basic level:
class _MovementInfo(object):
def __init__(self, location=None, rotation=None, scale=None, visibility=None):
self.location = location
self.rotation = rotation
self.scale = scale
self.visibility = visibility
def __repr__(self):
return '_MovementInfo(location={x.location}, rotation={x.rotation}, scale={x.scale}, visibility={x.visibility}'.format(x=self)
Then I used this in the main class to merge the dictionaries:
self.__dict__.update({k: v for k, v in _MovementInfo().__dict__.iteritems() if '__' not in k})
self.frame = defaultdict(_MovementInfo)
I would change the code like this:
class SetGroup(_MovementInfo):
    def __init__(self, name=None):
        if name is None:
            # ...
        super().__init__()
        # ...
        self.random = 0  # __init__ should end here
        # ...
But you should check that everything stored in the frames is a _MovementInfo, or inherits from it (isinstance(x, _MovementInfo)), and is not a SetGroup (not isinstance(x, SetGroup)).
super() is short for super(SetGroup, self) (you have to use the long form on Python 2); it is basically a proxy object that exposes everything the base class provides and lets you call the base class's methods on the current instance.
Or in code:
class A(object):
    def __init__(self, y):
        self.x = 2
        self.y = y

class B(A):
    def __init__(self, y, z):
        super().__init__(y)  # equivalent to: A.__init__(self, y)
        self.z = z

b = B(3, 4)
# b's x is 2, b's y is 3 (both set by A.__init__, the last one passed by B), and b's z is 4 (set by B.__init__)
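To make the suggestion concrete, here is a minimal sketch (my own reduction, not the full script) of the arrangement described above: SetGroup inherits the movement attributes from _MovementInfo, and per-frame overrides are plain _MovementInfo objects in a defaultdict, so both a.location and a.frame[5].location work while only SetGroup exposes save():
from collections import defaultdict

class _MovementInfo(object):
    def __init__(self, location=None, rotation=None, scale=None, visibility=None):
        self.location = location
        self.rotation = rotation
        self.scale = scale
        self.visibility = visibility

class SetGroup(_MovementInfo):
    def __init__(self, name=None):
        if name is None:
            raise TypeError('name of group must be provided')
        super(SetGroup, self).__init__()  # sets location/rotation/scale/visibility
        self.name = name
        self.start = None
        self.end = None
        # per-frame overrides reuse the same attribute names as the group itself
        self.frame = defaultdict(_MovementInfo)

a = SetGroup('test')
a.location = ((0, 0, 0), True)
a.frame[5].location = ((10, 10, 10), False)  # works, but has no save() method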
I hope this helped,
CodenameLambda
