IDS Cameras: pyueye Python package, set exposure parameter (is_SetAutoParameter function)

I am working with IDS-3240CP-NIR cameras and operating them with the pyueye package that accompanies them. Because these cameras are used to calculate NDVI from imagery taken by two different cameras, I need them to have equivalent parameters except for exposure, which will be auto-adjusted for lighting conditions and corrected for in post-processing. The trouble is, I'm not sure how to set auto exposure in pyueye so that it produces good results. When I use the IDS GUI software and flip on auto exposure, things look great, but when I try to do the same in software, the imagery is badly overexposed.
From what I can tell in the documentation, there are two ways to set auto exposure: the pyueye.is_SetAutoParameter function (https://en.ids-imaging.com/manuals/ids-software-suite/ueye-manual/4.93/en/is_setautoparameter.html?q=set+auto+parameter) and the pyueye.is_AutoParameter function (https://en.ids-imaging.com/manuals/ids-software-suite/ueye-manual/4.93/en/is_autoparameterautoexposure.html?q=aes). is_AutoParameter may be the superior method based on how the documentation describes it, but I have not been able to figure out how to use it (I believe I am passing the wrong type of pvals). Instead, I've been setting the auto shutter through is_SetAutoParameter, but the results are still overexposed.
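For reference, my best guess at the intended is_AutoParameter call is the sketch below (untested; I am assuming the enable flag is passed as a ueye.INT together with its size via ueye.sizeof, since that is how the other command-style pyueye functions seem to work):
# Hypothetical sketch (untested): enabling auto exposure (AES) via is_AutoParameter
nEnable = ueye.INT(1)  # 1 = enable, 0 = disable
nRet = ueye.is_AutoParameter(hCam, ueye.IS_AES_CMD_SET_ENABLE, nEnable, ueye.sizeof(nEnable))
if nRet != ueye.IS_SUCCESS:
    print("is_AutoParameter IS_AES_CMD_SET_ENABLE ERROR")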
Here is some of the code I am running:
hCam = ueye.HIDS(1)
sInfo = ueye.SENSORINFO()
cInfo = ueye.CAMINFO()
pcImageMemory = ueye.c_mem_p()
MemID = ueye.int()
rectAOI = ueye.IS_RECT()
pitch = ueye.INT()
nBitsPerPixel = ueye.INT(32)
channels = 4
m_nColorMode = ueye.IS_CM_RGBY8_PACKED
bytes_per_pixel = int(nBitsPerPixel / 8)
# Camera Init
nRet = ueye.is_InitCamera(hCam, None)
if nRet != ueye.IS_SUCCESS:
print("is_InitCamera1 ERROR")
# Get Sensor Info
nRet = ueye.is_GetSensorInfo(hCam, sInfo)
if nRet != ueye.IS_SUCCESS:
print("is_GetSensorInfo ERROR")
print(sInfo.strSensorName)
# Set Display Mode
nRet = ueye.is_SetDisplayMode(hCam, ueye.IS_SET_DM_DIB)
# Set Color mode
nRet = ueye.is_SetColorMode(hCam, ueye.IS_CM_RGBY8_PACKED)
# Area of Interest
print("rectAOI type:")
print(type(rectAOI))
nRet = ueye.is_AOI(hCam, ueye.IS_AOI_IMAGE_GET_AOI, rectAOI, ueye.sizeof(rectAOI))
if nRet != ueye.IS_SUCCESS:
print("is_AOI ERROR")
# Define Width and Height
width = rectAOI.s32Width
height = rectAOI.s32Height
# Prints out some information about the camera and the sensor
print("Camera 1")
print("Camera model:\t\t", sInfo.strSensorName.decode('utf-8'))
print("Camera serial no.:\t", cInfo.SerNo.decode('utf-8'))
print("Maximum image width:\t", width)
print("Maximum image height:\t", height)
print()
# Allocate Image Memory
nRet = ueye.is_AllocImageMem(hCam, width, height, nBitsPerPixel, pcImageMemory, MemID)
if nRet != ueye.IS_SUCCESS:
print("is_AllocImageMem ERROR")
# Add to Sequence
nRet = ueye.is_AddToSequence(hCam, pcImageMemory, MemID)
if nRet != ueye.IS_SUCCESS:
print("is_AddToSequence ERROR")
# Capture Video
nRet = ueye.is_CaptureVideo(hCam, ueye.IS_DONT_WAIT)
if nRet != ueye.IS_SUCCESS:
print("is_CaptureVideo ERROR")
# Inquire Image Memory
nRet = ueye.is_InquireImageMem(hCam, pcImageMemory, MemID, width, height, nBitsPerPixel, pitch)
if nRet != ueye.IS_SUCCESS:
print("is_InquireImageMem ERROR")
# Image Display
array = [0]
# Generate Image File Name
timestr = str(datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
image_name = "/home/pi/LakeWheelerTurf/TurfImages/turfcam1_" + timestr + ".bmp"
pval1 = ctypes.c_double(1)
pval2 = ctypes.c_double(0)
nRet = ueye.is_SetAutoParameter(hCam, ueye.IS_SET_ENABLE_AUTO_SHUTTER, pval1, pval2)
# Continuous image display
photo_counter = 0
while (nRet == ueye.IS_SUCCESS):
pval1 = ctypes.c_double(1)
pval2 = ctypes.c_double(0)
nRet = ueye.is_SetAutoParameter(hCam, ueye.IS_SET_ENABLE_AUTO_SHUTTER, pval1, pval2)
pval3 = ctypes.c_double(0)
nRet = ueye.is_SetAutoParameter(hCam, ueye.IS_GET_ENABLE_AUTO_SHUTTER, pval3, pval2)
print(pval3.value)
#Below is my attempt to get the is_AutoParameter function to work (unsuccessful)
#pval1 = ctypes.c_void_p(1)
#pval2 = ctypes.sizeof(pval1)
#nRet = ueye.is_AutoParameter(hCam, ueye.IS_AES_CMD_SET_ENABLE, pval1, pval_sz)
#if nRet != ueye.IS_SUCCESS:
# print("AES ERROR")
while array[0] == 0:
array = ueye.get_data(pcImageMemory, width.value, height.value, nBitsPerPixel.value, pitch.value,
copy=False)
frame = np.reshape(array, (height.value, width.value, bytes_per_pixel))
photo_counter += 1
time.sleep(1)
if photo_counter >= 10:
break
matplotlib.pyplot.imsave(image_name, frame)
ueye.is_FreeImageMem(hCam, pcImageMemory, MemID)
ueye.is_ExitCamera(hCam)
return image_name
I'd be hugely appreciative if anyone can help me figure this out; I've been working on this for ages to no avail. Below are some samples of the imagery I'm able to get out of the GUI vs. pyueye. Thanks!

The pyueye Python port is missing an implementation of is_SetExposureTime, which supports both explicit and automatic exposure control. I assume it's a bug.
I provide here my own IdsCamera class, which contains the missing binding plus easy-to-use methods for getting and setting the exposure.
class IdsCamera:
_is_SetExposureTime = ueye._bind("is_SetExposureTime",
[ueye.ctypes.c_uint, ueye.ctypes.c_double,
ueye.ctypes.POINTER(ueye.ctypes.c_double)], ueye.ctypes.c_int)
IS_GET_EXPOSURE_TIME = 0x8000
@staticmethod
def is_SetExposureTime(hCam, EXP, newEXP):
"""
Description
The function is_SetExposureTime() sets the exposure time indicated by EXP, in ms. Since this
is adjustable only in multiples of the time a line needs, the time actually used can deviate from
the desired value.
The actual duration set after the call of this function is read out via the parameter newEXP.
Changing the window size or the readout timing (pixel clock) also changes the exposure time set
before, so is_SetExposureTime() must be called again afterwards.
Exposure-time interacting functions:
- is_SetImageSize()
- is_SetPixelClock()
- is_SetFrameRate() (only if the new image time will be shorter than the exposure time)
Which minimum and maximum values are possible, and how they depend on the individual
sensor, is explained in detail in the uEye timing documentation.
Depending on when the exposure time is changed, the change only takes effect with the
recording of the next image.
:param hCam: c_uint (aka c-type: HIDS)
:param EXP: c_double (aka c-type: DOUBLE) - New desired exposure-time.
:param newEXP: c_double (aka c-type: double *) - Actual exposure time.
:returns: IS_SUCCESS, IS_NO_SUCCESS
Notes for EXP values:
- IS_GET_EXPOSURE_TIME Returns the actual exposure-time through parameter newEXP.
- If EXP = 0.0 is passed, an exposure time of (1/frame rate) is used.
- IS_GET_DEFAULT_EXPOSURE Returns the default exposure time newEXP Actual exposure time
- IS_SET_ENABLE_AUTO_SHUTTER : activates the AutoExposure functionality.
Setting a value will deactivate the functionality.
(see also 4.86 is_SetAutoParameter).
"""
_hCam = ueye._value_cast(hCam, ueye.ctypes.c_uint)
_EXP = ueye._value_cast(EXP, ueye.ctypes.c_double)
ret = IdsCamera._is_SetExposureTime(_hCam, _EXP, ueye.ctypes.byref(newEXP) if newEXP is not None else None)
return ret
def __init__(self, selector=''):
self.hCam = ueye.HIDS(0) # 0: first available camera; 1-254: The camera with the specified camera ID
self.sInfo = ueye.SENSORINFO()
self.cInfo = ueye.CAMINFO()
self.pcImageMemory = ueye.c_mem_p()
self.MemID = ueye.INT()
self.rectAOI = ueye.IS_RECT()
self.pitch = ueye.INT()
self.nBitsPerPixel = ueye.INT()
self.m_nColorMode = ueye.INT()
self.bytes_per_pixel = 0
self.width = ueye.INT()
self.height = ueye.INT()
self.size = (-1, -1)
self.ok = False
self.error_str = ''
self.last_frame = None
def _error(self, err_str):
self.error_str = err_str
return
def connect(self):
self.error_str = ''
# Starts the driver and establishes the connection to the camera
rc = ueye.is_InitCamera(self.hCam, None)
if rc != ueye.IS_SUCCESS:
return self._error("is_InitCamera ERROR")
# Reads out the data hard-coded in the non-volatile camera memory
# and writes it to the data structure that cInfo points to
rc = ueye.is_GetCameraInfo(self.hCam, self.cInfo)
if rc != ueye.IS_SUCCESS:
return self._error("is_GetCameraInfo ERROR")
# You can query additional information about the sensor type used in the camera
rc = ueye.is_GetSensorInfo(self.hCam, self.sInfo)
if rc != ueye.IS_SUCCESS:
return self._error("is_GetSensorInfo ERROR")
rc = ueye.is_ResetToDefault(self.hCam)
if rc != ueye.IS_SUCCESS:
return self._error("is_ResetToDefault ERROR")
# Set display mode to DIB
rc = ueye.is_SetDisplayMode(self.hCam, ueye.IS_SET_DM_DIB)
# Set the right color mode
if int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_BAYER:
# setup the color depth to the current windows setting
ueye.is_GetColorDepth(self.hCam, self.nBitsPerPixel, self.m_nColorMode)
self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
print("IS_COLORMODE_BAYER: ", )
elif int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_CBYCRY:
# for color camera models use RGB32 mode
self.m_nColorMode = ueye.IS_CM_BGRA8_PACKED
self.nBitsPerPixel = ueye.INT(32)
self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
print("IS_COLORMODE_CBYCRY: ", )
elif int.from_bytes(self.sInfo.nColorMode.value, byteorder='big') == ueye.IS_COLORMODE_MONOCHROME:
# for color camera models use RGB32 mode
self.m_nColorMode = ueye.IS_CM_MONO8
self.nBitsPerPixel = ueye.INT(8)
self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
print("IS_COLORMODE_MONOCHROME: ", )
else:
# for monochrome camera models use Y8 mode
self.m_nColorMode = ueye.IS_CM_MONO8
self.nBitsPerPixel = ueye.INT(8)
self.bytes_per_pixel = int(self.nBitsPerPixel / 8)
print("Color mode: not identified")
print("\tm_nColorMode: \t\t", self.m_nColorMode)
print("\tnBitsPerPixel: \t\t", self.nBitsPerPixel)
print("\tbytes_per_pixel: \t\t", self.bytes_per_pixel)
print()
# Can be used to set the size and position of an "area of interest"(AOI) within an image
rc = ueye.is_AOI(self.hCam, ueye.IS_AOI_IMAGE_GET_AOI, self.rectAOI, ueye.sizeof(self.rectAOI))
if rc != ueye.IS_SUCCESS:
return self._error("is_AOI ERROR")
self.width = self.rectAOI.s32Width
self.height = self.rectAOI.s32Height
self.size = (self.width.value, self.height.value)
# Prints out some information about the camera and the sensor
print("Camera model:\t\t", self.sInfo.strSensorName.decode('utf-8'))
print("Camera serial no.:\t", self.cInfo.SerNo.decode('utf-8'))
print("Camera image size:\t", str(self.size))
print()
# Allocates an image memory for an image having its dimensions defined by width and height
# and its color depth defined by nBitsPerPixel
rc = ueye.is_AllocImageMem(self.hCam, self.width, self.height,
self.nBitsPerPixel, self.pcImageMemory, self.MemID)
if rc != ueye.IS_SUCCESS:
return self._error("is_AllocImageMem ERROR")
else:
# Makes the specified image memory the active memory
rc = ueye.is_SetImageMem(self.hCam, self.pcImageMemory, self.MemID)
if rc != ueye.IS_SUCCESS:
return self._error("is_SetImageMem ERROR")
else:
# Set the desired color mode
rc = ueye.is_SetColorMode(self.hCam, self.m_nColorMode)
# Activates the camera's live video mode (free run mode)
rc = ueye.is_CaptureVideo(self.hCam, ueye.IS_DONT_WAIT)
if rc != ueye.IS_SUCCESS:
return self._error("is_CaptureVideo ERROR")
# Enables the queue mode for existing image memory sequences
rc = ueye.is_InquireImageMem(self.hCam, self.pcImageMemory, self.MemID,
self.width, self.height, self.nBitsPerPixel, self.pitch)
if rc != ueye.IS_SUCCESS:
return self._error("is_InquireImageMem ERROR")
else:
print("IDS camera: connection ok")
self.ok = True
def grab_image(self):
if not self.ok:
return None
# In order to display the image in an OpenCV window we need to...
# ...extract the data of our image memory
array = ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)
# ...reshape it in an numpy array...
frame = np.reshape(array, (self.height.value, self.width.value, self.bytes_per_pixel))
# ...resize the image by a half
# frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
self.last_frame = frame
return frame
def disconnect(self):
# Releases an image memory that was allocated using is_AllocImageMem() and removes it from the driver management
ueye.is_FreeImageMem(self.hCam, self.pcImageMemory, self.MemID)
# Disables the hCam camera handle and releases the data structures and memory areas taken up by the uEye camera
ueye.is_ExitCamera(self.hCam)
def set_camera_exposure(self, level_us):
"""
:param level_us: exposure level in micro-seconds, or zero for auto exposure
note that you can never exceed 1000000/fps, but it is possible to change the fps
"""
p1 = ueye.DOUBLE()
if level_us == 0:
rc = IdsCamera._is_SetExposureTime(self.hCam, ueye.IS_SET_ENABLE_AUTO_SHUTTER, p1)
print(f'set_camera_exposure: set to auto')
else:
ms = ueye.DOUBLE(level_us / 1000)
rc = IdsCamera._is_SetExposureTime(self.hCam, ms, p1)
print(f'set_camera_exposure: requested {ms.value}, got {p1.value}')
def get_camera_exposure(self, force_val=False):
"""
returns the current exposure time in micro-seconds, or zero if auto exposure is on
:param force_val: if True, will return level of exposure even if auto exposure is on
"""
p1 = ueye.DOUBLE()
p2 = ueye.DOUBLE()
# we dump both auto-gain and auto exposure states:
rc = ueye.is_SetAutoParameter(self.hCam, ueye.IS_GET_ENABLE_AUTO_GAIN, p1, p2)
print(f'IS_GET_ENABLE_AUTO_GAIN={p1.value == 1}')
rc = ueye.is_SetAutoParameter(self.hCam, ueye.IS_GET_ENABLE_AUTO_SHUTTER, p1, p2)
print(f'IS_GET_ENABLE_AUTO_SHUTTER={p1.value == 1}')
if (not force_val) and p1.value == 1:
return 0 # auto exposure
rc = IdsCamera._is_SetExposureTime(self.hCam, IdsCamera.IS_GET_EXPOSURE_TIME, p1)
print(f'IS_GET_EXPOSURE_TIME={p1.value}')
return p1.value * 1000
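A minimal usage sketch for the class above (assuming it is importable; window handling and error checking are kept to a minimum):
cam = IdsCamera()
cam.connect()
if cam.ok:
    cam.set_camera_exposure(0)        # 0 -> enable auto exposure (auto shutter)
    frame = cam.grab_image()          # numpy array, shape (height, width, bytes_per_pixel)
    print('current exposure [us]:', cam.get_camera_exposure())
    cam.disconnect()
else:
    print('connect failed:', cam.error_str)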

I want to process a live stream from youtube with opencv

I am posting this with the help of a translation tool.
I would like to use a YouTube live stream for action recognition.
Action recognition:
https://github.com/felixchenfy/Realtime-Action-Recognition
Reading a YouTube live stream with OpenCV:
How to read Youtube live stream using openCV python?
YouTube URL:
https://www.youtube.com/watch?v=DjdUEyjx8GM
I programmed it with reference to the above. The program runs normally for a few hours, but after 4 or 5 hours I get an error saying that communication from the remote host has been lost. Checking the YouTube video, the stream itself does not appear to be interrupted.
Part of the video capture code is shown here:
url = "https://www.youtube.com/watch?v=DjdUEyjx8GM"
video = pafy.new(url)
best = video.getbest(preftype="mp4")
class ReadFromVideo(object):
def __init__(self, video_path, sample_interval=1):
''' A video reader class for reading video frames from video.
Arguments:
video_path
sample_interval {int}: sample every kth image.
'''
assert isinstance(sample_interval, int) and sample_interval >= 1
self.cnt_imgs = 0
self._is_stoped = False
self._video = cv2.VideoCapture(best.url)
ret, image = self._video.read()
ret, image2 = self._video2.read()
self._next_image = image
self._next_image2 = image2
self._sample_interval = sample_interval
self._fps = self.get_fps()
if not self._fps >= 0.0001:
import warnings
warnings.warn("Invalid fps of video: {}".format(video_path))
def has_image(self):
return self._next_image is not None
def get_curr_video_time(self):
return 1.0 / self._fps * self.cnt_imgs
def read_image(self):
image = self._next_image
image2 = self._next_image2
for i in range(self._sample_interval):
if self._video.isOpened():
ret, frame = self._video.read()
self._next_image = frame
ret, frame2 = self._video2.read()
self._next_image2 = frame2
else:
self._next_image = None
self._next_image2 = None
break
self.cnt_imgs += 1
return image, image2
def stop(self):
self._video.release()
self._video2.release()
self._is_stoped = True
def __del__(self):
if not self._is_stoped:
self.stop()
def get_fps(self):
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# With webcam get(CV_CAP_PROP_FPS) does not work.
# Let's see for ourselves.
# Get video properties
if int(major_ver) < 3:
fps = self._video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = self._video.get(cv2.CAP_PROP_FPS)
return fps
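One workaround I am considering for the disconnect after a few hours is to re-resolve the stream URL with pafy and reopen the VideoCapture whenever read() starts failing. A rough sketch (not yet tested on a long-running stream; the retry count is arbitrary):
import cv2
import pafy

def open_stream(url):
    # Re-resolve the stream URL and open a fresh capture
    best = pafy.new(url).getbest(preftype="mp4")
    return cv2.VideoCapture(best.url)

def read_with_reconnect(video, url, max_retries=3):
    # Read one frame; if the read fails (e.g. the remote host dropped the
    # connection), rebuild the capture and try again a few times.
    ret, frame = video.read()
    retries = 0
    while not ret and retries < max_retries:
        video.release()
        video = open_stream(url)
        ret, frame = video.read()
        retries += 1
    return video, (frame if ret else None)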
#!/usr/bin/env python
# coding: utf-8
'''
Test action recognition on
(1) a video, (2) a folder of images, (3) or web camera.
Input:
model: model/trained_classifier.pickle
Output:
result video: output/${video_name}/video.avi
result skeleton: output/${video_name}/skeleton_res/XXXXX.txt
visualization by cv2.imshow() in img_displayer
'''
The entire source code is here
'''
Example of usage:
(1) Test on video file:
python src/s5_test.py \
--model_path model/trained_classifier.pickle \
--data_type video \
--data_path data_test/exercise.avi \
--output_folder output
(2) Test on a folder of images:
python src/s5_test.py \
--model_path model/trained_classifier.pickle \
--data_type folder \
--data_path data_test/apple/ \
--output_folder output
(3) Test on web camera:
python src/s5_test.py \
--model_path model/trained_classifier.pickle \
--data_type webcam \
--data_path 0 \
--output_folder output
'''
if True:  # Include project path
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
CURR_PATH = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.append(ROOT)
import utils.lib_images_io as lib_images_io
import utils.lib_plot as lib_plot
import utils.lib_commons as lib_commons
from utils.lib_openpose import SkeletonDetector
from utils.lib_tracker import Tracker
from utils.lib_tracker import Tracker
from utils.lib_classifier import ClassifierOnlineTest
from utils.lib_classifier import * # Import all sklearn related libraries
def par(path): # Pre-Append ROOT to the path if it's not absolute
return ROOT + path if (path and path[0] != "/") else path
# -- Command-line input
def get_command_line_arguments():
def parse_args():
parser = argparse.ArgumentParser(
description="Test action recognition on \n"
"(1) a video, (2) a folder of images, (3) or web camera.")
parser.add_argument("-m", "--model_path", required=False,
default='model/trained_classifier.pickle')
parser.add_argument("-t", "--data_type", required=False, default='webcam',
choices=["video", "folder", "webcam"])
parser.add_argument("-p", "--data_path", required=False, default="",
help="path to a video file, or images folder, or webcam. \n"
"For video and folder, the path should be "
"absolute or relative to this project's root. "
"For webcam, either input an index or device name. ")
parser.add_argument("-o", "--output_folder", required=False, default='output/',
help="Which folder to save result to.")
args = parser.parse_args()
return args
args = parse_args()
if args.data_type != "webcam" and args.data_path and args.data_path[0] != "/":
# If the path is not absolute, then its relative to the ROOT.
args.data_path = ROOT + args.data_path
return args
def get_dst_folder_name(src_data_type, src_data_path):
''' Compute a output folder name based on data_type and data_path.
The final output of this script looks like this:
DST_FOLDER/folder_name/video.avi
DST_FOLDER/folder_name/skeletons/XXXXX.txt
'''
assert(src_data_type in ["video", "folder", "webcam"])
if src_data_type == "video": # /root/data/video.avi --> video
folder_name = str("video")
elif src_data_type == "folder": # /root/data/video/ --> video
folder_name = src_data_path.rstrip("/").split("/")[-1]
elif src_data_type == "webcam":
# month-day-hour-minute-seconds, e.g.: 02-26-15-51-12
folder_name = lib_commons.get_time_string()
return folder_name
args = get_command_line_arguments()
SRC_DATA_TYPE = args.data_type
SRC_DATA_PATH = args.data_path
SRC_MODEL_PATH = args.model_path
DST_FOLDER_NAME = get_dst_folder_name(SRC_DATA_TYPE, SRC_DATA_PATH)
# -- Settings
cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s5_test.py"]
CLASSES = np.array(cfg_all["classes"])
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]
# Action recognition: number of frames used to extract features.
WINDOW_SIZE = int(cfg_all["features"]["window_size"])
# Output folder
DST_FOLDER = args.output_folder + "/" + DST_FOLDER_NAME + "/"
DST_SKELETON_FOLDER_NAME = cfg["output"]["skeleton_folder_name"]
DST_VIDEO_NAME = cfg["output"]["video_name"]
# framerate of output video.avi
DST_VIDEO_FPS = float(cfg["output"]["video_fps"])
# writer
fmt = cv2.VideoWriter_fourcc(*'MP4V')
fps = 10.0
size = (800, 480)
writer = cv2.VideoWriter('outtest.mp4', fmt, fps, size)
backup_time = 3600 # sec
time = 0 # init
now = datetime.datetime.now()
s_now = now.strftime('%Y-%m-%d-%H-%M-%S')
back_up = cv2.VideoWriter(f'{s_now}.mp4',fmt, fps, size)
# Video setttings
# If data_type is webcam, set the max frame rate.
SRC_WEBCAM_MAX_FPS = float(cfg["settings"]["source"]
["webcam_max_framerate"])
# If data_type is video, set the sampling interval.
# For example, if it's 3, then the video will be read 3 times faster.
SRC_VIDEO_SAMPLE_INTERVAL = int(cfg["settings"]["source"]
["video_sample_interval"])
# Openpose settings
OPENPOSE_MODEL = cfg["settings"]["openpose"]["model"]
OPENPOSE_IMG_SIZE = cfg["settings"]["openpose"]["img_size"]
# Display settings
img_disp_desired_rows = int(cfg["settings"]["display"]["desired_rows"])
# -- Function
def select_images_loader(src_data_type, src_data_path):
if src_data_type == "video":
images_loader = lib_images_io.ReadFromVideo(
src_data_path,
sample_interval=SRC_VIDEO_SAMPLE_INTERVAL)
elif src_data_type == "folder":
images_loader = lib_images_io.ReadFromFolder(
folder_path=src_data_path)
elif src_data_type == "webcam":
if src_data_path == "":
webcam_idx = 0
elif src_data_path.isdigit():
webcam_idx = int(src_data_path)
else:
webcam_idx = src_data_path
images_loader = lib_images_io.ReadFromWebcam(
SRC_WEBCAM_MAX_FPS, webcam_idx)
return images_loader
class MultiPersonClassifier(object):
''' This is a wrapper around ClassifierOnlineTest
for recognizing actions of multiple people.
'''
def __init__(self, model_path, classes):
self.dict_id2clf = {} # human id -> classifier of this person
# Define a function for creating classifier for new people.
self._create_classifier = lambda human_id: ClassifierOnlineTest(
model_path, classes, WINDOW_SIZE, human_id)
def classify(self, dict_id2skeleton):
''' Classify the action type of each skeleton in dict_id2skeleton '''
# Clear people not in view
old_ids = set(self.dict_id2clf)
cur_ids = set(dict_id2skeleton)
humans_not_in_view = list(old_ids - cur_ids)
for human in humans_not_in_view:
del self.dict_id2clf[human]
# Predict each person's action
id2label = {}
for id, skeleton in dict_id2skeleton.items():
if id not in self.dict_id2clf: # add this new person
self.dict_id2clf[id] = self._create_classifier(id)
classifier = self.dict_id2clf[id]
id2label[id] = classifier.predict(skeleton) # predict label
# print("\n\nPredicting label for human{}".format(id))
# print(" skeleton: {}".format(skeleton))
# print(" label: {}".format(id2label[id]))
return id2label
def get_classifier(self, id):
''' Get the classifier based on the person id.
Arguments:
id {int or "min"}
'''
if len(self.dict_id2clf) == 0:
return None
if id == 'min':
id = min(self.dict_id2clf.keys())
return self.dict_id2clf[id]
def remove_skeletons_with_few_joints(skeletons):
''' Remove bad skeletons before sending to the tracker '''
good_skeletons = []
for skeleton in skeletons:
px = skeleton[2:2+13*2:2]
py = skeleton[3:2+13*2:2]
num_valid_joints = len([x for x in px if x != 0])
num_leg_joints = len([x for x in px[-6:] if x != 0])
total_size = max(py) - min(py)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# IF JOINTS ARE MISSING, TRY CHANGING THESE VALUES:
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if num_valid_joints >= 5 and total_size >= 0.1 and num_leg_joints >= 0:
# add this skeleton only when all requirements are satisfied
good_skeletons.append(skeleton)
return good_skeletons
def draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
skeleton_detector, multiperson_classifier):
''' Draw skeletons, labels, and prediction scores onto image for display '''
# Resize to a proper size for display
r, c = img_disp.shape[0:2]
desired_cols = int(1.0 * c * (img_disp_desired_rows / r))
img_disp = cv2.resize(img_disp,
dsize=(desired_cols, img_disp_desired_rows))
# Draw all people's skeleton
skeleton_detector.draw(img_disp, humans)
# Draw bounding box and label of each person
if len(dict_id2skeleton):
for id, label in dict_id2label.items():
skeleton = dict_id2skeleton[id]
# scale the y data back to original
skeleton[1::2] = skeleton[1::2] / scale_h
# print("Drawing skeleton: ", dict_id2skeleton[id], "with label:", label, ".")
lib_plot.draw_action_result(img_disp, id, skeleton, label)
# Add blank to the left for displaying prediction scores of each class
img_disp = lib_plot.add_white_region_to_left_of_image(img_disp)
cv2.putText(img_disp, "Frame:" + str(ith_img),
(20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
color=(0, 0, 0), thickness=2)
# Draw predicting score for only 1 person
if len(dict_id2skeleton):
classifier_of_a_person = multiperson_classifier.get_classifier(
id='min')
classifier_of_a_person.draw_scores_onto_image(img_disp)
return img_disp
# Time Display
cv2.putText(img2_disp, str(now_time.isoformat(timespec='seconds')),
(20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
color=(0, 0, 0), thickness=2)
# Draw predicting score for only 1 person
if len(dict2_id2skeleton):
classifier_of_a_person = multiperson_classifier.get_classifier(
id='min')
classifier_of_a_person.draw_scores_onto_image(img2_disp)
return img2_disp
def get_the_skeleton_data_to_save_to_disk(dict_id2skeleton):
'''
In each image, for each skeleton, save the:
human_id, label, and the skeleton positions of length 18*2.
So the total length per row is 2+36=38
'''
skels_to_save = []
for human_id in dict_id2skeleton.keys():
label = dict_id2label[human_id]
skeleton = dict_id2skeleton[human_id]
skels_to_save.append([[human_id, label] + skeleton.tolist()])
return skels_to_save
# -- Main
if __name__ == "__main__":
# -- Detector, tracker, classifier
skeleton_detector = SkeletonDetector(OPENPOSE_MODEL, OPENPOSE_IMG_SIZE)
multiperson_tracker = Tracker()
multiperson_classifier = MultiPersonClassifier(SRC_MODEL_PATH, CLASSES)
# -- Image reader and displayer
images_loader = select_images_loader(SRC_DATA_TYPE, SRC_DATA_PATH)
img_displayer = lib_images_io.ImageDisplayer()
# -- Init output
# output folder
os.makedirs(DST_FOLDER, exist_ok=True)
os.makedirs(DST_FOLDER + DST_SKELETON_FOLDER_NAME, exist_ok=True)
# -- Read images and process
try:
ith_img = -1
while images_loader.has_image():
# Timer
now = datetime.datetime.now()
now_time = now.time()
# -- Read image
#img = images_loader.read_image()
img = images_loader.read_image()
ith_img += 1
img_disp = img.copy()
# print(f"\nProcessing {ith_img}th image ...")
# -- Detect skeletons
humans = skeleton_detector.detect(img)
skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
skeletons = remove_skeletons_with_few_joints(skeletons)
# -- Track people
dict_id2skeleton = multiperson_tracker.track(skeletons)
# -- Log & Recognize action of each person
log.append(str(now_time))
if len(dict_id2skeleton):
dict_id2label = multiperson_classifier.classify(
dict_id2skeleton)
# -- Draw
img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton, skeleton_detector, multiperson_classifier)
# Print label of a person
if len(dict_id2skeleton):
min_id = min(dict_id2skeleton.keys())
# print("prediced label is :", dict_id2label[min_id])
# -- Display image, and write to video.avi
# video writer
if time >= backup_time*fps:
back_up.release()
now = datetime.datetime.now()
s_now = now.strftime('%Y-%m-%d-%H-%M-%S')
back_up = cv2.VideoWriter(f'{s_now}.mp4', fmt, fps, size)
time = 0
time += 1
writer.write(img_disp)
back_up.write(img_disp)
img_displayer.display(img_disp, wait_key_ms=1)
# -- Get skeleton data and save to file
skels_to_save = get_the_skeleton_data_to_save_to_disk(dict_id2skeleton)
lib_commons.save_listlist(DST_FOLDER + DST_SKELETON_FOLDER_NAME +SKELETON_FILENAME_FORMAT.format(ith_img),skels_to_save)
if cv2.waitKey(1) == 27:
break
finally:
writer.release()
back_up.release()
cv2.destroyAllWindows()
Thanks for your help.

How to call a function once in a while loop

I have robotic code that does the following:
the camera starts processing and taking images
the MountingHoles (Hough transform) detection function is activated
the holes are drawn on the image
the approachcircle function moves the robot towards one of the detected coordinates
I have two issues:
The MountingHoles function keeps getting called even after the coordinates have been detected once.
The robot driven by the approachcircle function can't move to one coordinate and then on to the next. It keeps going back and forth, since x and y are never pinned down so as to finish the first set of coordinates first; i.e. between two circles it reaches neither center as expected, and it never reaches the center of a detected circle if there is more than one.
I want the code to call the MountingHoles function once and have the robot move to each recorded coordinate. After the initial set of coordinates is done, I will move the robot to another area and start the process again. I'm assuming the problem is that these functions sit in the camera processing loop, which runs indefinitely. A rough sketch of what I'm aiming for is shown below, before the full code.
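(Hypothetical and untested; it reuses RDK, cam_item, MountingHoles and approachcircle exactly as they appear in the full code that follows.)
# Sketch: detect the circles once, then move to each stored coordinate in turn
keypoints = None
while keypoints is None:
    bytes_img = RDK.Cam2D_Snapshot("", cam_item)
    nparr = np.frombuffer(bytes_img, np.uint8)
    img = cv.imdecode(nparr, cv.IMREAD_UNCHANGED)
    keypoints = MountingHoles(img, 250, 50)   # HoughCircles returns None until circles are found

for keypoint in keypoints[0, :]:
    X = int(keypoint[0]) - 320
    Y = int(keypoint[1]) - 240
    approachcircle(X * 0.1, Y * 0.1, 0)       # finish approaching this circle...
    # ...before moving on to the next recorded coordinate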
My full code is below:
##Def:
def approachcircle (r,t,z):
move = robot.Pose()*transl(r,t,z)
robot.MoveL(move)
def approacharea (z):
move = robot.Pose()*transl(0,0,z)
robot.MoveL(move)
def MountingHoles(img,thresh,r):
minR = r
CannyHighT = 50
min_points = 15 #param2
img_1= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
#img3 = cv2.inRange(img_1, thresh, 255)
circles = cv.HoughCircles(img_1,cv.HOUGH_GRADIENT, 1, 2*minR, param1=CannyHighT,
param2=min_points, minRadius=minR, maxRadius=220)
return circles
#Installation
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
RDK = Robolink()
pose = eye()
ITEM_TYPE_ROBOT
RDK = robolink.Robolink()
robot = RDK.Item('TM12X')
import_install('cv2', 'opencv-python')
import cv2 as cv
import numpy as np
import numpy
#----------------------------------
# Settings
PROCESS_COUNT = -1 # How many frames to process before exiting. -1 means indefinitely.
CAM_NAME = "Camera"
DISPLAY_SETTINGS = True
WDW_NAME_PARAMS = 'RoboDK - Blob detection parameters'
DISPLAY_RESULT = True
WDW_NAME_RESULTS = 'RoboDK - Blob detections1'
# Calculate absolute XYZ position clicked from the camera in absolute coordinates:
cam_item = RDK.Item(CAM_NAME, ITEM_TYPE_CAMERA)
if not cam_item.Valid():
raise Exception("Camera not found! %s" % CAM_NAME)
cam_item.setParam('Open', 1) # Force the camera view to open
#----------------------------------------------
# Create an resizable result window
if DISPLAY_RESULT:
cv.namedWindow(WDW_NAME_RESULTS) #, cv.WINDOW_NORMAL)
#----------------------------------------------
# capture = cv.VideoCapture(0)
# retval, image = capture.read()
#----------------------------------------------
# Process camera frames
count = 0
while count < PROCESS_COUNT or PROCESS_COUNT < 0:
print("=============================================")
print("Processing image %i" % count)
count += 1
#----------------------------------------------
# Get the image from RoboDK
bytes_img = RDK.Cam2D_Snapshot("", cam_item)
if bytes_img == b'':
raise
# Image from RoboDK are BGR, uchar, (h,w,3)
nparr = np.frombuffer(bytes_img, np.uint8)
img = cv.imdecode(nparr, cv.IMREAD_UNCHANGED)
if img is None or img.shape == ():
raise
#----------------------------------------------
# Detect blobs
keypoints = MountingHoles(img,250,50)
i = 0
#----------------------------------------------
# Display the detection to the user (reference image and camera image side by side, with detection results)
if DISPLAY_RESULT:
# Draw detected blobs and their id
i = 0
for keypoint in keypoints[0,:]:
cv.putText(img, str(i), (int(keypoint[0]), int(keypoint[1])), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
cv.circle(img, (int(keypoint[0]), int(keypoint[1])), int(keypoint[2]), (0, 0, 255), 15)
#
i += 1
# Resize the image, so that it fits your screen
img = cv.resize(img, (int(img.shape[1] * .75), int(img.shape[0] * .75)))#
cv.imshow(WDW_NAME_RESULTS, img)
key = cv.waitKey(500)
if key == 27:
break # User pressed ESC, exit
if cv.getWindowProperty(WDW_NAME_RESULTS, cv.WND_PROP_VISIBLE) < 1:
break # User killed the window, exit
#--------------------------------------------------------------------------------------------
# Movement functions
r=0
t=0
i=0
#approacharea(200)
for keypoint in keypoints[0,:]:
#print("id:%i coord=(%0.0f, %0.0f)" % (i, X, Y))
X= int(keypoint[0])-320
Y=int(keypoint[1])-240
r=int(keypoint[2])
print("id:%i coord=(%0.0f, %0.0f)" % (i, X, Y))
if X!= 0 or Y!=0 :
r=X*0.1
t=Y*0.1
approachcircle(r,t,0)
i+=1

Setting up variables correctly in Python

So I'm trying to launch a Python script (originally available here: https://github.com/dvdtho/python-photo-mosaic). Full code is at the bottom of this post.
This basically creates a mosaic from a source image, with the final image (the mosaic) composed of several other images (tiles).
My question is how I am supposed to fill in the variables (the ones at line 212) in order to run the script (through Eclipse in my case).
Should I directly put in something like this? (In my case the folder Desktop/tiles contains all the jpg files.)
tile_paths = glob.glob("C:/Users/Sylvia/Desktop/tiles/*.jpg") # I've added this line myself
def create_mosaic(source_path="C:\\Users\\Sylvia\\Desktop\\source\\1.jpg", target="C:\\Users\\Sylvia\\Desktop\\source\\result.jpg", tile_ratio=1920/800, tile_width=75, enlargement=8, reuse=True, color_mode='RGB', tile_paths=None, shuffle_first=30):
The last time I tried, I got this error:
def create_mosaic(source, target, tile_ratio=1920/800, tile_width=75,
enlargement=8, reuse=True, color_mode='RGB', tile_paths,
shuffle_first=30):
^ SyntaxError: non-default argument follows default argument
I'm very lost, hopefully someone can help me.
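From what I gather, that error just means that in a def, every parameter listed after one with a default value must also have a default, for example:
# def f(a, b=1, c): ...   # SyntaxError: non-default argument follows default argument
def f(a, c, b=1): ...      # OK: parameters without defaults come first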
Here's the code :
import time
import itertools
import random
import sys
import numpy as np
from PIL import Image
from skimage import img_as_float
from skimage.measure import compare_mse
def shuffle_first_items(lst, i):
if not i:
return lst
first_few = lst[:i]
remaining = lst[i:]
random.shuffle(first_few)
return first_few + remaining
def bound(low, high, value):
return max(low, min(high, value))
class ProgressCounter:
def __init__(self, total):
self.total = total
self.counter = 0
def update(self):
self.counter += 1
sys.stdout.write("Progress: %s%% %s" % (100 * self.counter / self.total, "\r"))
sys.stdout.flush()
def img_mse(im1, im2):
"""Calculates the root mean square error (RSME) between two images"""
try:
return compare_mse(img_as_float(im1), img_as_float(im2))
except ValueError:
print(f'RMS issue, Img1: {im1.size[0]} {im1.size[1]}, Img2: {im2.size[0]} {im2.size[1]}')
raise KeyboardInterrupt
def resize_box_aspect_crop_to_extent(img, target_aspect, centerpoint=None):
width = img.size[0]
height = img.size[1]
if not centerpoint:
centerpoint = (int(width / 2), int(height / 2))
requested_target_x = centerpoint[0]
requested_target_y = centerpoint[1]
aspect = width / float(height)
if aspect > target_aspect:
# Then crop the left and right edges:
new_width = int(target_aspect * height)
new_width_half = int(new_width/2)
target_x = bound(new_width_half, width-new_width_half, requested_target_x)
left = target_x - new_width_half
right = target_x + new_width_half
resize = (left, 0, right, height)
else:
# ... crop the top and bottom:
new_height = int(width / target_aspect)
new_height_half = int(new_height/2)
target_y = bound(new_height_half, height-new_height_half, requested_target_y)
top = target_y - new_height_half
bottom = target_y + new_height_half
resize = (0, top, width, bottom)
return resize
def aspect_crop_to_extent(img, target_aspect, centerpoint=None):
'''
Crop an image to the desired perspective at the maximum size available.
Centerpoint can be provided to focus the crop to one side or another -
eg just cut the left side off if interested in the right side.
target_aspect = width / float(height)
centerpoint = (width, height)
'''
resize = resize_box_aspect_crop_to_extent(img, target_aspect, centerpoint)
return img.crop(resize)
class Config:
def __init__(self, tile_ratio=1920/800, tile_width=50, enlargement=8, color_mode='RGB'):
self.tile_ratio = tile_ratio # 2.4
self.tile_width = tile_width # height/width of mosaic tiles in pixels
self.enlargement = enlargement # mosaic image will be this many times wider and taller than original
self.color_mode = color_mode # mosaic image will be this many times wider and taller than original
@property
def tile_height(self):
return int(self.tile_width / self.tile_ratio)
@property
def tile_size(self):
return self.tile_width, self.tile_height # PIL expects (width, height)
class TileBox:
"""
Container to import, process, hold, and compare all of the tiles
we have to make the mosaic with.
"""
def __init__(self, tile_paths, config):
self.config = config
self.tiles = list()
self.prepare_tiles_from_paths(tile_paths)
def __process_tile(self, tile_path):
with Image.open(tile_path) as i:
img = i.copy()
img = aspect_crop_to_extent(img, self.config.tile_ratio)
large_tile_img = img.resize(self.config.tile_size, Image.ANTIALIAS).convert(self.config.color_mode)
self.tiles.append(large_tile_img)
return True
def prepare_tiles_from_paths(self, tile_paths):
print('Reading tiles from provided list...')
progress = ProgressCounter(len(tile_paths))
for tile_path in tile_paths:
progress.update()
self.__process_tile(tile_path)
print('Processed tiles.')
return True
def best_tile_block_match(self, tile_block_original):
match_results = [img_mse(t, tile_block_original) for t in self.tiles]
best_fit_tile_index = np.argmin(match_results)
return best_fit_tile_index
def best_tile_from_block(self, tile_block_original, reuse=False):
if not self.tiles:
print('Ran out of images.')
raise KeyboardInterrupt
#start_time = time.time()
i = self.best_tile_block_match(tile_block_original)
#print("BLOCK MATCH took --- %s seconds ---" % (time.time() - start_time))
match = self.tiles[i].copy()
if not reuse:
del self.tiles[i]
return match
class SourceImage:
"""Processing original image - scaling and cropping as needed."""
def __init__(self, image_path, config):
print('Processing main image...')
self.image_path = image_path
self.config = config
with Image.open(self.image_path) as i:
img = i.copy()
w = img.size[0] * self.config.enlargement
h = img.size[1] * self.config.enlargement
large_img = img.resize((w, h), Image.ANTIALIAS)
w_diff = (w % self.config.tile_width)/2
h_diff = (h % self.config.tile_height)/2
# if necesary, crop the image slightly so we use a
# whole number of tiles horizontally and vertically
if w_diff or h_diff:
large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))
self.image = large_img.convert(self.config.color_mode)
print('Main image processed.')
class MosaicImage:
"""Holder for the mosaic"""
def __init__(self, original_img, target, config):
self.config = config
self.target = target
# Lets just start with original image, scaled up, instead of a blank one
self.image = original_img
# self.image = Image.new(original_img.mode, original_img.size)
self.x_tile_count = int(original_img.size[0] / self.config.tile_width)
self.y_tile_count = int(original_img.size[1] / self.config.tile_height)
self.total_tiles = self.x_tile_count * self.y_tile_count
print(f'Mosaic will be {self.x_tile_count:,} tiles wide and {self.y_tile_count:,} tiles high ({self.total_tiles:,} total).')
def add_tile(self, tile, coords):
"""Adds the provided image onto the mosiac at the provided coords."""
try:
self.image.paste(tile, coords)
except TypeError as e:
print('Maybe the tiles are not the right size. ' + str(e))
def save(self):
self.image.save(self.target)
def coords_from_middle(x_count, y_count, y_bias=1, shuffle_first=0, ):
'''
Lets start in the middle where we have more images.
And we dont get "lines" where the same-best images
get used at the start.
y_bias - if we are using non-square coords, we can
influence the order to be closer to the real middle.
If width is 2x height, y_bias should be 2.
shuffle_first - We can suffle the first X coords
so that we dont use all the same-best images
in the same spot - in the middle
from movies.mosaic_mem import coords_from_middle
x = 10
y = 10
coords_from_middle(x, y, y_bias=2, shuffle_first=0)
'''
x_mid = int(x_count/2)
y_mid = int(y_count/2)
coords = list(itertools.product(range(x_count), range(y_count)))
coords.sort(key=lambda c: abs(c[0]-x_mid)*y_bias + abs(c[1]-y_mid))
coords = shuffle_first_items(coords, shuffle_first)
return coords
def create_mosaic(source_path, target, tile_ratio=1920/800, tile_width=75, enlargement=8, reuse=True, color_mode='RGB', tile_paths=None, shuffle_first=30):
"""Forms an mosiac from an original image using the best
tiles provided. This reads, processes, and keeps in memory
a copy of the source image, and all the tiles while processing.
Arguments:
source_path -- filepath to the source image for the mosiac
target -- filepath to save the mosiac
tile_ratio -- height/width of mosaic tiles in pixels
tile_width -- width of mosaic tiles in pixels
enlargement -- mosaic image will be this many times wider and taller than the original
reuse -- Should we reuse tiles in the mosaic, or just use each tile once?
color_mode -- L for greyscale or RGB for color
tile_paths -- List of filepaths to your tiles
shuffle_first -- Mosiac will be filled out starting in the center for best effect. Also,
we will shuffle the order of assessment so that all of our best images aren't
necessarily in one spot.
"""
config = Config(
tile_ratio = tile_ratio, # height/width of mosaic tiles in pixels
tile_width = tile_width, # height/width of mosaic tiles in pixels
enlargement = enlargement, # the mosaic image will be this many times wider and taller than the original
color_mode = color_mode, # L for greyscale or RGB for color
)
# Pull in and Process Original Image
print('Setting Up Target image')
source_image = SourceImage(source_path, config)
# Setup Mosaic
mosaic = MosaicImage(source_image.image, target, config)
# Assest Tiles, and save if needed, returns directories where the small and large pictures are stored
print('Assessing Tiles')
tile_box = TileBox(tile_paths, config)
try:
progress = ProgressCounter(mosaic.total_tiles)
for x, y in coords_from_middle(mosaic.x_tile_count, mosaic.y_tile_count, y_bias=config.tile_ratio, shuffle_first=shuffle_first):
progress.update()
# Make a box for this sector
box_crop = (x * config.tile_width, y * config.tile_height, (x + 1) * config.tile_width, (y + 1) * config.tile_height)
# Get Original Image Data for this Sector
comparison_block = source_image.image.crop(box_crop)
# Get Best Image name that matches the Orig Sector image
tile_match = tile_box.best_tile_from_block(comparison_block, reuse=reuse)
# Add Best Match to Mosaic
mosaic.add_tile(tile_match, box_crop)
# Saving Every Sector
mosaic.save()
except KeyboardInterrupt:
print('\nStopping, saving partial image...')
finally:
mosaic.save()
It's OK, this is the new file I had to create in order for it to work:
create_mosaic(
subject="/path/to/source/image",
target="/path/to/output/image",
tile_paths=["/path/to/tile_1" , ... "/path/to/tile_n"],
tile_ratio=1920/800, # Crop tiles to be height/width ratio
tile_width=300, # Tile will be scaled
enlargement=20, # Mosiac will be this times larger than original
reuse=False, # Should tiles be used multiple times?
color_mode='L', # RGB (color) L (greyscale)
)
Problem resolved.
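For completeness, with the paths from my question filled in, the call looks roughly like this (assuming the create_mosaic function above is in scope, e.g. defined in the same file, and keeping the keyword names from its signature):
import glob

tile_paths = glob.glob("C:/Users/Sylvia/Desktop/tiles/*.jpg")

create_mosaic(
    source_path="C:/Users/Sylvia/Desktop/source/1.jpg",
    target="C:/Users/Sylvia/Desktop/source/result.jpg",
    tile_ratio=1920/800,
    tile_width=75,
    enlargement=8,
    reuse=True,
    color_mode='RGB',
    tile_paths=tile_paths,
    shuffle_first=30,
)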

python pyparrot image processing question

I'm trying to build code that flies a camera-equipped drone using demoMamboVisionGUI.py below. When the code is executed, the camera screen comes up and I press the button to start the flight. The first code below displays four cam screens and detects a straight line while detecting a specified color value, blue (BGR2HSV). Using these two pieces of code, the camera recognizes the blue straight line, flies forward little by little, turns left and right at a certain angle, lands when it recognizes the specified color (red) on the ground below, and starts flying again with another button. I want to make the code recognize green and land. I would appreciate it if you could throw me a simple hint; the kind of green check I have in mind is sketched just below.
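(A rough sketch only: the HSV bounds for green and the coverage threshold are guesses I would still need to tune, and the landing hook is only indicated in comments.)
import cv2
import numpy as np

def should_land_on_green(frame_bgr, min_ratio=0.3):
    """Return True when a large enough share of the frame is green (thresholds untuned)."""
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    lower_g = np.array([40, 80, 80])      # assumed green range (OpenCV hue runs 0-179)
    upper_g = np.array([80, 255, 255])
    mask = cv2.inRange(hsv, lower_g, upper_g)
    green_ratio = cv2.countNonZero(mask) / float(mask.size)
    return green_ratio >= min_ratio

# Inside the vision callback / flight loop, roughly:
# img = self.vision.get_latest_valid_picture()
# if img is not None and should_land_on_green(img):
#     mambo.safe_land(5)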
import cv2
import numpy as np
def im_trim(img):
x = 160
y = 50
w = 280
h = 180
img_trim = img[y:y + h, x:x + w]
return img_trim
def go():
minimum = 9999;
min_theta=0;
try:
cap = cv2.VideoCapture(0)
except:
return
while True:
ret, P = cap.read()
img1 = P
cv2.imshow('asdf',img1)
img_HSV = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
img_h, img_s, img_v = cv2.split(img_HSV)
cv2.imshow("HSV", img_HSV)
lower_b = np.array([100, 80, 100])
upper_b = np.array([120, 255, 255])
blue = cv2.inRange(img_HSV, lower_b, upper_b)
cv2.imshow('root',blue)
edges = cv2.Canny(blue, 50, 150, apertureSize =3)
lines = cv2.HoughLines(edges, 1, np.pi/180, threshold = 100)
if lines is not None:
for line in lines:
r, theta = line[0]
#if (r<minimum and r>0) and (np.rad2deg(theta)>-90 and np.rad2deg(theta)<90):
#minimum = r
#min_theta = theta
#if (r > 0 and r < 250) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=-7, yaw=0, vertical_movement=0,
# duration=1)
#print("right")
#elif (r > 400 and r < 650) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=7, yaw=0, vertical_movement=0,
# duration=1)
print(r, np.rad2deg(theta))
#Repeat the if-statements below inside the while loop until the detected path becomes a straight path, then proceed
#if(np.rad2deg(min_theta) >= some angle, or <= some angle):
# below -> turn left, above -> turn right, in between -> go straight
a = np.cos(theta)
b = np.sin(theta)
x0 = a * r
y0 = b * r
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img1, (x1,y1), (x2,y2), (0,255,0), 3)
cv2.imshow('hough',img1)
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
go()
print("??")
================================================================================================
"""
Demo of the Bebop vision using DroneVisionGUI that relies on libVLC. It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Minidrone import Mambo
from pyparrot.DroneVisionGUI import DroneVisionGUI
import cv2
# set this to true if you want to fly for the demo
testFlying = True
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
# print("in save pictures on image %d " % self.index)
img = self.vision.get_latest_valid_picture()
if (img is not None):
filename = "test_image_%06d.png" % self.index
# uncomment this if you want to write out images every time you get a new one
#cv2.imwrite(filename, img)
self.index +=1
#print(self.index)
def demo_mambo_user_vision_function(mamboVision, args):
"""
Demo the user code to run with the run button for a mambo
:param args:
:return:
"""
mambo = args[0]
if (testFlying):
print("taking off!")
mambo.safe_takeoff(5)
if (mambo.sensors.flying_state != "emergency"):
print("flying state is %s" % mambo.sensors.flying_state)
print("Flying direct: going up")
mambo.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=15, duration=2)
print("flip left")
print("flying state is %s" % mambo.sensors.flying_state)
success = mambo.flip(direction="left")
print("mambo flip result %s" % success)
mambo.smart_sleep(5)
print("landing")
print("flying state is %s" % mambo.sensors.flying_state)
mambo.safe_land(5)
else:
print("Sleeeping for 15 seconds - move the mambo around")
mambo.smart_sleep(15)
# done doing vision demo
print("Ending the sleep and vision")
mamboVision.close_video()
mambo.smart_sleep(5)
print("disconnecting")
mambo.disconnect()
if __name__ == "__main__":
# you will need to change this to the address of YOUR mambo
mamboAddr = "B0:FC:36:F4:37:F9"
# make my mambo object
# remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
mambo = Mambo(mamboAddr, use_wifi=True)
print("trying to connect to mambo now")
success = mambo.connect(num_retries=3)
print("connected: %s" % success)
if (success):
# get the state information
print("sleeping")
mambo.smart_sleep(1)
mambo.ask_for_state_update()
mambo.smart_sleep(1)
print("Preparing to open vision")
mamboVision = DroneVisionGUI(mambo, is_bebop=False, buffer_size=200,
user_code_to_run=demo_mambo_user_vision_function, user_args=(mambo, ))
userVision = UserVision(mamboVision)
mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
mamboVision.open_video()
==========================================================================

data type errors for input images of cv2.calcOpticalFlowPyrLK

I'm running opencv 2.4.1 using python bindings and am having difficulty calculating the optical flow.
Specifically this section of code:
#calculate the opticalflow
if prev_saturation_thresh_img==None:
prev_saturation_thresh_img=saturation_img
if i >=0:
prev_img=prev_saturation_thresh_img
next_img=saturation_thresh_img
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_img,next_img,tracks_np,**lk_params)
Returns the error:
<unknown> is not a numpy array
So then I try to convert the images to numpy arrays:
prev_img=prev_saturation_thresh_img
next_img=saturation_thresh_img
Now I have a new error:
<unknown> data type = 17 is not supported
In a last-ditch effort, I convert the images to cvmat (from iplimage) before converting them to a numpy array, just to see what happens, and get:
error: ..\..\..\OpenCV-2.4.1\modules\video\src\lkpyramid.cpp:607: error: (-215) nextPtsMat.checkVector(2, CV_32F, true) == npoints
So now I'm stuck. Below is the code in its entirety for reference.
import cv
import cv2
import numpy as np
class Target:
def __init__(self):
self.capture = cv.CaptureFromFile("raw_gait_cropped.avi")
def run(self):
#initiate font
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
#instantiate images
img_size=cv.GetSize(cv.QueryFrame(self.capture))
hsv_img=cv.CreateImage(img_size,8,3)
saturation_img=cv.CreateImage(img_size,8,1)
saturation_thresh_img=cv.CreateImage(img_size,8,1)
prev_saturation_thresh_img=None
#create params for GoodFeaturesToTrack and calcOpticalFlowPyrLK
gftt_params = dict( cornerCount=11,
qualityLevel=0.2,
minDistance=5,
mask=None,
useHarris=True
)
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
flags = cv2.OPTFLOW_USE_INITIAL_FLOW,
minEigThreshold=1
)
tracks=[]
writer=cv.CreateVideoWriter("angle_tracking.avi",cv.CV_FOURCC('M','J','P','G'),30,cv.GetSize(hsv_img),1)
i=0
while True:
#grab a frame from the video capture
img=cv.QueryFrame(self.capture)
#break the loop when the video is over
if img == None:
break
#convert the image to HSV
cv.CvtColor(img,hsv_img,cv.CV_BGR2HSV)
#Get Saturation channel
cv.MixChannels([hsv_img],[saturation_img],[(1,0)])
#Apply threshold to saturation channel
cv.InRangeS(saturation_img,145,255,saturation_thresh_img)
#locate initial features to track
if i==0:
eig_image=temp_image = cv.CreateMat(img.height, img.width, cv.CV_32FC1)
for (x,y) in cv.GoodFeaturesToTrack(saturation_thresh_img, eig_image, temp_image, **gftt_params):
tracks.append([(x,y)])
cv.Circle(saturation_thresh_img,(int(x),int(y)),5,(255,255,255),-1,cv.CV_AA,0)
tracks_np=np.float32(tracks).reshape(-1,2)
print tracks
#calculate the opticalflow
if prev_saturation_thresh_img==None:
prev_saturation_thresh_img=saturation_img
if i >=0:
prev_img=prev_saturation_thresh_img
next_img=saturation_thresh_img
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_img,next_img,tracks_np,**lk_params)
prev_saturation_thresh_img=saturation_img
i=i+1
print i
#display frames to users
cv.ShowImage("Raw Video",img)
cv.ShowImage("Saturation Channel",saturation_img)
cv.ShowImage("Saturation Thresholded",saturation_thresh_img)
# Listen for ESC or ENTER key
c = cv.WaitKey(7) % 0x100
if c == 27 or c == 10:
break
#close all windows once video is done
cv.DestroyAllWindows()
if __name__=="__main__":
t = Target()
t.run()
OpenCV can be very picky about the data formats it accepts. The following code extract works for me:
prev = cv.LoadImage('images/'+file_list[0])
prev = np.asarray(prev[:,:])
prev_gs = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
current = cv.LoadImage('images/'+file)
current = np.asarray(current[:,:])
current_gs = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)
features, status, track_error = cv2.calcOpticalFlowPyrLK(prev_gs, current_gs, good_features, None,
**lk_params)
Note the [:,:] when converting from images to numpy arrays, I have found that they are required.
I hope that this may solve your problem.
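It may also be worth checking the point array itself: calcOpticalFlowPyrLK wants the previous points as a float32 array with one point per row (for example shaped (N, 1, 2)), and the flags=cv2.OPTFLOW_USE_INITIAL_FLOW entry in your lk_params requires an initial guess to be passed in nextPts, which is likely what triggers the checkVector assertion on nextPtsMat. Dropping that flag (and the very high minEigThreshold=1) gives something along these lines, untested against your exact setup:
# prev_gs and current_gs as in the snippet above; tracks as built in your code
tracks_np = np.float32(tracks).reshape(-1, 1, 2)   # one point per row, float32
lk_params = dict(winSize=(15, 15), maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gs, current_gs, tracks_np, None, **lk_params)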