How to get USB controller/gamepad to work with Python

I have a USB controller that I'm trying to get inputs from, a Microsoft® SideWinder® Plug & Play Game Pad. I'm having difficulties trying to figure out how to receive its inputs correctly. Unfortunately, I cannot use pygame as it requires a window to receive inputs from, but I have to generate a pyglet window (via PsychoPy) to run my program. With pygame it can connect and show the state of buttons, but it cannot receive inputs without creating a window. I tried looking for other libraries, but all I encountered was Inputs, which isn't compatible with my controller (doesn't detect the device after installing). The controller itself works as I've tested it with an online gamepad tester. PsychoPy's joystick API is currently broken and does not work, so no luck there either.
Does anyone have advice on how to receive inputs from my controller/gamepad in my program?

On Windows you can use WINMM.dll directly.
Use the ctypes library to load the DLL (see Loading shared libraries) and ctypes.WINFUNCTYPE to create prototypes for the functions:
joyGetNumDevs
joyGetDevCapsW
joyGetPosEx
import ctypes
winmmdll = ctypes.WinDLL('winmm.dll')
# [joyGetNumDevs](https://learn.microsoft.com/en-us/windows/win32/api/joystickapi/nf-joystickapi-joygetnumdevs)
"""
UINT joyGetNumDevs();
"""
joyGetNumDevs_proto = ctypes.WINFUNCTYPE(ctypes.c_uint)
joyGetNumDevs_func = joyGetNumDevs_proto(("joyGetNumDevs", winmmdll))
# [joyGetDevCaps](https://learn.microsoft.com/en-us/windows/win32/api/joystickapi/nf-joystickapi-joygetdevcaps)
"""
MMRESULT joyGetDevCaps(UINT uJoyID, LPJOYCAPS pjc, UINT cbjc);
ANSI version: joyGetDevCapsA
Unicode (wide) version: joyGetDevCapsW (used here)
sizeof(JOYCAPS): 728
"""
joyGetDevCaps_proto = ctypes.WINFUNCTYPE(ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p, ctypes.c_uint)
joyGetDevCaps_param = (1, "uJoyID", 0), (1, "pjc", None), (1, "cbjc", 0)
joyGetDevCaps_func = joyGetDevCaps_proto(("joyGetDevCapsW", winmmdll), joyGetDevCaps_param)
# [joyGetPosEx](https://learn.microsoft.com/en-us/windows/win32/api/joystickapi/nf-joystickapi-joygetposex)
"""
MMRESULT joyGetPosEx(UINT uJoyID, LPJOYINFOEX pji);
sizeof(JOYINFOEX): 52
"""
joyGetPosEx_proto = ctypes.WINFUNCTYPE(ctypes.c_uint, ctypes.c_uint, ctypes.c_void_p)
joyGetPosEx_param = (1, "uJoyID", 0), (1, "pji", None)
joyGetPosEx_func = joyGetPosEx_proto(("joyGetPosEx", winmmdll), joyGetPosEx_param)
Create the Python functions joyGetNumDevs, joyGetDevCaps and joyGetPosEx, which delegate to the DLL, and create classes for the types JOYCAPS and JOYINFOEX:
# joystickapi - joyGetNumDevs
def joyGetNumDevs():
    try:
        num = joyGetNumDevs_func()
    except:
        num = 0
    return num

# joystickapi - joyGetDevCaps
def joyGetDevCaps(uJoyID):
    try:
        buffer = (ctypes.c_ubyte * JOYCAPS.SIZE_W)()
        p1 = ctypes.c_uint(uJoyID)
        p2 = ctypes.cast(buffer, ctypes.c_void_p)
        p3 = ctypes.c_uint(JOYCAPS.SIZE_W)
        ret_val = joyGetDevCaps_func(p1, p2, p3)
        ret = (False, None) if ret_val != JOYERR_NOERROR else (True, JOYCAPS(buffer))
    except:
        ret = False, None
    return ret

# joystickapi - joyGetPosEx
def joyGetPosEx(uJoyID):
    try:
        buffer = (ctypes.c_uint32 * (JOYINFOEX.SIZE // 4))()
        buffer[0] = JOYINFOEX.SIZE
        buffer[1] = JOY_RETURNALL
        p1 = ctypes.c_uint(uJoyID)
        p2 = ctypes.cast(buffer, ctypes.c_void_p)
        ret_val = joyGetPosEx_func(p1, p2)
        ret = (False, None) if ret_val != JOYERR_NOERROR else (True, JOYINFOEX(buffer))
    except:
        ret = False, None
    return ret
JOYERR_NOERROR = 0
JOY_RETURNX = 0x00000001
JOY_RETURNY = 0x00000002
JOY_RETURNZ = 0x00000004
JOY_RETURNR = 0x00000008
JOY_RETURNU = 0x00000010
JOY_RETURNV = 0x00000020
JOY_RETURNPOV = 0x00000040
JOY_RETURNBUTTONS = 0x00000080
JOY_RETURNRAWDATA = 0x00000100
JOY_RETURNPOVCTS = 0x00000200
JOY_RETURNCENTERED = 0x00000400
JOY_USEDEADZONE = 0x00000800
JOY_RETURNALL = (JOY_RETURNX | JOY_RETURNY | JOY_RETURNZ |
                 JOY_RETURNR | JOY_RETURNU | JOY_RETURNV |
                 JOY_RETURNPOV | JOY_RETURNBUTTONS)
# joystickapi - JOYCAPS
class JOYCAPS:
    SIZE_W = 728
    OFFSET_V = 4 + 32*2
    def __init__(self, buffer):
        ushort_array = (ctypes.c_uint16 * 2).from_buffer(buffer)
        self.wMid, self.wPid = ushort_array
        wchar_array = (ctypes.c_wchar * 32).from_buffer(buffer, 4)
        self.szPname = ctypes.cast(wchar_array, ctypes.c_wchar_p).value
        uint_array = (ctypes.c_uint32 * 19).from_buffer(buffer, JOYCAPS.OFFSET_V)
        self.wXmin, self.wXmax, self.wYmin, self.wYmax, self.wZmin, self.wZmax, \
        self.wNumButtons, self.wPeriodMin, self.wPeriodMax, \
        self.wRmin, self.wRmax, self.wUmin, self.wUmax, self.wVmin, self.wVmax, \
        self.wCaps, self.wMaxAxes, self.wNumAxes, self.wMaxButtons = uint_array

# joystickapi - JOYINFOEX
class JOYINFOEX:
    SIZE = 52
    def __init__(self, buffer):
        uint_array = (ctypes.c_uint32 * (JOYINFOEX.SIZE // 4)).from_buffer(buffer)
        self.dwSize, self.dwFlags, \
        self.dwXpos, self.dwYpos, self.dwZpos, self.dwRpos, self.dwUpos, self.dwVpos, \
        self.dwButtons, self.dwButtonNumber, self.dwPOV, self.dwReserved1, self.dwReserved2 = uint_array
Here is a simple example to test the API:
import joystickapi
import msvcrt
import time

print("start")
num = joystickapi.joyGetNumDevs()
ret, caps, startinfo = False, None, None
for id in range(num):
    ret, caps = joystickapi.joyGetDevCaps(id)
    if ret:
        print("gamepad detected: " + caps.szPname)
        ret, startinfo = joystickapi.joyGetPosEx(id)
        break
else:
    print("no gamepad detected")

run = ret
while run:
    time.sleep(0.1)
    if msvcrt.kbhit() and msvcrt.getch() == chr(27).encode():  # detect ESC
        run = False
    ret, info = joystickapi.joyGetPosEx(id)
    if ret:
        btns = [(1 << i) & info.dwButtons != 0 for i in range(caps.wNumButtons)]
        axisXYZ = [info.dwXpos-startinfo.dwXpos, info.dwYpos-startinfo.dwYpos, info.dwZpos-startinfo.dwZpos]
        axisRUV = [info.dwRpos-startinfo.dwRpos, info.dwUpos-startinfo.dwUpos, info.dwVpos-startinfo.dwVpos]
        if info.dwButtons:
            print("buttons: ", btns)
        if any([abs(v) > 10 for v in axisXYZ]):
            print("axis:", axisXYZ)
        if any([abs(v) > 10 for v in axisRUV]):
            print("rotation axis:", axisRUV)

print("end")
The API binding and the example are provided in the GitHub repository python_windows_joystickapi.
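Since the question needs input inside an existing pyglet/PsychoPy window, the same binding can also be polled from the frame loop rather than a console loop. A minimal sketch with illustrative names (poll, joy_id), assuming the joystickapi module above is importable and run on Windows; the 60 Hz rate is an arbitrary choice:

import pyglet
import joystickapi

window = pyglet.window.Window(400, 300, caption="gamepad test")

# Find the first device whose capabilities can be read.
joy_id = None
for i in range(joystickapi.joyGetNumDevs()):
    ok, caps = joystickapi.joyGetDevCaps(i)
    if ok:
        joy_id = i
        break

def poll(dt):
    # Poll the gamepad once per scheduled tick.
    if joy_id is None:
        return
    ok, info = joystickapi.joyGetPosEx(joy_id)
    if ok and info.dwButtons:
        print("button bitmask:", bin(info.dwButtons))

pyglet.clock.schedule_interval(poll, 1 / 60.0)
pyglet.app.run()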

Related

I want to process a live stream from YouTube with OpenCV

I am posting this via translation.
I would like to use a YouTube live stream for action recognition.
Action recognition: https://github.com/felixchenfy/Realtime-Action-Recognition
Reading a YouTube live stream with OpenCV: How to read Youtube live stream using openCV python?
YouTube URL: https://www.youtube.com/watch?v=DjdUEyjx8GM
We programmed it with reference to the above. The program runs normally for a few hours, but after 4 or 5 hours I get an error saying that communication from the remote host has been lost. Checking the YouTube video, the stream itself does not seem to be interrupted.
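One detail worth noting: the direct stream URL that pafy resolves typically carries an expiry timestamp, so a capture opened once can die after some hours even while the live stream continues. A minimal reopen-on-failure sketch (illustrative names; assumes cv2 and pafy as used below, and re-resolves a fresh URL on each reconnect):

import time
import cv2
import pafy

def resolve_stream_url():
    # Re-resolve the stream URL, since previously resolved URLs expire.
    return pafy.new("https://www.youtube.com/watch?v=DjdUEyjx8GM").getbest(preftype="mp4").url

def frames_with_reconnect(max_retries=5):
    cap = cv2.VideoCapture(resolve_stream_url())
    while True:
        ret, frame = cap.read()
        if ret:
            yield frame
            continue
        # Read failed: release and try to reopen with a fresh URL.
        cap.release()
        for attempt in range(max_retries):
            time.sleep(2 ** attempt)  # simple backoff before reopening
            cap = cv2.VideoCapture(resolve_stream_url())
            if cap.isOpened():
                break
        else:
            return  # give up after repeated failures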
Partial code for the video capture is below.
url = "https://www.youtube.com/watch?v=DjdUEyjx8GM"
video = pafy.new(url)
best = video.getbest(preftype="mp4")

class ReadFromVideo(object):
    def __init__(self, video_path, sample_interval=1):
        ''' A video reader class for reading video frames from video.
        Arguments:
            video_path
            sample_interval {int}: sample every kth image.
        '''
        assert isinstance(sample_interval, int) and sample_interval >= 1
        self.cnt_imgs = 0
        self._is_stopped = False
        self._video = cv2.VideoCapture(best.url)
        ret, image = self._video.read()
        # NOTE: self._video2 is read here and below but is never created in
        # this snippet; presumably a second capture initialized elsewhere.
        ret, image2 = self._video2.read()
        self._next_image = image
        self._next_image2 = image2
        self._sample_interval = sample_interval
        self._fps = self.get_fps()
        if not self._fps >= 0.0001:
            import warnings
            warnings.warn("Invalid fps of video: {}".format(video_path))

    def has_image(self):
        return self._next_image is not None

    def get_curr_video_time(self):
        return 1.0 / self._fps * self.cnt_imgs

    def read_image(self):
        image = self._next_image
        image2 = self._next_image2
        for i in range(self._sample_interval):
            if self._video.isOpened():
                ret, frame = self._video.read()
                self._next_image = frame
                ret, frame2 = self._video2.read()
                self._next_image2 = frame2
            else:
                self._next_image = None
                self._next_image2 = None
                break
        self.cnt_imgs += 1
        return image, image2

    def stop(self):
        self._video.release()
        self._video2.release()
        self._is_stopped = True

    def __del__(self):
        if not self._is_stopped:
            self.stop()

    def get_fps(self):
        # Find OpenCV version
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
        # With a webcam, get(CV_CAP_PROP_FPS) does not work, so branch on the version.
        # Get video properties
        if int(major_ver) < 3:
            fps = self._video.get(cv2.cv.CV_CAP_PROP_FPS)
        else:
            fps = self._video.get(cv2.CAP_PROP_FPS)
        return fps
#!/usr/bin/env python
# coding: utf-8
'''
Test action recognition on
(1) a video, (2) a folder of images, (3) or web camera.
Input:
model: model/trained_classifier.pickle
Output:
result video: output/${video_name}/video.avi
result skeleton: output/${video_name}/skeleton_res/XXXXX.txt
visualization by cv2.imshow() in img_displayer
'''
The entire source code is here:
'''
Example of usage:
(1) Test on video file:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type video \
    --data_path data_test/exercise.avi \
    --output_folder output
(2) Test on a folder of images:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type folder \
    --data_path data_test/apple/ \
    --output_folder output
(3) Test on web camera:
python src/s5_test.py \
    --model_path model/trained_classifier.pickle \
    --data_type webcam \
    --data_path 0 \
    --output_folder output
'''
if True:  # Include project path
    import sys
    import os
    ROOT = os.path.dirname(os.path.abspath(__file__)) + "/../"
    CURR_PATH = os.path.dirname(os.path.abspath(__file__)) + "/"
    sys.path.append(ROOT)
    import utils.lib_images_io as lib_images_io
    import utils.lib_plot as lib_plot
    import utils.lib_commons as lib_commons
    from utils.lib_openpose import SkeletonDetector
    from utils.lib_tracker import Tracker
    from utils.lib_classifier import ClassifierOnlineTest
    from utils.lib_classifier import *  # Import all sklearn related libraries

def par(path):  # Pre-Append ROOT to the path if it's not absolute
    return ROOT + path if (path and path[0] != "/") else path
# -- Command-line input
def get_command_line_arguments():
    def parse_args():
        parser = argparse.ArgumentParser(
            description="Test action recognition on \n"
                        "(1) a video, (2) a folder of images, (3) or web camera.")
        parser.add_argument("-m", "--model_path", required=False,
                            default='model/trained_classifier.pickle')
        parser.add_argument("-t", "--data_type", required=False, default='webcam',
                            choices=["video", "folder", "webcam"])
        parser.add_argument("-p", "--data_path", required=False, default="",
                            help="path to a video file, or images folder, or webcam. \n"
                                 "For video and folder, the path should be "
                                 "absolute or relative to this project's root. "
                                 "For webcam, either input an index or device name. ")
        parser.add_argument("-o", "--output_folder", required=False, default='output/',
                            help="Which folder to save result to.")
        args = parser.parse_args()
        return args

    args = parse_args()
    if args.data_type != "webcam" and args.data_path and args.data_path[0] != "/":
        # If the path is not absolute, then it's relative to the ROOT.
        args.data_path = ROOT + args.data_path
    return args
def get_dst_folder_name(src_data_type, src_data_path):
    ''' Compute an output folder name based on data_type and data_path.
    The final output of this script looks like this:
        DST_FOLDER/folder_name/video.avi
        DST_FOLDER/folder_name/skeletons/XXXXX.txt
    '''
    assert(src_data_type in ["video", "folder", "webcam"])
    if src_data_type == "video":  # /root/data/video.avi --> video
        folder_name = str("video")
    elif src_data_type == "folder":  # /root/data/video/ --> video
        folder_name = src_data_path.rstrip("/").split("/")[-1]
    elif src_data_type == "webcam":
        # month-day-hour-minute-seconds, e.g.: 02-26-15-51-12
        folder_name = lib_commons.get_time_string()
    return folder_name
args = get_command_line_arguments()
SRC_DATA_TYPE = args.data_type
SRC_DATA_PATH = args.data_path
SRC_MODEL_PATH = args.model_path
DST_FOLDER_NAME = get_dst_folder_name(SRC_DATA_TYPE, SRC_DATA_PATH)
# -- Settings
cfg_all = lib_commons.read_yaml(ROOT + "config/config.yaml")
cfg = cfg_all["s5_test.py"]
CLASSES = np.array(cfg_all["classes"])
SKELETON_FILENAME_FORMAT = cfg_all["skeleton_filename_format"]
# Action recognition: number of frames used to extract features.
WINDOW_SIZE = int(cfg_all["features"]["window_size"])
# Output folder
DST_FOLDER = args.output_folder + "/" + DST_FOLDER_NAME + "/"
DST_SKELETON_FOLDER_NAME = cfg["output"]["skeleton_folder_name"]
DST_VIDEO_NAME = cfg["output"]["video_name"]
# framerate of output video.avi
DST_VIDEO_FPS = float(cfg["output"]["video_fps"])
# writer
fmt = cv2.VideoWriter_fourcc(*'MP4V')
fps = 10.0
size = (800, 480)
writer = cv2.VideoWriter('outtest.mp4', fmt, fps, size)
backup_time = 3600 # sec
time = 0 # init
now = datetime.datetime.now()
s_now = now.strftime('%Y-%m-%d-%H-%M-%S')
back_up = cv2.VideoWriter(f'{s_now}.mp4',fmt, fps, size)
# Video settings
# If data_type is webcam, set the max frame rate.
SRC_WEBCAM_MAX_FPS = float(cfg["settings"]["source"]["webcam_max_framerate"])
# If data_type is video, set the sampling interval.
# For example, if it's 3, then the video will be read 3 times faster.
SRC_VIDEO_SAMPLE_INTERVAL = int(cfg["settings"]["source"]["video_sample_interval"])
# Openpose settings
OPENPOSE_MODEL = cfg["settings"]["openpose"]["model"]
OPENPOSE_IMG_SIZE = cfg["settings"]["openpose"]["img_size"]
# Display settings
img_disp_desired_rows = int(cfg["settings"]["display"]["desired_rows"])
# -- Function
def select_images_loader(src_data_type, src_data_path):
    if src_data_type == "video":
        images_loader = lib_images_io.ReadFromVideo(
            src_data_path,
            sample_interval=SRC_VIDEO_SAMPLE_INTERVAL)
    elif src_data_type == "folder":
        images_loader = lib_images_io.ReadFromFolder(
            folder_path=src_data_path)
    elif src_data_type == "webcam":
        if src_data_path == "":
            webcam_idx = 0
        elif src_data_path.isdigit():
            webcam_idx = int(src_data_path)
        else:
            webcam_idx = src_data_path
        images_loader = lib_images_io.ReadFromWebcam(
            SRC_WEBCAM_MAX_FPS, webcam_idx)
    return images_loader
class MultiPersonClassifier(object):
    ''' This is a wrapper around ClassifierOnlineTest
    for recognizing actions of multiple people.
    '''

    def __init__(self, model_path, classes):
        self.dict_id2clf = {}  # human id -> classifier of this person
        # Define a function for creating classifier for new people.
        self._create_classifier = lambda human_id: ClassifierOnlineTest(
            model_path, classes, WINDOW_SIZE, human_id)

    def classify(self, dict_id2skeleton):
        ''' Classify the action type of each skeleton in dict_id2skeleton '''
        # Clear people not in view
        old_ids = set(self.dict_id2clf)
        cur_ids = set(dict_id2skeleton)
        humans_not_in_view = list(old_ids - cur_ids)
        for human in humans_not_in_view:
            del self.dict_id2clf[human]
        # Predict each person's action
        id2label = {}
        for id, skeleton in dict_id2skeleton.items():
            if id not in self.dict_id2clf:  # add this new person
                self.dict_id2clf[id] = self._create_classifier(id)
            classifier = self.dict_id2clf[id]
            id2label[id] = classifier.predict(skeleton)  # predict label
            # print("\n\nPredicting label for human{}".format(id))
            # print("  skeleton: {}".format(skeleton))
            # print("  label: {}".format(id2label[id]))
        return id2label

    def get_classifier(self, id):
        ''' Get the classifier based on the person id.
        Arguments:
            id {int or "min"}
        '''
        if len(self.dict_id2clf) == 0:
            return None
        if id == 'min':
            id = min(self.dict_id2clf.keys())
        return self.dict_id2clf[id]
def remove_skeletons_with_few_joints(skeletons):
    ''' Remove bad skeletons before sending to the tracker '''
    good_skeletons = []
    for skeleton in skeletons:
        px = skeleton[2:2+13*2:2]
        py = skeleton[3:2+13*2:2]
        num_valid_joints = len([x for x in px if x != 0])
        num_leg_joints = len([x for x in px[-6:] if x != 0])
        total_size = max(py) - min(py)
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        # IF JOINTS ARE MISSING, TRY CHANGING THESE VALUES:
        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        if num_valid_joints >= 5 and total_size >= 0.1 and num_leg_joints >= 0:
            # add this skeleton only when all requirements are satisfied
            good_skeletons.append(skeleton)
    return good_skeletons
def draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                    skeleton_detector, multiperson_classifier):
    ''' Draw skeletons, labels, and prediction scores onto image for display '''
    # Resize to a proper size for display
    r, c = img_disp.shape[0:2]
    desired_cols = int(1.0 * c * (img_disp_desired_rows / r))
    img_disp = cv2.resize(img_disp,
                          dsize=(desired_cols, img_disp_desired_rows))
    # Draw all people's skeleton
    skeleton_detector.draw(img_disp, humans)
    # Draw bounding box and label of each person
    if len(dict_id2skeleton):
        for id, label in dict_id2label.items():
            skeleton = dict_id2skeleton[id]
            # scale the y data back to original
            skeleton[1::2] = skeleton[1::2] / scale_h
            # print("Drawing skeleton: ", dict_id2skeleton[id], "with label:", label, ".")
            lib_plot.draw_action_result(img_disp, id, skeleton, label)
    # Add blank to the left for displaying prediction scores of each class
    img_disp = lib_plot.add_white_region_to_left_of_image(img_disp)
    cv2.putText(img_disp, "Frame:" + str(ith_img),
                (20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
                color=(0, 0, 0), thickness=2)
    # Draw predicting score for only 1 person
    if len(dict_id2skeleton):
        classifier_of_a_person = multiperson_classifier.get_classifier(
            id='min')
        classifier_of_a_person.draw_scores_onto_image(img_disp)
    return img_disp

    # NOTE: the lines below are unreachable as posted (they follow the return)
    # and reference img2_disp/dict2_id2skeleton, so they apparently belong to a
    # second copy of this function for a second stream.
    # Time Display
    cv2.putText(img2_disp, str(now_time.isoformat(timespec='seconds')),
                (20, 20), fontScale=1.5, fontFace=cv2.FONT_HERSHEY_PLAIN,
                color=(0, 0, 0), thickness=2)
    # Draw predicting score for only 1 person
    if len(dict2_id2skeleton):
        classifier_of_a_person = multiperson_classifier.get_classifier(
            id='min')
        classifier_of_a_person.draw_scores_onto_image(img2_disp)
    return img2_disp
def get_the_skeleton_data_to_save_to_disk(dict_id2skeleton):
    '''
    In each image, for each skeleton, save the:
        human_id, label, and the skeleton positions of length 18*2.
    So the total length per row is 2+36=38
    '''
    skels_to_save = []
    for human_id in dict_id2skeleton.keys():
        label = dict_id2label[human_id]
        skeleton = dict_id2skeleton[human_id]
        skels_to_save.append([[human_id, label] + skeleton.tolist()])
    return skels_to_save
# -- Main
if __name__ == "__main__":
    # -- Detector, tracker, classifier
    skeleton_detector = SkeletonDetector(OPENPOSE_MODEL, OPENPOSE_IMG_SIZE)
    multiperson_tracker = Tracker()
    multiperson_classifier = MultiPersonClassifier(SRC_MODEL_PATH, CLASSES)
    # -- Image reader and displayer
    images_loader = select_images_loader(SRC_DATA_TYPE, SRC_DATA_PATH)
    img_displayer = lib_images_io.ImageDisplayer()
    # -- Init output
    # output folder
    os.makedirs(DST_FOLDER, exist_ok=True)
    os.makedirs(DST_FOLDER + DST_SKELETON_FOLDER_NAME, exist_ok=True)
    # -- Read images and process
    try:
        ith_img = -1
        while images_loader.has_image():
            # Timer
            now = datetime.datetime.now()
            now_time = now.time()
            # -- Read image
            #img = images_loader.read_image()
            img = images_loader.read_image()
            ith_img += 1
            img_disp = img.copy()
            # print(f"\nProcessing {ith_img}th image ...")
            # -- Detect skeletons
            humans = skeleton_detector.detect(img)
            skeletons, scale_h = skeleton_detector.humans_to_skels_list(humans)
            skeletons = remove_skeletons_with_few_joints(skeletons)
            # -- Track people
            dict_id2skeleton = multiperson_tracker.track(skeletons)
            # -- Log & Recognize action of each person
            log.append(str(now_time))  # NOTE: `log` is not defined in the posted code
            if len(dict_id2skeleton):
                dict_id2label = multiperson_classifier.classify(
                    dict_id2skeleton)
            # -- Draw
            img_disp = draw_result_img(img_disp, ith_img, humans, dict_id2skeleton,
                                       skeleton_detector, multiperson_classifier)
            # Print label of a person
            if len(dict_id2skeleton):
                min_id = min(dict_id2skeleton.keys())
                # print("predicted label is :", dict_id2label[min_id])
            # -- Display image, and write to video.avi
            # video writer
            if time >= backup_time * fps:
                back_up.release()
                now = datetime.datetime.now()
                s_now = now.strftime('%Y-%m-%d-%H-%M-%S')
                back_up = cv2.VideoWriter(f'{s_now}.mp4', fmt, fps, size)
                time = 0
            time += 1
            writer.write(img_disp)
            back_up.write(img_disp)
            img_displayer.display(img_disp, wait_key_ms=1)
            # -- Get skeleton data and save to file
            skels_to_save = get_the_skeleton_data_to_save_to_disk(dict_id2skeleton)
            lib_commons.save_listlist(DST_FOLDER + DST_SKELETON_FOLDER_NAME +
                                      SKELETON_FILENAME_FORMAT.format(ith_img),
                                      skels_to_save)
            if cv2.waitKey(1) == 27:
                break
    finally:
        writer.release()
        back_up.release()
        cv2.destroyAllWindows()
Thanks for your help.

AttributeError: module 'uctypes' has no attribute 'INT32'

When I try to run rcReceiver_IBUS.py, I am getting this error:
module 'uctypes' has no attribute 'INT32'
timer.py line 17
Your program tries to access attribute INT32 of an object of type 'uctypes', but this type doesn't have such an attribute.
rcReceiver_IBUS.py
from machine import UART
import time

buffer = bytearray(31)
uart = UART(1, 115200)  # uart1 tx-pin 4, rx-pin 5
while True:
    c = uart.read(1)
    if c[0] == 0x20:
        uart.readinto(buffer)
        checksum = 0xffff - 0x20
        for i in range(29):
            checksum -= buffer[i]
        if checksum == (buffer[30] << 8) | buffer[29]:
            # skip buffer[0] = 0x40
            # channel values are 16-bit little-endian: high byte * 256 + low byte
            ch1 = buffer[2]*256+buffer[1]
            ch2 = buffer[4]*256+buffer[3]
            ch3 = buffer[6]*256+buffer[5]
            ch4 = buffer[8]*256+buffer[7]
            ch5 = buffer[10]*256+buffer[9]
            ch6 = buffer[12]*256+buffer[11]
            print('ch 1-',ch1,' 2-',ch2,' 3-',ch3,' 4-',ch4,' 5-',ch5,' 6-',ch6)
timer.py
import ffilib
import uctypes
import array
import uos
import os
import utime
from signal import *

libc = ffilib.libc()
librt = ffilib.open("librt")

CLOCK_REALTIME = 0
CLOCK_MONOTONIC = 1
SIGEV_SIGNAL = 0

sigval_t = {
    "sival_int": uctypes.INT32 | 0,
    "sival_ptr": (uctypes.PTR | 0, uctypes.UINT8),
}

sigevent_t = {
    "sigev_value": (0, sigval_t),
    "sigev_signo": uctypes.INT32 | 8,
    "sigev_notify": uctypes.INT32 | 12,
}

timespec_t = {
    "tv_sec": uctypes.INT32 | 0,
    "tv_nsec": uctypes.INT64 | 8,
}

itimerspec_t = {
    "it_interval": (0, timespec_t),
    "it_value": (16, timespec_t),
}

__libc_current_sigrtmin = libc.func("i", "__libc_current_sigrtmin", "")
SIGRTMIN = __libc_current_sigrtmin()

timer_create_ = librt.func("i", "timer_create", "ipp")
timer_settime_ = librt.func("i", "timer_settime", "PiPp")

def new(sdesc):
    buf = bytearray(uctypes.sizeof(sdesc))
    s = uctypes.struct(uctypes.addressof(buf), sdesc, uctypes.NATIVE)
    return s

def timer_create(sig_id):
    sev = new(sigevent_t)
    #print(sev)
    sev.sigev_notify = SIGEV_SIGNAL
    sev.sigev_signo = SIGRTMIN + sig_id
    timerid = array.array('P', [0])
    r = timer_create_(CLOCK_MONOTONIC, sev, timerid)
    os.check_error(r)
    #print("timerid", hex(timerid[0]))
    return timerid[0]

def timer_settime(tid, hz):
    period = 1000000000 // hz
    new_val = new(itimerspec_t)
    new_val.it_value.tv_nsec = period
    new_val.it_interval.tv_nsec = period
    #print("new_val:", bytes(new_val))
    old_val = new(itimerspec_t)
    #print(new_val, old_val)
    r = timer_settime_(tid, 0, new_val, old_val)
    os.check_error(r)
    #print("old_val:", bytes(old_val))
    #print("timer_settime", r)

class Timer:
    def __init__(self, id, freq):
        self.id = id
        self.tid = timer_create(id)
        self.freq = freq

    def callback(self, cb):
        self.cb = cb
        timer_settime(self.tid, self.freq)
        org_sig = signal(SIGRTMIN + self.id, self.handler)
        #print("Sig %d: %s" % (SIGRTMIN + self.id, org_sig))

    def handler(self, signum):
        #print('Signal handler called with signal', signum)
        self.cb(self)
But when I open uctypes.py, INT32 is in the file. It seems there is no problem in the file. What am I missing?
uctypes.py
ARRAY = -1073741824
BFINT16 = -671088640
BFINT32 = -402653184
BFINT8 = -939524096
BFUINT16 = -805306368
BFUINT32 = -536870912
BFUINT8 = -1073741824
BF_LEN = 22
BF_POS = 17
BIG_ENDIAN = 1
FLOAT32 = -268435456
FLOAT64 = -134217728
INT16 = 402653184
INT32 = 671088640
INT64 = 939524096
INT8 = 134217728
LITTLE_ENDIAN = 0
NATIVE = 2
PTR = 536870912
UINT16 = 268435456
UINT32 = 536870912
UINT64 = 805306368
UINT8 = 0
VOID = 0
def addressof():
    pass

def bytearray_at():
    pass

def bytes_at():
    pass

def sizeof():
    pass

class struct:
    ""

Thread pool is slow and same speed as serial

I'm trying to speed up extensive real-time object detection and the computation done on it.
I'm using OpenCV with a thread pool and a producer/consumer scheme for the video capture, but the execution speed is the same as the serial version.
How can I improve the execution speed?
if __name__ == "__main__":
    video_name = '2016-11-18_07-30-01.h264'
    cap = cv2.VideoCapture(video_name)
    det = detector.CarDetector()
    car_tracker = Sort_Algorithm.Sort()
    ped_tracker = Sort_Algorithm.Sort()
    df_region, df_line = load_filter()
    region = Region(df_region)
    distance = compute_max_polygon_diagonal(df_region) * 0.1
    region_buffered = region.buffer(distance)
    threadn = cv2.getNumberOfCPUs()
    pool = ThreadPool(processes=2)
    pending = deque()
    threaded_mode = True
    lock = threading.Lock()
    while True:
        while len(pending) > 0 and pending[0].ready():
            res = pending.popleft().get()
            cv2.imshow('video ', res)
        if len(pending) < threadn:
            ret, frame = cap.read()
            if threaded_mode:
                t1 = time.time()
                H = [-2.01134074616, -16.6502442427, -1314.05715739, -3.35391526592, -22.3546973012, 2683.63584335,
                     -0.00130731963137, -0.0396207582264, 1]
                matrix = np.reshape(H, (3, 3))
                dst = cv2.warpPerspective(frame.copy(), matrix, (frame.shape[1], frame.shape[0]))
                task = pool.apply_async(pipeline, (lock, frame.copy(), car_tracker, ped_tracker, df_region, region_buffered, df_line, det, dst, matrix))
                cv2.imshow('dst', dst)
            else:
                task = DummyTask(pipeline, (lock, frame.copy(), car_tracker, ped_tracker, df_region, region_buffered, df_line, det, dst, matrix))
            pending.append(task)
        ch = cv2.waitKey(1)
        if ch == ord(' '):
            threaded_mode = not threaded_mode
        if ch == 27:
            break
The code for pipeline:
def pipeline(lock, img, car_tracker, ped_tracker, df_region, region_buffered, df_line, det, dst, H):
    lock.acquire()
    global point_lists
    global df_car_lists
    global frame_idx
    global counter
    global data_peds
    global data_cars
    global genera_data_pd_cars
    global genera_data_pd_peds
    car_box, ped_box = det.get_localization(img)
    car_detections = car_tracker.update(np.array(car_box))
    ped_detections = ped_tracker.update(np.array(ped_box))
    saved_region = df_region.values
    saved_region = np.delete(saved_region, 2, 1)
    frame_idx += 1
    cv2.warpPerspective(np.array(df_line, dtype=np.float32), H, (df_line.shape[1], df_line.shape[0]))
    cv2.polylines(dst, np.int32([[saved_region]]), False, color=(255, 0, 0))
    cv2.polylines(dst, np.int32([np.array(df_line, dtype=np.float32)]), False, color=(255, 0, 0))
    for trk in car_detections:
        trk = trk.astype(np.int32)
        helpers.draw_box_label(img, trk, trk[4])  # Draw the bounding boxes on the image
    for other in ped_detections:
        other = other.astype(np.int32)
        helpers.draw_box_label(img, other, other[4])  # Draw the bounding boxes on the image
    for trk in car_detections:
        trk = trk.astype(np.int32)
        p = np.array([[((trk[1] + trk[3]) / 2, (trk[0] + trk[2]) / 2)]], dtype=np.float32)
        center_pt = cv2.perspectiveTransform(p, H)
        ptx = center_pt.T.item(0)
        pty = center_pt.T.item(1)
        df_cars = compute(trk[4], ptx, pty, frame_idx, df_region, region_buffered, df_line)
        genera_data_pd_cars = genera_data_pd_cars.append(df_cars)
    for other in ped_detections:
        other = other.astype(np.int32)
        p = np.array([[((other[1] + other[3]) / 2, (other[0] + other[2]) / 2)]], dtype=np.float32)
        center_pt = cv2.perspectiveTransform(p, H)
        ptx = center_pt.T.item(0)
        pty = center_pt.T.item(1)
        df_peds = compute(other[4], ptx, pty, frame_idx, df_region, region_buffered, df_line)
        genera_data_pd_peds = genera_data_pd_cars.append(df_peds)
    query = "is_in_region == True and is_in_region_now == True"
    df_peds = genera_data_pd_peds.query(query)
    query = " is_in_region == True"
    df_cars = genera_data_pd_cars.query(query)
    if len(df_cars) > 1 and len(df_peds) > 1:
        df_car_in_t_range_ped = select_slice(df_cars, df_peds)
        df_ped_in_t_range_car = select_slice(df_peds, df_cars)
        t_abs_crossing_car = df_cars['t_abs_at_crossing'].iloc[0]
        t_abs_crossing_ped = df_peds['t_abs_at_crossing'].iloc[0]
        dt_crossing = t_abs_crossing_car - t_abs_crossing_ped
        is_able_to_pass_before_ped = \
            ((df_car_in_t_range_ped['t_abs_at_crossing_estimated'] -
              t_abs_crossing_ped) > 0).any()
        behavior = Behavior(
            dt_crossing < 0,                                 # is_passed_before_ped
            df_car_in_t_range_ped['is_able_to_halt'].any(),  # is_able_to_stop
            df_car_in_t_range_ped['is_too_fast'].any(),      # is_too_fast
            df_car_in_t_range_ped['is_close_enough'].any(),  # is_close_enough
            is_able_to_pass_before_ped)                      # is_able_to_pass_before_ped
        interaction = Interaction(trk[4], other[4])
        interaction = interaction.assess_behavior(behavior)
        code, res, msg = interaction.code, interaction.res, interaction.msg
        print(msg)
        genera_data_pd_cars = genera_data_pd_cars.iloc[0:0]
        genera_data_pd_peds = genera_data_pd_peds.iloc[0:0]
    lock.release()
    return img
Multi-threading in Python for CPU-bound tasks is limited by the GIL, which effectively lets only one thread run at a time.
Of course, if you launch multiple threads for CPU-bound tasks, performance may even degrade, because there is a lot of overhead for both the kernel and the Python interpreter to manage these threads. The kernel wants to schedule these threads, while Python wants to restrict them from running simultaneously, and this results in a lot of context switches, which degrades performance.
If you are using just numpy in the threads you may be fine, since numpy releases the GIL during many of its operations, but I am not sure whether that is true for OpenCV as well. Threads in Python aren't meant for computational tasks.
This is a classic problem of threads with Python; consider using multiprocessing instead. There are a number of articles on this topic you might want to check.
Threads aren't executed in parallel in CPython. Try using ProcessPoolExecutor instead.
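For illustration, a minimal ProcessPoolExecutor sketch (not the original pipeline; the function and data names are placeholders). Arguments and results must be picklable, so heavyweight objects such as detectors and trackers should be constructed inside the worker process rather than passed in:

from concurrent.futures import ProcessPoolExecutor

def heavy_pipeline(frame):
    # Placeholder for CPU-bound per-frame work (detection, tracking, ...).
    return sum(frame)

if __name__ == "__main__":
    frames = [bytes(range(256))] * 8  # dummy frames
    with ProcessPoolExecutor(max_workers=2) as pool:
        for result in pool.map(heavy_pipeline, frames):
            print(result)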

Importing code from file into Python and adding file dialog box

I have a python script signalgen.py that plays audio using equations, but I would like to be able to hard-code the file where the equation is stored (eq1.txt), or choose a file and import the equation.
The problems I'm having are:
1) How can I hard-code a file and its path correctly so it will play the equation as audio?
I get an error:
Traceback (most recent call last):
  File "signalgen.py", line 484, in need_data
    v += (datum * self.sig_level)
TypeError: can't multiply sequence by non-int of type 'float'
The specific block of code which I believe is causing the issue:
def equation_import_function(self,t,f):
    fileobj = open("/home/rat/eq1.txt","r")
    eqdata = fileobj.read()  # read whole file
    fileobj.close()
    #return math.tan(2.0*math.pi*f*t)
    return eqdata
I have this line of code in the eq1.txt file: math.tan(2.0*math.pi*f*t)
2) How can I add a file-open dialog box to be able to choose a file and import the equation?
PS: I'm using Ubuntu 10.04 (Linux), and the equations will be several pages long; this is the reason I would like to import them into Python from text files.
Here's the entire code if you want to look at what I'm using, below or on pastebin (with line numbers): http://pastebin.com/HZg0Jhaw
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (C) 2011, Paul Lutus *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program; if not, write to the *
# * Free Software Foundation, Inc., *
# * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
# ***************************************************************************
# version date 01-12-2011
VERSION = '1.1'
import re, sys, os
import gobject
gobject.threads_init()
import gst
import gtk
gtk.gdk.threads_init()
import time
import struct
import math
import random
import signal
import webbrowser
class Icon:
    icon = [
"32 32 17 1",
" c None",
". c #2A2E30",
"+ c #333739",
"# c #464A4C",
"# c #855023",
"$ c #575A59",
"% c #676A69",
"& c #CC5B00",
"* c #777A78",
"= c #DB731A",
"- c #8A8C8A",
"; c #969895",
"> c #F68C22",
", c #A5A7A4",
"' c #F49D4A",
") c #B3B5B2",
"! c #DEE0DD",
" &&&&&&& ",
" &&&===='''''& ",
" &'''''====&'& ",
" +++++&'&&&&& &'& ",
" +#$%****&'&+ &'& ",
" +#**%$#++#&'&*#+ &'& ",
" +#**#+++++++&'&#**#+ &'& ",
" +$*$+++++++++&'&++$*$+ &'& ",
" #*#++++++++++&'&+++##&&&'& ",
" +*#++++++++#&&&'&+++#=''''& ",
" +*$++++++++#=''''&+++&'>>>'& ",
" #*+++++++++&'>>>'&+++#='''= ",
" +%$++++++++##='''=###++#&&&# ",
" +*#+++++++####&&&######++#*+ ",
" +*+++++++####++#$%$$####++*+ ",
" +*++++++##+#;,,*##*$$$###+*+ ",
" +*#++++###%!!!!,;#$*$$$###*+ ",
" +%$++++##+)!!!),-*+-%$$$#$%+ ",
" +#*+++###+-!!!,;-%#;%%$$+*#+ ",
" +*#++####+$*-*%#+*-%%$##*+ ",
" ++*#+###$$%#++#%;;*%%$#-$+ ",
" +#%+###$$%*;;;;-*%%%#**+ ",
" .+$%###$$$*******%$$*-+. ",
" .+#%%##$$*#*#%%%$%-%+. ",
" .++#%$$$$$$%%%%--#+. ",
" +++##$%*****%+++ ",
" +++++++++++++#. ",
" #--%#++#$*-%+ ",
" +%,))),;%+. ",
" ++++++. ",
" ",
" "
]
# this should be a temporary hack
class WidgetFinder:
    def localize_widgets(self,parent,xmlfile):
        # an unbelievable hack made necessary by
        # someone unwilling to fix a year-old bug
        with open(xmlfile) as f:
            for name in re.findall('(?s) id="(.*?)"',f.read()):
                if re.search('^k_',name):
                    obj = parent.builder.get_object(name)
                    setattr(parent,name,obj)

class ConfigManager:
    def __init__(self,path,dic):
        self.path = path
        self.dic = dic

    def read_config(self):
        if os.path.exists(self.path):
            with open(self.path) as f:
                for record in f.readlines():
                    se = re.search('(.*?)\s*=\s*(.*)',record.strip())
                    if(se):
                        key,value = se.groups()
                        if (key in self.dic):
                            widget = self.dic[key]
                            typ = type(widget)
                            if(typ == list):
                                widget[0] = value
                            elif(typ == gtk.Entry):
                                widget.set_text(value)
                            elif(typ == gtk.HScale):
                                widget.set_value(float(value))
                            elif(typ == gtk.Window):
                                w,h = value.split(',')
                                widget.resize(int(w),int(h))
                            elif(typ == gtk.CheckButton or typ == gtk.RadioButton or typ == gtk.ToggleButton):
                                widget.set_active(value == 'True')
                            elif(typ == gtk.ComboBox):
                                if(value in widget.datalist):
                                    i = widget.datalist.index(value)
                                    widget.set_active(i)
                            else:
                                print "ERROR: reading, cannot identify key %s with type %s" % (key,type(widget))

    def write_config(self):
        with open(self.path,'w') as f:
            for key,widget in sorted(self.dic.iteritems()):
                typ = type(widget)
                if(typ == list):
                    value = widget[0]
                elif(typ == gtk.Entry):
                    value = widget.get_text()
                elif(typ == gtk.HScale):
                    value = str(widget.get_value())
                elif(typ == gtk.Window):
                    _,_,w,h = widget.get_allocation()
                    value = "%d,%d" % (w,h)
                elif(typ == gtk.CheckButton or typ == gtk.RadioButton or typ == gtk.ToggleButton):
                    value = ('False','True')[widget.get_active()]
                elif(typ == gtk.ComboBox):
                    value = widget.get_active_text()
                else:
                    print "ERROR: writing, cannot identify key %s with type %s" % (key,type(widget))
                    value = "Error"
                f.write("%s = %s\n" % (key,value))

    def preset_combobox(self,box,v):
        if(v in box.datalist):
            i = box.datalist.index(v)
            box.set_active(i)
        else:
            box.set_active(0)

    def load_combobox(self,obj,data):
        if(len(obj.get_cells()) == 0):
            # Create a text cell renderer
            cell = gtk.CellRendererText ()
            obj.pack_start(cell)
            obj.add_attribute (cell, "text", 0)
        obj.get_model().clear()
        for s in data:
            obj.append_text(s.strip())
        setattr(obj,'datalist',data)

class TextEntryController:
    def __init__(self,parent,widget):
        self.par = parent
        self.widget = widget
        widget.connect('scroll-event',self.scroll_event)
        widget.set_tooltip_text('Enter number or:\n\
Mouse wheel: increase,decrease\n\
Shift/Ctrl/Alt: faster change')

    def scroll_event(self,w,evt):
        q = (-1,1)[evt.direction == gtk.gdk.SCROLL_UP]
        # magnify change if shift,ctrl,alt pressed
        for m in (1,2,4):
            if(self.par.mod_key_val & m): q *= 10
        s = self.widget.get_text()
        v = float(s)
        v += q
        v = max(0,v)
        s = self.par.format_num(v)
        self.widget.set_text(s)
class SignalGen:
    M_AM,M_FM = range(2)
    W_SINE,W_TRIANGLE,W_SQUARE,W_SAWTOOTH,W_EQUATION_IMPORT = range(5)
    waveform_strings = ('Sine','Triangle','Square','Sawtooth', 'Equation_Import')
    R_48000,R_44100,R_22050,R_16000,R_11025,R_8000,R_4000 = range(7)
    sample_rates = ('48000','44100','22050','16000', '11025', '8000', '4000')

    def __init__(self):
        self.restart = False
        # exit correctly on system signals
        signal.signal(signal.SIGTERM, self.close)
        signal.signal(signal.SIGINT, self.close)
        # precompile struct operator
        self.struct_int = struct.Struct('i')
        self.max_level = (2.0**31)-1
        self.gen_functions = (
            self.sine_function,
            self.triangle_function,
            self.square_function,
            self.sawtooth_function,
            self.equation_import_function
        )
        self.main_color = gtk.gdk.color_parse('#c04040')
        self.sig_color = gtk.gdk.color_parse('#40c040')
        self.mod_color = gtk.gdk.color_parse('#4040c0')
        self.noise_color = gtk.gdk.color_parse('#c040c0')
        self.pipeline = False
        self.count = 0
        self.imod = 0
        self.rate = 1
        self.mod_key_val = 0
        self.sig_freq = 440
        self.mod_freq = 3
        self.sig_level = 100
        self.mod_level = 100
        self.noise_level = 100
        self.enable = True
        self.sig_waveform = SignalGen.W_SINE
        self.sig_enable = True
        self.sig_function = False
        self.mod_waveform = SignalGen.W_SINE
        self.mod_function = False
        self.mod_mode = SignalGen.M_AM
        self.mod_enable = False
        self.noise_enable = False
        self.sample_rate = SignalGen.R_22050
        self.left_audio = True
        self.right_audio = True
        self.program_name = self.__class__.__name__
        self.config_file = os.path.expanduser("~/." + self.program_name)
        self.builder = gtk.Builder()
        self.xmlfile = 'signalgen_gui.glade'
        self.builder.add_from_file(self.xmlfile)
        WidgetFinder().localize_widgets(self,self.xmlfile)
        self.k_quit_button.connect('clicked',self.close)
        self.k_help_button.connect('clicked',self.launch_help)
        self.k_mainwindow.connect('destroy',self.close)
        self.k_mainwindow.set_icon(gtk.gdk.pixbuf_new_from_xpm_data(Icon.icon))
        self.title = self.program_name + ' ' + VERSION
        self.k_mainwindow.set_title(self.title)
        self.tooltips = {
            self.k_sample_rate_combobox : 'Change data sampling rate',
            self.k_left_checkbutton : 'Enable left channel audio',
            self.k_right_checkbutton : 'Enable right channel audio',
            self.k_sig_waveform_combobox : 'Select signal waveform',
            self.k_mod_waveform_combobox : 'Select modulation waveform',
            self.k_mod_enable_checkbutton : 'Enable modulation',
            self.k_sig_enable_checkbutton : 'Enable signal',
            self.k_noise_enable_checkbutton : 'Enable white noise',
            self.k_mod_am_radiobutton : 'Enable amplitude modulation',
            self.k_mod_fm_radiobutton : 'Enable frequency modulation',
            self.k_quit_button : 'Quit %s' % self.title,
            self.k_enable_checkbutton : 'Enable output',
            self.k_help_button : 'Visit the %s Web page' % self.title,
        }
        for k,v in self.tooltips.iteritems():
            k.set_tooltip_text(v)
        self.config_data = {
            'SampleRate' : self.k_sample_rate_combobox,
            'LeftChannelEnabled' : self.k_left_checkbutton,
            'RightChannelEnabled' : self.k_right_checkbutton,
            'SignalWaveform' : self.k_sig_waveform_combobox,
            'SignalFrequency' : self.k_sig_freq_entry,
            'SignalLevel' : self.k_sig_level_entry,
            'SignalEnabled' : self.k_sig_enable_checkbutton,
            'ModulationWaveform' : self.k_mod_waveform_combobox,
            'ModulationFrequency' : self.k_mod_freq_entry,
            'ModulationLevel' : self.k_mod_level_entry,
            'ModulationEnabled' : self.k_mod_enable_checkbutton,
            'AmplitudeModulation' : self.k_mod_am_radiobutton,
            'FrequencyModulation' : self.k_mod_fm_radiobutton,
            'NoiseEnabled' : self.k_noise_enable_checkbutton,
            'NoiseLevel' : self.k_noise_level_entry,
            'OutputEnabled' : self.k_enable_checkbutton,
        }
        self.cm = ConfigManager(self.config_file,self.config_data)
        self.cm.load_combobox(self.k_sig_waveform_combobox,self.waveform_strings)
        self.k_sig_waveform_combobox.set_active(self.sig_waveform)
        self.cm.load_combobox(self.k_mod_waveform_combobox,self.waveform_strings)
        self.k_mod_waveform_combobox.set_active(self.mod_waveform)
        self.cm.load_combobox(self.k_sample_rate_combobox,self.sample_rates)
        self.k_sample_rate_combobox.set_active(self.sample_rate)
        self.k_sig_freq_entry.set_text(self.format_num(self.sig_freq))
        self.k_sig_level_entry.set_text(self.format_num(self.sig_level))
        self.k_mod_freq_entry.set_text(self.format_num(self.mod_freq))
        self.k_mod_level_entry.set_text(self.format_num(self.mod_level))
        self.k_noise_level_entry.set_text(self.format_num(self.noise_level))
        self.k_main_viewport_border.modify_bg(gtk.STATE_NORMAL,self.main_color)
        self.k_sig_viewport_border.modify_bg(gtk.STATE_NORMAL,self.sig_color)
        self.k_mod_viewport_border.modify_bg(gtk.STATE_NORMAL,self.mod_color)
        self.k_noise_viewport_border.modify_bg(gtk.STATE_NORMAL,self.noise_color)
        self.sig_freq_cont = TextEntryController(self,self.k_sig_freq_entry)
        self.sig_level_cont = TextEntryController(self,self.k_sig_level_entry)
        self.mod_freq_cont = TextEntryController(self,self.k_mod_freq_entry)
        self.mod_level_cont = TextEntryController(self,self.k_mod_level_entry)
        self.noise_level_cont = TextEntryController(self,self.k_noise_level_entry)
        self.k_mainwindow.connect('key-press-event',self.key_event)
        self.k_mainwindow.connect('key-release-event',self.key_event)
        self.k_enable_checkbutton.connect('toggled',self.update_values)
        self.k_sig_freq_entry.connect('changed',self.update_entry_values)
        self.k_sig_level_entry.connect('changed',self.update_entry_values)
        self.k_sig_enable_checkbutton.connect('toggled',self.update_checkbutton_values)
        self.k_mod_freq_entry.connect('changed',self.update_entry_values)
        self.k_mod_level_entry.connect('changed',self.update_entry_values)
        self.k_noise_level_entry.connect('changed',self.update_entry_values)
        self.k_sample_rate_combobox.connect('changed',self.update_values)
        self.k_sig_waveform_combobox.connect('changed',self.update_values)
        self.k_mod_waveform_combobox.connect('changed',self.update_values)
        self.k_left_checkbutton.connect('toggled',self.update_checkbutton_values)
        self.k_right_checkbutton.connect('toggled',self.update_checkbutton_values)
        self.k_mod_enable_checkbutton.connect('toggled',self.update_checkbutton_values)
        self.k_noise_enable_checkbutton.connect('toggled',self.update_checkbutton_values)
        self.k_mod_am_radiobutton.connect('toggled',self.update_checkbutton_values)
        self.cm.read_config()
        self.update_entry_values()
        self.update_checkbutton_values()
        self.update_values()

    def format_num(self,v):
        return "%.2f" % v

    def get_widget_text(self,w):
        typ = type(w)
        if(typ == gtk.ComboBox):
            return w.get_active_text()
        elif(typ == gtk.Entry):
            return w.get_text()

    def get_widget_num(self,w):
        try:
            return float(self.get_widget_text(w))
        except:
            return 0.0

    def restart_test(self,w,pv):
        nv = w.get_active()
        self.restart |= (nv != pv)
        return nv

    def update_entry_values(self,*args):
        self.sig_freq = self.get_widget_num(self.k_sig_freq_entry)
        self.sig_level = self.get_widget_num(self.k_sig_level_entry) / 100.0
        self.mod_freq = self.get_widget_num(self.k_mod_freq_entry)
        self.mod_level = self.get_widget_num(self.k_mod_level_entry) / 100.0
        self.noise_level = self.get_widget_num(self.k_noise_level_entry) / 100.0

    def update_checkbutton_values(self,*args):
        self.left_audio = self.k_left_checkbutton.get_active()
        self.right_audio = self.k_right_checkbutton.get_active()
        self.mod_enable = self.k_mod_enable_checkbutton.get_active()
        self.sig_enable = self.k_sig_enable_checkbutton.get_active()
        self.mod_mode = (SignalGen.M_FM,SignalGen.M_AM)[self.k_mod_am_radiobutton.get_active()]
        self.noise_enable = self.k_noise_enable_checkbutton.get_active()

    def update_values(self,*args):
        self.restart = (not self.sig_function)
        self.sample_rate = self.restart_test(self.k_sample_rate_combobox, self.sample_rate)
        self.enable = self.restart_test(self.k_enable_checkbutton,self.enable)
        self.mod_waveform = self.k_mod_waveform_combobox.get_active()
        self.mod_function = self.gen_functions[self.mod_waveform]
        self.sig_waveform = self.k_sig_waveform_combobox.get_active()
        self.sig_function = self.gen_functions[self.sig_waveform]
        self.k_sample_rate_combobox.set_sensitive(not self.enable)
        if(self.restart):
            self.init_audio()

    def make_and_chain(self,name):
        target = gst.element_factory_make(name)
        self.chain.append(target)
        return target

    def unlink_gst(self):
        if(self.pipeline):
            self.pipeline.set_state(gst.STATE_NULL)
            self.pipeline.remove_many(*self.chain)
            gst.element_unlink_many(*self.chain)
            for item in self.chain:
                item = False
            self.pipeline = False
        time.sleep(0.01)

    def init_audio(self):
        self.unlink_gst()
        if(self.enable):
            self.chain = []
            self.pipeline = gst.Pipeline("mypipeline")
            self.source = self.make_and_chain("appsrc")
            rs = SignalGen.sample_rates[self.sample_rate]
            self.rate = float(rs)
            self.interval = 1.0 / self.rate
            caps = gst.Caps(
                'audio/x-raw-int,'
                'endianness=(int)1234,'
                'channels=(int)2,'
                'width=(int)32,'
                'depth=(int)32,'
                'signed=(boolean)true,'
                'rate=(int)%s' % rs)
            self.source.set_property('caps', caps)
            self.sink = self.make_and_chain("autoaudiosink")
            self.pipeline.add(*self.chain)
            gst.element_link_many(*self.chain)
            self.source.connect('need-data', self.need_data)
            self.pipeline.set_state(gst.STATE_PLAYING)

    def key_event(self,w,evt):
        cn = gtk.gdk.keyval_name(evt.keyval)
        if(re.search('Shift',cn) != None):
            mod = 1
        elif(re.search('Control',cn) != None):
            mod = 2
        elif(re.search('Alt|Meta',cn) != None):
            mod = 4
        else:
            return
        if(evt.type == gtk.gdk.KEY_PRESS):
            self.mod_key_val |= mod
        else:
            self.mod_key_val &= ~mod

    def sine_function(self,t,f):
        return math.sin(2.0*math.pi*f*t)

    def triangle_function(self,t,f):
        q = 4*math.fmod(t*f,1)
        q = (q,2-q)[q > 1]
        return (q,-2-q)[q < -1]

    def square_function(self,t,f):
        if(f == 0): return 0
        q = 0.5 - math.fmod(t*f,1)
        return (-1,1)[q > 0]

    def sawtooth_function(self,t,f):
        return 2.0*math.fmod((t*f)+0.5,1.0)-1.0

    def equation_import_function(self,t,f):
        fileobj = open("/home/rat/eq1.txt","r")
        eqdata = fileobj.read()  # read whole file
        fileobj.close()
        #return math.tan(2.0*math.pi*f*t)
        return eqdata

    def need_data(self,src,length):
        bytes = ""
        # sending two channels, so divide requested length by 2
        ld2 = length / 2
        for tt in range(ld2):
            t = (self.count + tt) * self.interval
            if(not self.mod_enable):
                datum = self.sig_function(t,self.sig_freq)
            else:
                mod = self.mod_function(t,self.mod_freq)
                # AM mode
                if(self.mod_mode == SignalGen.M_AM):
                    datum = 0.5 * self.sig_function(t,self.sig_freq) * (1.0 + (mod * self.mod_level))
                # FM mode
                else:
                    self.imod += (mod * self.mod_level * self.interval)
                    datum = self.sig_function(t+self.imod,self.sig_freq)
            v = 0
            if(self.sig_enable):
                v += (datum * self.sig_level)
            if(self.noise_enable):
                noise = ((2.0 * random.random()) - 1.0)
                v += noise * self.noise_level
            v *= self.max_level
            v = max(-self.max_level,v)
            v = min(self.max_level,v)
            left = (0,v)[self.left_audio]
            right = (0,v)[self.right_audio]
            bytes += self.struct_int.pack(left)
            bytes += self.struct_int.pack(right)
        self.count += ld2
        src.emit('push-buffer', gst.Buffer(bytes))

    def launch_help(self,*args):
        webbrowser.open("http://arachnoid.com/python/signalgen_program.html")

    def close(self,*args):
        self.unlink_gst()
        self.cm.write_config()
        gtk.main_quit()

app = SignalGen()
gtk.main()
The imp module will help you to cleanly load Python code chunks from arbitrary files.
#!/usr/bin/env python
# equation in equation-one.py
def eqn(arg):
    return arg * 3 + 2

#!/usr/bin/env python
# your code
import imp

path = "equation-one.py"
eq_mod = imp.load_source("equation", path, open(path))
print("Oh the nice stuff in eq_mod: %s" % dir(eq_mod))
In your custom function definition, you can create a file selector dialog, get the selected file path, load the code using imp, and return the result of the function inside the imported module.
I was commenting before, but I stared at your code long enough and realized what you were trying to do, so it was easier for me to post an answer. Please refer to cJ Zougloub's answer, as I expand on his suggestion to use the imp module.
Your equation files should implement a common interface:
# equation1.py
def eqn(*args):
    return sum(*args)
Then you would load them in using cJ Zougloub's suggestion, but with a common interface:
# python_rt.py
def equation_import_function(self, *args):
    filepath = ''
    # filepath = ... do file chooser dialog here ...
    eq_mod = imp.load_source("equation", filepath)
    eqdata = eq_mod.eqn(*args)
    return eqdata
Now you have a function in your main code that takes any number of arguments, asks the user to pick the equation file, and gets the result for you.
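For the file-chooser part, a minimal PyGTK sketch that could fill in the "# filepath = ..." placeholder above (choose_equation_file is an illustrative name; it assumes the same gtk import the script already uses):

def choose_equation_file(parent=None):
    # Open a standard GTK file chooser and return the selected path (or None).
    dialog = gtk.FileChooserDialog(
        title="Choose equation file",
        parent=parent,
        action=gtk.FILE_CHOOSER_ACTION_OPEN,
        buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                 gtk.STOCK_OPEN, gtk.RESPONSE_OK))
    filepath = None
    if dialog.run() == gtk.RESPONSE_OK:
        filepath = dialog.get_filename()
    dialog.destroy()
    return filepath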
Edit: To address your comment more specifically:
# equation1.py
import math

def eqn(*args):
    f = args[0]
    t = args[1]
    return math.tan(2.0*math.pi*f*t)
And in your main tool, you would use imp.load_source to bring it in. Wherever you needed that equation for your audio, you could then do:
eq_mod.eqn(f, t)
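On current Python 3, the imp module has been removed in favor of importlib; an equivalent loader sketch (load_equation_module is an illustrative name):

import importlib.util

def load_equation_module(path, name="equation"):
    # Load a module object from an arbitrary file path.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

eq_mod = load_equation_module("equation1.py")
print(eq_mod.eqn(440.0, 0.001))  # same eqn(*args) interface as above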
