Failing to load a model using multiprocessing on Windows - Python

This program works on Unix, and I'm trying to transition it to Windows.
It uses multiprocessing, and I understand the issue stems from Windows being forced to use spawn instead of fork for new processes.
The problem is the subprocess reloading the models that I load in the main process: when Consumer() is constructed, its __init__() calls a function from another file that loads some TensorFlow models.
consumer.py
import os
import time
import sys
from PIL import Image
import subprocess
from multiprocessing import Process, Queue

import t2identify

class Consumer:
    def __init__(self, frameSource, layer):
        self.directory = os.environ["directory"]
        self.source = frameSource
        self.task = layer
        if layer == "screen":
            self.layer = t2identify.identifyImage("apps")
        elif layer == "detail":
            self.layer = "detail"
        self.imagesChecked = 0
        self.errorsFound = 0
        self.previousApp = "none"
        self.appDetail = {
            "mobile": t2identify.identifyImage("mobile"),
        }

    def start(self, state, queue):
        self.state = state
        consumer1 = Process(target=self.consumeImage, args=(queue,))
        consumer1.start()
        consumer2 = Process(target=self.consumeImage, args=(queue,))
        consumer2.start()
        consumer1.join()
        consumer2.join()
t2identify.identifyImage() involves loading models.
t2identify.py
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image

class identifyImage():
    def __init__(self, layer):
        import tensorflow as tf
        availableLayers = {
            "apps": "C:/Users/PycharmProjects/NNs/tf/appModel",
        }
        self.selectedLayer = availableLayers[layer]
        self.model = tf.saved_model.load(self.selectedLayer)
        self.label = self.loadLabels(availableLayers[layer] + "/labels.txt")
        self.img_height = 224
        self.img_width = 224
...
I'm confident the issue is that when the consumer subprocess starts, the models loaded here are loaded again; why it says they're not found, I'm not sure.
main.py
import pathos
import multiprocessing
import os
import time
import shutil
from os import path
from producer import Producer
from consumer import Consumer
from controller import Controller
from tqdm import tqdm
import re
import sys
from env import environmentVariables as Environment

class Main:
    def start(self, test):
        self.createDirectories()
        screenQueue = multiprocessing.Queue()
        detailQueue = multiprocessing.Queue()
        self.producer = Producer(["SavedFrames", "ScreenFrames", "DetailFrames", "VisualFrames"])
        self.producerProcess = multiprocessing.Process(target=self.producer.start,
                                                       args=(self.producerEvent, self.producerFrameRate, self.state,
                                                             self.expected, self.iteration, self.testCaseNumber,
                                                             [screenQueue, detailQueue]))
        self.screenConsumer = Consumer("ScreenFrames", "screen")
        # MODELS ARE LOADED
        self.detailConsumer = Consumer("DetailFrames", "detail")
        self.screenConsumerProcess = multiprocessing.Process(target=self.screenConsumer.start,
                                                             args=(self.state, screenQueue))
        self.detailConsumerProcess = multiprocessing.Process(target=self.detailConsumer.start,
                                                             args=(self.state, detailQueue))
        try:
            # Set the new thread to run the controller which performs the test cases
            self.controllerStart = Controller(self.producerEvent, self.producerFrameRate, self.state, self.expected,
                                              self.iteration, self.testCaseNumber, self.progress)
            self.controllerProcess = multiprocessing.Process(target=self.controllerStart.start, args=(test,))
        except:
            print("ERROR")
            return False
        self.producerProcess.start()
        # FAILS on starting screenConsumerProcess (see error)
        self.screenConsumerProcess.start()
        self.detailConsumerProcess.start()
        self.controllerProcess.start()
        self.producerProcess.join()
        self.screenConsumerProcess.join()
        self.detailConsumerProcess.join()
        self.controllerProcess.join()
        self.zipFiles()
        self.sendLogs()
        return True
    ...

if __name__ == "__main__":
    testing = Main()
    results = testing.start()
The error:
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
File "\venv\lib\site-packages\keras\saving\pickle_utils.py", line 48, in deserialize_model_from_bytecode
model = save_module.load_model(temp_dir)
File "\venv\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
Traceback (most recent call last):
File "C:/Users/PycharmProjects/project/src/main.py", line 353, in <module>
raise e.with_traceback(filtered_tb) from None
File "\venv\lib\site-packages\tensorflow\python\saved_model\load.py", line 978, in load_internal
results = testing.start(data)
File "/PycharmProjects/project/src/main.py", line 95, in start
self.screenConsumerProcess.start()
File "\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
str(err) + "\n You may be trying to load on a different device "
FileNotFoundError: Unsuccessful TensorSliceReader constructor: Failed to find any matching files for ram://4e4e1c18-ece9-4d99-88c3-5c2ee965c92a/variables/variables
You may be trying to load on a different device from the computational device. Consider setting the `experimental_io_device` option in `tf.saved_model.LoadOptions` to the io_device such as '/job:localhost'.
The models that are loaded aren't accessible to subprocesses: ram://4e4e1c18-ece9-4d99-88c3-5c2ee965c92a/variables/variables is temp_dir in keras/pickle_utils.py (model = save_module.load_model(temp_dir)), which is where the error occurred.
Am I running out of RAM? Or do I need to change some multiprocessing code now that I'm on Windows?
EDIT:
I suspect it's most likely to do with Windows re-importing everything once a new process starts (spawning). While doing this, the models are reloaded, and that's where the error occurs. I am still unsure, however, how to go about resolving this, apart from loading the models in main and then passing them into the subprocesses as parameters... which seems like a subpar solution.
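To see that spawn behaviour in isolation, here is a minimal sketch (separate from the question's code) that prints once under fork but twice under spawn, because spawn re-imports the main module in every child:
import multiprocessing as mp

# Module-level work (like loading a model) re-executes in every
# spawned child, because spawn re-imports this module.
print("module imported in process:", mp.current_process().name)

def child():
    pass

if __name__ == "__main__":
    mp.set_start_method("spawn")  # the default (and only option) on Windows
    p = mp.Process(target=child)
    p.start()
    p.join()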
EDIT2:
Now looking into using pathos, which uses dill rather than pickle, as I suspect the issue is that when I start the consumer process, the target is a method of a class instance, which is not pickleable.
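That suspicion can be checked quickly without TensorFlow at all: under spawn, multiprocessing pickles the Process target, and a bound method drags its whole instance (models included) through pickle. A minimal sketch, with an open file handle standing in for an unpicklable model:
import pickle

class Holder:
    def __init__(self):
        self.model = open(__file__)  # stand-in for an unpicklable resource

    def work(self):
        pass

h = Holder()
try:
    # This is effectively what Process(target=h.work) does under spawn.
    pickle.dumps(h.work)
except TypeError as e:
    print("cannot pickle the bound method's instance:", e)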

To avoid pickling the TensorFlow models, try moving the creation of these models into the process that uses them:
class Consumer:
    def __init__(self, frameSource, layer):
        self.directory = os.environ["directory"]
        self.source = frameSource
        self.task = layer
        self._layer = layer
        # The following code is moved to the consumeImage method:
        """
        if layer == "screen":
            self.layer = t2identify.identifyImage("apps")
        elif layer == "detail":
            self.layer = "detail"
        """
        self.imagesChecked = 0
        self.errorsFound = 0
        self.previousApp = "none"
        # The following code is moved to consumeImage:
        """
        self.appDetail = {
            "mobile": t2identify.identifyImage("mobile"),
        }
        """

    def start(self, state, queue):
        self.state = state
        consumer1 = Process(target=self.consumeImage, args=(queue,))
        consumer1.start()
        consumer2 = Process(target=self.consumeImage, args=(queue,))
        consumer2.start()
        consumer1.join()
        consumer2.join()

    def consumeImage(self, queue):
        self.appDetail = {
            "mobile": t2identify.identifyImage("mobile"),
        }
        if self._layer == "screen":
            self.layer = t2identify.identifyImage("apps")
        elif self._layer == "detail":
            self.layer = "detail"
...
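With this layout each spawned consumer builds its own copy of the models after it starts, so nothing heavy crosses the pickle boundary; self is still pickled, but now it only holds strings and counters. An alternative sketch (reusing the question's t2identify module; consume_image is a hypothetical module-level helper) avoids pickling the instance altogether, since only small, picklable arguments are sent to the child:
from multiprocessing import Process, Queue

import t2identify  # the question's module

def consume_image(layer, queue):
    # Runs in the child process, so the TensorFlow models are
    # constructed here and never pickled.
    identifier = t2identify.identifyImage("apps" if layer == "screen" else "mobile")
    while True:
        frame = queue.get()
        if frame is None:  # sentinel to stop the consumer
            break
        # ... classify frame with identifier ...

# Usage sketch: Process(target=consume_image, args=("screen", queue)).start()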

Related

Writing a dataset to multiple directories with modin and Ray pauses inexplicably

Problem
I am trying to perform IO operations with multiple directories using Ray, modin (with the Ray backend), and Python. The file writes pause, memory and disk usage do not change at all, and the program blocks.
Setup
I have a Ray actor set up like this:
import os
os.environ["MODIN_ENGINE"] = "ray"  # Modin will use Ray
import ray
import modin.pandas as mpd
from numpy.core import numeric
from tqdm import tqdm

@ray.remote
class DatasetHelper:
    # Class Variables (static) are to be written here

    @ray.method(num_returns=1)
    def get_dataset(self):
        return self.dataset

    @ray.method(num_returns=1)
    def generate_dataset(self):
        # generates some dataset and returns a dictionary.
        return {'status': 1,
                'data_dir': self.data_dir}

    @ray.method(num_returns=1)
    def get_config(self):
        return {
            "data_dir": self.data_dir,
            "data_map_dir": self.data_map_dir,
            "out_dir": self.out_dir
        }

    def _validate_initialization(self):
        # Logic here isn't relevant
        if self.data_dir == "" or self.data_map == "" or self.nRows == 42:
            return False
        return True

    def __init__(self, data_dir, data_map_dir, nRows, out_dir):
        self.data = {}
        self.data_map = {}
        self.dataset = mpd.DataFrame()
        self.timestamp = []
        self.first = True
        self.out_dir = out_dir
        self.data_dir = data_dir
        self.data_map_dir = data_map_dir
        self.nRows = nRows

    def _extract_data(self):
        print('Reading data ...')
        for each in os.listdir(self.data_dir):
            self.data[each.split('.')[0]] = mpd.read_csv(os.path.join(self.data_dir, each),
                                                         header=None,
                                                         nrows=self.nRows)
        print('Data read successfully ...')
        print('Validating times for monotonicity and uniqueness ... ')
        for each in tqdm(self.data):
            if mpd.to_datetime(self.data[each][0]).is_monotonic and mpd.to_datetime(self.data[each][0]).is_unique:
                pass
            else:
                print('Validation failed for uuid: {}'.format(each))
                return

    def _extract_data_maps(self):
        self.data_map = mpd.read_pickle(self.data_map_dir)
        print('Data-Map unpickled successfully ...')
The main logic is structured as shown below:
from functools import cached_property
import os
import threading
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from DatasetHelper import DatasetHelper
import gc
import json
import logging
from multiprocessing import Process
import asyncio
import ray

ray.init(
    # Limiting the object memory store used by ray.put()
    # object_store_memory=20000000000,
    # Limiting the memory usage of each worker.
    # _memory = (1024.0 * 3) * 0.5,
    # Specifying custom directories for temp and object spilling
    _temp_dir=os.path.join("/project/bhavaraj/Anaheim/ray_tmp"),
    _system_config={
        "object_spilling_config": json.dumps(
            {"type": "filesystem", "params": {
                "directory_path": "/project/bhavaraj/Anaheim/ray_plasma"}},
        )
    },
    logging_level=logging.DEBUG,
    ignore_reinit_error=True,
    num_gpus=1,
    num_cpus=40,
    dashboard_port=8265
)

write_lock = threading.Lock()

def cache_dataset(loc):
    from datetime import datetime
    params = ray.get(loc.get_config.remote())
    params['out_dir'] = os.getcwd() if params['out_dir'] is None else params['out_dir']
    if os.path.exists(params['out_dir']) is False:
        os.mkdir(params['out_dir'])
    dataset_name = datetime.now().strftime("%H:%M:%S") + \
        "_{}_Cache.csv".format(id(params['out_dir']))
    print("Writing to file in {}".format(params['out_dir']))
    print("Acquiring Lock")
    with write_lock:
        print("Lock acquired ...")
        ray.get(loc.get_dataset.remote()).to_csv(os.path.join(params['out_dir'], dataset_name))
    print("Writing to file finished at {}".format(params['out_dir']))

R_DATA_DIR: str = '/data/intermediate/R/'
R_DATA_MAP: str = '/data/external/DataMap/R.pkl'
G_DATA_DIR: str = '/data/intermediate/G/'
G_DATA_MAP: str = 'data/external/DataMap/G.pkl'
B_DATA_DIR: str = '/data/intermediate/B/'
B_DATA_MAP: str = '/data/external/DataMap/B.pkl'
C_DATA_DIR: str = '/data/intermediate/C/'
C_DATA_MAP: str = '/data/external/DataMap/C.pkl'
Z_DATA_DIR: str = '/data/intermediate/Z/'
Z_DATA_MAP: str = '/data/external/DataMap/Z.pkl'

objs_refs = []
n = 50000

b = DatasetHelper.remote(B_DATA_DIR, B_DATA_MAP, n, "./CB")
r = DatasetHelper.remote(R_DATA_DIR, R_DATA_MAP, n, "./LR")
c = DatasetHelper.remote(C_DATA_DIR, C_DATA_MAP, n, "./CC")
g = DatasetHelper.remote(G_DATA_DIR, G_DATA_MAP, n, "./AG")

objs_refs.append(b.generate_dataset.remote())
objs_refs.append(r.generate_dataset.remote())
objs_refs.append(c.generate_dataset.remote())
objs_refs.append(r.generate_dataset.remote())
objs_refs.append(g.generate_dataset.remote())

generate_outs = ray.get([x for x in objs_refs])

print("Printing dataset generation results...")
for each in generate_outs:
    print(each)

# I also tried placing these methods inside the actor but the same issue persists
cache_dataset(b)
cache_dataset(r)
cache_dataset(c)
cache_dataset(g)
I tried decorating the cache_dataset() function with @ray.remote and calling it as below:
locs = [b, r, c, g]
ray.get([cache_dataset.remote(each) for each in locs])
Output
There are no errors with the file writes, but the program pauses execution.
2021-09-20 08:32:53,024 DEBUG node.py:890 -- Process STDOUT and STDERR is being redirected to /project/bhavaraj/Anaheim/ray_tmp/session_2021-09-20_08-32-53_008570_36561/logs.
2021-09-20 08:32:53,172 DEBUG services.py:652 -- Waiting for redis server at 127.0.0.1:6379 to respond...
2021-09-20 08:32:53,334 DEBUG services.py:652 -- Waiting for redis server at 127.0.0.1:44291 to respond...
2021-09-20 08:32:53,340 DEBUG services.py:1043 -- Starting Redis shard with 10.0 GB max memory.
2021-09-20 08:33:01,212 INFO services.py:1263 -- View the Ray dashboard at http://127.0.0.1:8265
2021-09-20 08:33:01,216 DEBUG node.py:911 -- Process STDOUT and STDERR is being redirected to /project/bhavaraj/Anaheim/ray_tmp/session_2021-09-20_08-32-53_008570_36561/logs.
2021-09-20 08:33:01,221 DEBUG services.py:1788 -- Determine to start the Plasma object store with 76.48 GB memory using /dev/shm.
2021-09-20 08:33:01,314 DEBUG services.py:652 -- Waiting for redis server at 10.2.1.35:6379 to respond...
(pid=36906) Dataset shape: (100340, 41)
(pid=36913) Dataset shape: (150692, 40)
(pid=36902) Dataset shape: (103949, 41)
(pid=36910) Dataset shape: (420269, 41)
Printing dataset generation results... # prints the results correctly
Writing to file in ./CB
Acquiring Lock
Lock acquired ...
Writing to file finished at ./CB
Writing to file in ./LR
Acquiring Lock
Lock acquired ...
2021-09-20 08:43:02,612 DEBUG (unknown file):0 -- gc.collect() freed 115 refs in 0.23721289704553783 seconds
Hypothesis
I think the Ray engine is stopping before all of the tasks have finished executing, but I do not know how to prove or validate this hypothesis. I also know that ray.get is supposed to block until all of the tasks have finished.
There is a deadlock-like situation somewhere.
References
https://docs.ray.io/en/latest/actors.html
https://towardsdatascience.com/writing-your-first-distributed-python-application-with-ray-4248ebc07f41
For any future readers:
modin.DataFrame.to_csv() pauses inexplicably for unknown reasons, but modin.DataFrame.to_pickle() doesn't with the same logic.
There is also a significant performance increase in read/write times when data is stored as .pkl files.
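A hedged sketch of what that swap might look like in the question's cache_dataset() (cache_dataset_pickle is a made-up name; the actor API is the question's own):
import os
import ray

def cache_dataset_pickle(loc, write_lock):
    """Variant of the question's cache_dataset() that persists with to_pickle()."""
    from datetime import datetime
    params = ray.get(loc.get_config.remote())
    out_dir = params['out_dir'] if params['out_dir'] is not None else os.getcwd()
    os.makedirs(out_dir, exist_ok=True)
    name = datetime.now().strftime("%H-%M-%S") + "_Cache.pkl"
    with write_lock:
        # modin.pandas mirrors the pandas to_pickle API
        ray.get(loc.get_dataset.remote()).to_pickle(os.path.join(out_dir, name))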

Reading higher frequency data in thread and plotting graph in real-time with Tkinter

In the last couple of weeks, I've been trying to make an application that can read EEG data from an OpenBCI Cyton (250 Hz) and plot a graph in 'real time'. What seems to work best here are threads. I applied the tips I found here [1] to make the thread communicate with Tkinter, but the application still doesn't work (it gives me the error RecursionError: maximum recursion depth exceeded while calling a Python object). Maybe I'm doing something wrong because I'm trying to use multiple .py files? See below the main parts of my code and a few more comments in context:
### FILE main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from AppWindow import *

window = AppWindow()
window.start()

### FILE AppWindow.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
import scroller as scrl
import logging
import requests
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import random
from pandas import DataFrame
import stream_lsl_eeg as leeg

# Definitions
H = 720
W = 1280
# Color palette >> https://www.color-hex.com/color-palette/92077
bg_color = "#c4ac93"
sc_color = "#bba58e"
tx_color = "#313843"
dt_color = "#987f62"
wn_color = "#6b553b"

class AppWindow:
    # Other Functions
    def plotGraph(self, x, y):
        self.ax.clear()
        self.ax.plot(x, y, color=tx_color)
        plt.subplots_adjust(bottom=0.31, left=0.136, top=0.9, right=0.99)
        plt.ylabel('Magnitude', fontsize=9, color=tx_color)
        plt.xlabel('Freq', fontsize=9, color=tx_color)
        self.figure.canvas.draw()

    def __init__(self):
        self.root = tk.Tk()  # start of application
        self.root.wm_title("Hybrid BCI - SSVEP and Eye Tracker")
        # Other Graphical Elements
        # Button that calls function
        self.btn_ReceiveEEG = tk.Button(self.EEG_frame, text="Receive EEG signal", bg=bg_color, fg=tx_color,
                                        state=tk.DISABLED, command=lambda: leeg.getEEGstream(self))
        self.btn_ReceiveEEG.place(anchor='nw', relx=0.52, rely=0.5, width=196, height=40)
        # Other Graphical Elements

    def start(self):
        self.root.mainloop()  # end of application
### FILE stream_lsl_eeg.py
from pylsl import StreamInlet, resolve_stream
import tkinter as tk
import AppWindow as app
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import threading
import queue
import time

class myThread(threading.Thread):
    def __init__(self, name, q, f):
        threading.Thread.__init__(self)
        self.name = name
        self.q = q
        self.f = f

    def run(self):
        print("Starting ", self.name)
        pullSamples(self.q, self.f)  # place where function is called

def getInlet(app):  # this is triggered by another button and it's working fine
    global inlet
    app.logger.warn('Looking for an EEG stream...')
    streams = resolve_stream('type', 'EEG')
    inlet = StreamInlet(streams[0])
    app.logger.warn('Connected')
    app.btn_ReceiveEEG.config(state=tk.NORMAL)

def pullSamples(q, f):
    global stopcollecting
    i = 0
    while i < 1000:
        sample, timestamp = inlet.pull_sample()
        threadLock.acquire()  # thread locks to put info in the queue
        q.put([sample, timestamp])  # data is put in the queue for other threads to access
        threadLock.release()  # thread unlocks after info is in
        i += 1
    stopcollecting = 1
    print("Exit flag on")

def plotSamples(flag, app, kounter):  # Outside Thread
    if not stopcollecting:  # testing if stream reception stopped
        while dataqueue.qsize():
            try:
                kounter += 1
                sample, timestamp = dataqueue.get(0)
                samples.append(sample[0])  # getting just channel 1 (0)
                timestamps.append(timestamp)
                show_samples = samples[-250:]
                show_timestamps = timestamps[-250:]
                app.plotGraph(show_timestamps, show_samples)
                print(kounter)  # just a control to count if the right amount of samples comes out of the queue
            except queue.Empty:
                pass  # still not implemented, but will return to the main application
    app.root.after(60, plotSamples(flag, app, kounter))  # 60 chosen because plot should update every 15 samples (15/250 = 0.06 s)

def getEEGstream(app):  # function called by button
    app.logger.warn('Starting thread...')
    kounter = 0
    start = time.perf_counter()
    thread1.start()
    plotSamples(flag, app, kounter)
    thread1.join()  # I don't know if I need this...
    finish = time.perf_counter()
    print(f'Sizes: Samples [{len(samples)}, {len(samples[0])}], {len(timestamps)} timestamps')
    print(f'Successfully streamed in {round(finish-start,3)}s!')

###
threadLock = threading.Lock()
dataqueue = queue.Queue()
stopcollecting = 0
kounter = []
flag = queue.Queue()  # secondary queue for flags, not used at the moment
flag.put(0)
thread1 = myThread("Thread-1", dataqueue, flag)
samples, timestamps = [], []
show_samples, show_timestamps = [], []
As I found here [2], a function should not call itself, but that's basically what [1] does. Also, I don't think I'm calling root.mainloop() multiple times as is done in [3].
After executing, Python gives me the following error/output:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\robotics\AppData\Local\Continuum\anaconda3\envs\psychopy\lib\tkinter\__init__.py", line 1705, in __call__
return self.func(*args)
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\AppWindow.py", line 109, in <lambda>
self.btn_ReceiveEEG = tk.Button(self.EEG_frame, text = "Receive EEG signal", bg = bg_color, fg = tx_color, state = tk.DISABLED, command = lambda: leeg.getEEGstream(self))
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\stream_lsl_eeg.py", line 118, in getEEGstream
plotSamples(flag, app, kounter)
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\stream_lsl_eeg.py", line 104, in plotSamples
app.root.after(60, plotSamples(flag,app,kounter))
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\stream_lsl_eeg.py", line 104, in plotSamples
app.root.after(60, plotSamples(flag,app,kounter))
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\stream_lsl_eeg.py", line 104, in plotSamples
app.root.after(60, plotSamples(flag,app,kounter))
[Previous line repeated 986 more times]
File "C:\Users\robotics\Documents\gitDocuments\SSVEP_EyeGaze_py\stream_lsl_eeg.py", line 92, in plotSamples
while dataqueue.qsize( ): # if not dataqueue.empty():
File "C:\Users\robotics\AppData\Local\Continuum\anaconda3\envs\psychopy\lib\queue.py", line 87, in qsize
with self.mutex:
RecursionError: maximum recursion depth exceeded while calling a Python object
Exit flag on
This means the thread apparently executes successfully, but plotSamples() is crashing.
Any advice?
after() (similar to a button's command= and bind()) needs the function's name without () and without arguments - this is called a callback - and after() sends it to mainloop, which later uses () to run it.
You use the function with ():
app.root.after(60, plotSamples(flag,app,kounter))
so it runs immediately (it doesn't send the function to mainloop), and this function immediately runs the same function again, which runs the same function again, etc. - so you create recursion.
It works like this:
result = plotSamples(flag,app,kounter) # run at once
app.root.after(60, result)
If you have to use a function with arguments, then you can do:
app.root.after(60, plotSamples, flag, app, kounter)
Alternatively, you can use lambda to create a function that takes no arguments:
app.root.after(60, lambda:plotSamples(flag,app,kounter) )
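A minimal, self-contained illustration of the callback form (plain Tkinter, nothing from the question required):
import tkinter as tk

root = tk.Tk()
count = 0

def poll():
    global count
    count += 1
    print("tick", count)
    if count < 5:
        root.after(60, poll)  # pass the function itself: no parentheses
    else:
        root.destroy()        # stop the demo after five ticks

root.after(60, poll)
root.mainloop()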

Serial Communication breaks down when threading

I am using a USB microwave source which communicates via a virtual COM port.
Communication is done from Python. Everything works fine as long as I execute the code blockingly.
However, as soon as any communication is done in a thread, I get a SerialException: Attempting to use a port that is not open. Is there an obvious reason why this is happening? Nothing else, at least originating from my software, is trying to communicate with the port during that time.
The highest level script:
from serial import SerialException
import deer
import windfreak_usb

try:
    deer.mw_targets = windfreak_usb.WindfreakSynthesizer()
except SerialException:
    deer.mw_targets.port.close()
    deer.mw_targets = windfreak_usb.WindfreakSynthesizer()

deer_mes = deer.DeerMeasurement(f_start=0.9e9,
                                f_end=1.2e9,
                                df=3e6,
                                f_nv=1.704e9,
                                seq=["[(['mw'],28),([],tau),(['mw', 'mwy'],56),([],tau),(['mw'],28),([],100)]",
                                     "[(['mw'],28),([],tau),(['mw', 'mwy'],56),([],tau),(['mw'],84),([],100)]"],
                                power_nv=10,
                                power_targets=3,
                                tau=700
                                )
deer_mes.run(10e6)    # <- this works perfectly, as it is the blocking version
# deer_mes.start(10e6)  # <- raises the SerialException at the line indicated below
deer.mw_targets.port.close()
A reduced form of the microwave source (windfreak_usb.py):
import serial
import synthesizer

class WindfreakSynthesizer(synthesizer.Synthesizer):
    def __init__(self):
        synthesizer.Synthesizer.__init__(self)
        self.port = serial.Serial(
            port='COM14',
            baudrate=9600,
            timeout=10
        )
        self.off()

    def __del__(self):
        self.port.close()

    def off(self):
        self.port.write('o0')

    def power(self, p):
        p = int(p)
        self.port.write('a{}'.format(p))
A reduced form of the measurement class (deer.py):
import threading
import time
import numpy
import os
from PyQt4 import QtCore
from PyQt4.QtCore import QObject
import matplotlib.pyplot as plt
import hardware
import matpickle
import pulsed
import pulser
import savepath
import synthesizer

if 'pg' not in globals():
    pg = pulser.Pulser()
if 'mw_targets' not in globals():
    mw_targets = synthesizer.Synthesizer()

timeout = 30
CurrentMeasurement = None  # global variable pointing to the currently active measurement

class DeerMeasurement(QObject):
    update = QtCore.pyqtSignal()

    def __init__(self, f_start, f_end, df, f_nv, seq, power_nv, power_targets, tau, sweeps_per_iteration=50e3,
                 switching_time=300e-6):
        super(QObject, self).__init__()
        """ setting all parameters as self.parameter """
        self.power_targets = power_targets
        self.fs = numpy.arange(f_start, f_end + df, df)
        self.abort = threading.Event()
        self.save_deer()

    def run(self, sweeps):
        global CurrentMeasurement
        if CurrentMeasurement is not None:
            print('Deer Warning: cannot start measurement while another one is already running. Returning...')
            return
        CurrentMeasurement = self
        # Start measurement
        print('Deer measurement started.')
        mw_targets.power(self.power_targets)  # <- This causes the SerialException.
        """ Here comes the actual measurement, which is never executed, as the line above
        already kills the thread with the SerialException. """

    def start(self, sweeps, monitor=None):
        """Start Measurement in a thread."""
        if monitor is not None:
            monitor.register(self)
        if not hasattr(self, 'mes_thread'):
            # noinspection PyAttributeOutsideInit
            self.mes_thread = threading.Thread(target=self.run, args=(sweeps,))
            self.mes_thread.start()
        else:
            print('Already threading')
Any help is highly appreciated, as running the measurement outside a thread is not an option.
Best regards!

FHSS project in Python: Error 'fft_window' object has no attribute '_proxies'

This is what I am getting when I execute the following code.
Traceback (most recent call last):
  File "usrp.py", line 100, in <module>
    tb.set_freq(i)
  File "usrp.py", line 77, in set_freq
    self.wxgui_fftsink2_0.set_baseband_freq(self.freq)
  File "/usr/local/lib/python2.7/dist-packages/gnuradio/wxgui/common.py", line 131, in set
    def set(value): controller[key]=value
  File "/usr/local/lib/python2.7/dist-packages/gnuradio/wxgui/pubsub.py", line 44, in __setitem__
    elif self._proxies[key] is not None:
AttributeError: 'fft_window' object has no attribute '_proxies'
I have seen this kind of error with cyclic dependencies. I have previously solved a cyclic dependency by importing the package instead of using the from keyword, and I tried import gnuradio in this case, but to no avail. Following is the code I am working on. It would be a great help if this could be resolved; I haven't come across this kind of error before.
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Usrp
# Generated: Sat Feb 21 11:26:17 2015
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import wxgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.wxgui import fftsink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx, time, random

class usrp(grc_wxgui.top_block_gui):
    def __init__(self):
        grc_wxgui.top_block_gui.__init__(self, title="Usrp")

        ##################################################
        # Variables
        ##################################################
        self.samp_rate = samp_rate = 32000
        self.freq = freq = 900e6

        ##################################################
        # Blocks
        ##################################################
        self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
            self.GetWin(),
            baseband_freq=freq,
            y_per_div=10,
            y_divs=10,
            ref_level=0,
            ref_scale=2.0,
            sample_rate=samp_rate
            fft_size=1024,
            fft_rate=15,
            average=False,
            avg_alpha=None,
            title="FFT Plot",
            peak_hold=False,
        )
        self.Add(self.wxgui_fftsink2_0.win)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate, True)
        self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, analog.GR_SIN_WAVE, 100e3, 1, 0)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_0, 0), (self.blocks_throttle_0, 0))
        self.connect((self.blocks_throttle_0, 0), (self.wxgui_fftsink2_0, 0))

    def get_samp_rate(self):
        return self.samp_rate

    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
        self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate)

    def get_freq(self):
        return self.freq

    def set_freq(self, freq):
        self.freq = freq
        self.wxgui_fftsink2_0.set_baseband_freq(self.freq)

if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        try:
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
    parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
    (options, args) = parser.parse_args()
    tb = usrp()

    def t_range(beg, end, incr):
        while beg <= end:
            yield beg
            beg += incr

    j = 2
    for i in t_range(910e6, 1010e6, 10e6):
        tb.set_freq(i)
        # time.sleep(j)
        tb.Start(True)
        time.sleep(j)
        tb.Wait()
This is most likely not a cyclic dependency issue.
I'm however a bit concerned about that error. You see, pubsub's __init__ generates a _proxies attribute, so if a class is a subclass of pubsub, it should have _proxies; if it's not, it shouldn't see that call.
class pubsub(dict):
    def __init__(self):
        self._publishers = { }
        self._subscribers = { }
        self._proxies = { }
    ....
So, interestingly, your script works for me (after fixing a missing , in the fft_sink_c argument list), which might indicate you're mixing sources from different GNU Radio versions.
As a solution, I recommend making sure you're really using one version of GNU Radio, preferably the latest, installed from source (Debian's packages are rather up-to-date too, thanks to Maitland), which can happen automagically using PyBOMBS.
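One way to check which GNU Radio your interpreter actually imports (gr.version() is part of the GNU Radio Python bindings; inspecting __file__ is a generic Python trick):
from gnuradio import gr

print(gr.version())   # installed GNU Radio version
print(gr.__file__)    # where the bindings are imported from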

Cocos2d: AttributeError: 'Director' object has no attribute '_window_virtual_width'

We are using the cocos2d framework to create a game. We're completely new to this framework, so we cannot get the director object to work as we are expecting. Here is our code skeleton:
from cocos.director import director
from cocos.layer import base_layers
import sys
import math
import os
import pyglet
import cocos

world_width = 1000
world_height = 1000

class NetworkMap(cocos.layer.ScrollableLayer):
    def __init__(self, world_width, world_height):
        self.world_width = world_width
        self.world_height = world_height
        super(NetworkMap, self).__init__()
        bg = ColorLayer(170,170,0,255,width=500,height=500)
        self.px_width = world_width
        self.px_height = world_height
        self.add(bg,z=0)

class TestScene(cocos.scene.Scene):
    def __init__(self):
        super(TestScene,self).__init__()

    def on_enter():
        director.push_handlers(self.on_cocos_resize)
        super(TestScene, self).on_enter()

    def on_cocos_resize(self, usable_width, usable_height):
        self.f_refresh_marks()

def main():
    scene = TestScene()
    director.init(world_width, world_height, do_not_scale=True)
    world_map = NetworkMap(world_width, world_height)
    scroller = cocos.layer.ScrollingManager()
    scroller.add(world_map)
    scene.add(scroller)
    director.run(scene)
So for some reason the director doesn't have all the attributes we want.
Our stack trace is:
Traceback (most recent call last):
File "map.py", line 49, in <module>
main()
File "map.py", line 39, in main
scene = TestScene()
File "map.py", line 29, in __init__
super(TestScene,self).__init__()
File "/usr/local/lib/python2.7/dist-packages/cocos2d-0.5.5-py2.7.egg/cocos/scene.py", line 95, in __init__
super(Scene,self).__init__()
File "/usr/local/lib/python2.7/dist-packages/cocos2d-0.5.5-py2.7.egg/cocos/cocosnode.py", line 114, in __init__
self.camera = Camera()
File "/usr/local/lib/python2.7/dist-packages/cocos2d-0.5.5-py2.7.egg/cocos/camera.py", line 56, in __init__
self.restore()
File "/usr/local/lib/python2.7/dist-packages/cocos2d-0.5.5-py2.7.egg/cocos/camera.py", line 76, in restore
width, height = director.get_window_size()
File "/usr/local/lib/python2.7/dist-packages/cocos2d-0.5.5-py2.7.egg/cocos/director.py", line 522, in get_window_size
return ( self._window_virtual_width, self._window_virtual_height)
AttributeError: 'Director' object has no attribute '_window_virtual_width'
You need to initialise the director before you instantiate your first scene. The director is the global object that initialises your screen, sets up the Cocos2D framework, etc.
I found a few other errors:
You need to change ColorLayer to be fully qualified, e.g. cocos.layer.ColorLayer.
on_enter needs to have self as the first argument.
You need to define f_refresh_marks in your TestScene class.
Here's a working copy of the code. (Working, in the sense that it does not throw errors, not that it does any sort of scrolling.)
from cocos.director import director
from cocos.layer import base_layers
import sys
import math
import os
import pyglet
import cocos

world_width = 1000
world_height = 1000

class NetworkMap(cocos.layer.ScrollableLayer):
    def __init__(self, world_width, world_height):
        self.world_width = world_width
        self.world_height = world_height
        super(NetworkMap, self).__init__()
        bg = cocos.layer.ColorLayer(170,170,0,255,width=500,height=500)
        self.px_width = world_width
        self.px_height = world_height
        self.add(bg,z=0)

class TestScene(cocos.scene.Scene):
    def __init__(self):
        super(TestScene,self).__init__()

    def on_enter(self):
        director.push_handlers(self.on_cocos_resize)
        super(TestScene, self).on_enter()

    def on_cocos_resize(self, usable_width, usable_height):
        self.f_refresh_marks()

    def f_refresh_marks(self):
        pass

def main():
    director.init(world_width, world_height, do_not_scale=True)
    scene = TestScene()
    world_map = NetworkMap(world_width, world_height)
    scroller = cocos.layer.ScrollingManager()
    scroller.add(world_map)
    scene.add(scroller)
    director.run(scene)

if __name__ == '__main__':
    main()
I had the same issue (with a very similar stack trace) and it was because I was trying to create a layer before calling director.init(). Moving director.init() to earlier in the code fixed it for me.
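Distilled to the essential ordering, a minimal sketch (window dimensions made up):
from cocos.director import director
import cocos

director.init(640, 480)        # must come first: creates the window
scene = cocos.scene.Scene()    # safe now; Camera can query the window size
director.run(scene)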
