This is an old issue whose suggested workaround does not work.
Below is a complete example showing how the suggested approach fails. Uncomment the marked line in get() to reproduce the error.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue
class SharedCounter(object):
def __init__(self, n=0):
self.count = multiprocessing.Value('i', n)
def increment(self, n=1):
with self.count.get_lock():
self.count.value += n
@property
def value(self):
return self.count.value
class MyQueue(Queue):
def __init__(self, *args, **kwargs):
super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
self.size = SharedCounter(0)
def put(self, *args, **kwargs):
self.size.increment(1)
super(MyQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
# self.size.increment(-1) # uncomment this for error
return super(MyQueue, self).get(*args, **kwargs)
def qsize(self):
return self.size.value
def empty(self):
return not self.qsize()
def clear(self):
while not self.empty():
self.get()
def worker(queue):
while True:
item = queue.get()
if item is None:
break
print(f'[{os.getpid()}]: got {item}')
time.sleep(1)
if __name__ == '__main__':
num_processes = 4
q = MyQueue()
pool = multiprocessing.Pool(num_processes, worker, (q,))
for i in range(10):
q.put("hello")
q.put("world")
for i in range(num_processes):
q.put(None)
q.close()
q.join_thread()
pool.close()
pool.join()
For some reason, the newly defined MyQueue forgets about its size attribute in the worker processes.
Process SpawnPoolWorker-1:
Traceback (most recent call last):
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 314, in _bootstrap
self.run()
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/Cellar/python#3.11/3.11.0/Frameworks/Python.framework/Versions/3.11/lib/python3.11/multiprocessing/pool.py", line 109, in worker
initializer(*initargs)
File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 47, in worker
item = queue.get()
^^^^^^^^^^^
File "/Users/user/Library/Application Support/JetBrains/PyCharm2022.3/scratches/scratch.py", line 31, in get
self.size.increment(-1) # uncomment this for error
^^^^^^^^^
AttributeError: 'MyQueue' object has no attribute 'size'
(The same traceback is repeated by SpawnPoolWorker-2, SpawnPoolWorker-3 and SpawnPoolWorker-4.)
Well, you didn't override __getstate__ and __setstate__ to include your attribute. pickle uses those methods to control serialization (see "Handling Stateful Objects" in the pickle documentation), so you need to override them so that your variable is included in what gets serialized.
import multiprocessing
import os
import time
from multiprocessing import get_context
from multiprocessing.queues import Queue
class SharedCounter(object):
def __init__(self, n=0):
self.count = multiprocessing.Value('i', n)
def increment(self, n=1):
with self.count.get_lock():
self.count.value += n
@property
def value(self):
return self.count.value
class MyQueue(Queue):
def __init__(self, *args, **kwargs):
super(MyQueue, self).__init__(*args, ctx=get_context(), **kwargs)
self.size = SharedCounter(0)
def __getstate__(self):
return (super(MyQueue, self).__getstate__(),self.size)
def __setstate__(self, state):
super(MyQueue, self).__setstate__(state[0])
self.size = state[1]
def put(self, *args, **kwargs):
self.size.increment(1)
super(MyQueue, self).put(*args, **kwargs)
def get(self, *args, **kwargs):
self.size.increment(-1)
return super(MyQueue, self).get(*args, **kwargs)
def qsize(self):
return self.size.value
def empty(self):
return not self.qsize()
def clear(self):
while not self.empty():
self.get()
def worker(queue):
while True:
item = queue.get()
if item is None:
break
print(f'[{os.getpid()}]: got {item}')
time.sleep(1)
if __name__ == '__main__':
num_processes = 4
q = MyQueue()
pool = multiprocessing.Pool(num_processes, initializer=worker, initargs=(q,))
for i in range(10):
q.put("hello")
q.put("world")
for i in range(num_processes):
q.put(None)
q.close()
q.join_thread()
pool.close()
pool.join()
Note that in Python 3 we don't need super(MyQueue, self); a bare super() suffices. That makes it easier to rename the class later, among other portability and refactoring benefits, so consider swapping every super(x, y) for plain super().
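For illustration, here is the same pickling-hook pair written with the zero-argument form (a sketch; style change only, the behaviour is identical to the code above):
import multiprocessing
from multiprocessing import get_context
from multiprocessing.queues import Queue

class MyQueue(Queue):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, ctx=get_context(), **kwargs)
        self.size = SharedCounter(0)  # SharedCounter as defined above

    def __getstate__(self):
        # bundle the shared counter with the base queue's picklable state
        return (super().__getstate__(), self.size)

    def __setstate__(self, state):
        super().__setstate__(state[0])
        self.size = state[1]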
Related
I wrote two decorators: verbose controls whether or not the decorated function prints its output, and announcer announces when the function is called.
import datetime as dt
import functools
import os
import sys
def verbose(func):
'''Sets verbose mode on function'''
@functools.wraps(func)
def wrapper_func(verbboo=True, *args, **kwargs):
# disabling print
if not verbboo:
sys.stdout = open(os.devnull, 'w')
# running func
ret = func(*args, **kwargs)
# enabling print again
if not verbboo:
sys.stdout = sys.__stdout__
return ret
return wrapper_func
def announcer(func, endboo=True):
'''anounces when function is called and when it finishes; if specified'''
@functools.wraps(func)
def wrapper_func(*args, **kwargs):
print('run {}.{}#{:%Y%m%d%H%M}'.format(
func.__module__, func.__name__,
dt.datetime.now())
)
ret = func(*args, **kwargs)
if endboo:
print('end {}.{}#{:%Y%m%d%H%M}'.format(
func.__module__, func.__name__,
dt.datetime.now())
)
return ret
return wrapper_func
I then nest the following function with the decorators
@verbose
@announcer
def f(boo, opboo=True):
if boo:
print('This is True')
if opboo:
print('This is also True')
return f
# testing
f(True)
But I receive the following error
run __main__.f#202006021152
Traceback (most recent call last):
File "/home/user/anaconda3/envs/mpl/lib/python3.8/runpy.py", line 193, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/home/user/anaconda3/envs/mpl/lib/python3.8/runpy.py", line 86, in _run_code
exec(code, run_globals)
File "/home/user/project/test.py", line 17, in <module>
f(True)
File "/home/user/project/decorators.py", line 18, in wrapper_func
ret = func(*args, **kwargs)
File "/home/user/project/decorators.py", line 47, in wrapper_func
ret = func(*args, **kwargs)
TypeError: f() missing 1 required positional argument: 'boo'
The error only occurs when I nest verbose on top of announcer. announcer by itself works fine. What is going on?
I think the problem is in this line:
def wrapper_func(verbboo=True, *args, **kwargs):
Parameters with default values are still filled by positional arguments, so when you call f(True), the True is captured by verbboo and nothing is left over for boo, which is why f() complains about a missing required positional argument.
When I put the verbboo parameter at the very end it still didn't run, but when I put it between *args and **kwargs (making it keyword-only) it did run.
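A minimal sketch of that arrangement (Python 3 keyword-only syntax, names taken from the question; only the verbose decorator is shown):
import functools
import os
import sys

def verbose(func):
    '''Sets verbose mode on the decorated function'''
    @functools.wraps(func)
    def wrapper_func(*args, verbboo=True, **kwargs):
        # verbboo is keyword-only here, so it can no longer swallow the
        # positional argument meant for the wrapped function (boo)
        if not verbboo:
            sys.stdout = open(os.devnull, 'w')
        try:
            return func(*args, **kwargs)
        finally:
            if not verbboo:
                sys.stdout.close()
                sys.stdout = sys.__stdout__
    return wrapper_func

@verbose
def f(boo, opboo=True):
    if boo:
        print('This is True')
    if opboo:
        print('This is also True')

f(True)                   # prints both lines
f(True, verbboo=False)    # output suppressed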
I would like to make a program which runs two threads which can be simultaneously interrupted using ctrl+C. The following script is a simplified version of this:
import time
import threading
class Controller(object):
def __init__(self, name=None):
self.name = name
def run_once(self):
print("Controller {} is running once...".format(self.name))
def run_forever(self):
while True:
self.run_once()
time.sleep(1)
if __name__ == "__main__":
controller1 = Controller(name="1")
controller2 = Controller(name="2")
thread1 = threading.Thread(target=controller1.run_forever)
thread2 = threading.Thread(target=controller2.run_forever)
thread1.daemon = True
thread2.daemon = True
thread1.start()
thread2.start()
try:
while True:
thread1.join(1)
thread2.join(1)
if not thread1.isAlive() or not thread2.isAlive():
break
except KeyboardInterrupt:
pass
I'm trying to make the code a bit more DRY by doing the following:
import time
import threading
class Controller(object):
def __init__(self, name=None):
self.name = name
def run_once(self):
print("Controller {} is running once...".format(self.name))
def run_forever(self):
while True:
self.run_once()
time.sleep(1)
class ThreadController(Controller, threading.Thread):
def __init__(self, *args, **kwargs):
Controller.__init__(self, *args, **kwargs)
threading.Thread.__init__(self, target=self.run_forever)
self.daemon = True
self.start()
if __name__ == "__main__":
thread1 = ThreadController(name="1")
thread2 = ThreadController(name="2")
try:
while True:
thread1.join(1)
thread2.join(1)
if not thread1.isAlive() or not thread2.isAlive():
break
except KeyboardInterrupt:
pass
However, when I try to run the latter script, I get the following error:
Traceback (most recent call last):
File "threading_test3.py", line 34, in <module>
thread1 = ThreadController(name="1")
File "threading_test3.py", line 18, in __init__
Controller.__init__(self, *args, **kwargs)
File "threading_test3.py", line 6, in __init__
self.name = name
File "/usr/lib/python2.7/threading.py", line 971, in name
assert self.__initialized, "Thread.__init__() not called"
AssertionError: Thread.__init__() not called
I don't understand why Thread.__init__() is not called, because it seems like it is called in the __init__ of ThreadController. What is causing this error?
Call Thread's __init__ first:
class ThreadController(Controller, threading.Thread):
def __init__(self, *args, **kwargs):
threading.Thread.__init__(self, target=self.run_forever)
Controller.__init__(self, *args, **kwargs)
self.daemon = True
self.start()
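The reason the order matters: in CPython's threading module, Thread implements name as a property whose setter asserts that Thread.__init__ has already run, which is exactly the assertion shown in the traceback. So assigning self.name inside Controller.__init__ before the thread is initialised trips it. A minimal sketch reproducing just that mechanism (the class name here is made up for illustration):
import threading

class Broken(threading.Thread):
    def __init__(self):
        # Thread.name is a property; its setter checks that Thread.__init__
        # has already run, so this assignment raises
        # AssertionError: Thread.__init__() not called
        self.name = "demo"
        threading.Thread.__init__(self)

Broken()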
I am trying to use this example as a template for a queuing system in my CherryPy app.
I was able to convert it from Python 2 to Python 3 (changing from Queue import Empty to from queue import Empty) and run it on Ubuntu. But when I execute it on Windows I get the following error:
F:\workspace\test>python test.py
Traceback (most recent call last):
File "test.py", line 112, in <module>
broker.start()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Anaconda3\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot serialize '_io.TextIOWrapper' object
F:\workspace\test>Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Anaconda3\lib\multiprocessing\spawn.py", line 100, in spawn_main
new_handle = steal_handle(parent_pid, pipe_handle)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 81, in steal_handle
_winapi.PROCESS_DUP_HANDLE, False, source_pid)
OSError: [WinError 87] The parameter is incorrect
Here is the full code:
# from http://www.defuze.org/archives/198-managing-your-process-with-the-cherrypy-bus.html
import sys
import logging
from logging import handlers
from cherrypy.process import wspbus
class MyBus(wspbus.Bus):
def __init__(self, name=""):
wspbus.Bus.__init__(self)
self.open_logger(name)
self.subscribe("log", self._log)
def exit(self):
wspbus.Bus.exit(self)
self.close_logger()
def open_logger(self, name=""):
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
h = logging.StreamHandler(sys.stdout)
h.setLevel(logging.INFO)
h.setFormatter(logging.Formatter("[%(asctime)s] %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(h)
self.logger = logger
def close_logger(self):
for handler in self.logger.handlers:
handler.flush()
handler.close()
def _log(self, msg="", level=logging.INFO):
self.logger.log(level, msg)
import random
import string
from multiprocessing import Process
class Bank(object):
def __init__(self, queue):
self.bus = MyBus(Bank.__name__)
self.queue = queue
self.bus.subscribe("main", self.randomly_place_order)
self.bus.subscribe("exit", self.terminate)
def randomly_place_order(self):
order = random.sample(['BUY', 'SELL'], 1)[0]
code = random.sample(string.ascii_uppercase, 4)
amount = random.randint(0, 100)
message = "%s %s %d" % (order, ''.join(code), amount)
self.bus.log("Placing order: %s" % message)
self.queue.put(message)
def run(self):
self.bus.start()
self.bus.block(interval=0.01)
def terminate(self):
self.bus.unsubscribe("main", self.randomly_place_order)
self.bus.unsubscribe("exit", self.terminate)
from queue import Empty
class Broker(Process):
def __init__(self, queue):
Process.__init__(self)
self.queue = queue
self.bus = MyBus(Broker.__name__)
self.bus.subscribe("main", self.check)
def check(self):
try:
message = self.queue.get_nowait()
except Empty:
return
if message == "stop":
self.bus.unsubscribe("main", self.check)
self.bus.exit()
elif message.startswith("BUY"):
self.buy(*message.split(' ', 2)[1:])
elif message.startswith("SELL"):
self.sell(*message.split(' ', 2)[1:])
def run(self):
self.bus.start()
self.bus.block(interval=0.01)
def stop(self):
self.queue.put("stop")
def buy(self, code, amount):
self.bus.log("BUY order placed for %s %s" % (amount, code))
def sell(self, code, amount):
self.bus.log("SELL order placed for %s %s" % (amount, code))
if __name__ == '__main__':
from multiprocessing import Queue
queue = Queue()
broker = Broker(queue)
broker.start()
bank = Bank(queue)
bank.run()
The problem is that parts of the MyBus object are not picklable, and you're saving an instance of MyBus to your Broker instance. Because Windows lacks fork() support, when you call broker.start(), the entire state of broker must be pickled and recreated in the child process that multiprocessing spawns to execute broker.run. It works on Linux because Linux supports fork; it doesn't need to pickle anything in this case - the child process contains the complete state of the parent as soon as it is forked.
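(As a side note, not from the original answer: the same failure can be reproduced on Linux by forcing the spawn start method, which is what Windows uses. A minimal sketch, assuming the Broker and Bank classes from the question are defined in the same script:)
from multiprocessing import Queue, set_start_method

if __name__ == '__main__':
    set_start_method('spawn')  # spawn children instead of forking, as on Windows
    queue = Queue()
    broker = Broker(queue)     # Broker/Bank as defined in the question
    broker.start()             # fails here: broker (and its MyBus) must now be pickled
    bank = Bank(queue)
    bank.run()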
There are two ways to solve this problem. The first, and more difficult, way is to make your broker instance picklable. To do that, you need to make MyBus picklable. The error you're getting right now refers to the logger attribute on MyBus, which is not picklable. That one is easy to fix; just add __getstate__/__setstate__ methods to MyBus, which control how the object is pickled/unpickled. If we remove the logger when we pickle and recreate it when we unpickle, we work around the issue:
class MyBus(wspbus.Bus):
...
def __getstate__(self):
self_dict = self.__dict__.copy()  # copy so we don't mutate the live instance
del self_dict['logger']
return self_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.open_logger()
This works, but then we hit another pickling error:
Traceback (most recent call last):
File "async2.py", line 121, in <module>
broker.start()
File "C:\python34\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\python34\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\python34\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\python34\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\python34\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <class 'cherrypy.process.wspbus._StateEnum.State'>: attribute lookup State on cherrypy.process.wspbus failed
Turns out cherrypy.process.wspbus._StateEnum.State, the class of the state attribute that MyBus inherits from wspbus.Bus, is a nested class, and nested classes can't be pickled:
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return "states.%s" % self.name
The State object (surprise) is used to track the Bus instance's state. Since we're doing the pickling before we start up the bus, we can just remove the state attribute from the object when we pickle, and set it to wspbus.states.STOPPED when we unpickle.
class MyBus(wspbus.Bus):
def __init__(self, name=""):
wspbus.Bus.__init__(self)
self.open_logger(name)
self.subscribe("log", self._log)
def __getstate__(self):
self_dict = self.__dict__.copy()  # copy so we don't mutate the live instance
del self_dict['logger']
del self_dict['state']
return self_dict
def __setstate__(self, d):
self.__dict__.update(d)
self.open_logger()
self.state = wspbus.states.STOPPED # Initialize to STOPPED
With these changes, the code works as expected! The only limitation is that it's only safe to pickle MyBus if the bus hasn't started yet, which is fine for your usecase.
Again, this is the hard way. The easy way is to just remove the need to pickle the MyBus instance altogether. You can just create the MyBus instance in the child process, rather than the parent:
class Broker(Process):
def __init__(self, queue):
Process.__init__(self)
self.queue = queue
...
def run(self):
self.bus = MyBus(Broker.__name__) # Create the instance here, in the child
self.bus.subscribe("main", self.check)
self.bus.start()
self.bus.block(interval=0.01)
As long as you don't need to access broker.bus in the parent, this is the simpler option.
I am not sure why the following decorator (validate_request) doesn't work. What is the correct way to write such a validation decorator?
def validate_request(req_type):
if req_type is 'json' and not request.json:
abort(400)
def decorator(func):
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
return func(*args, **kwargs)
return wrapped_func
return decorator
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
@validate_request('json')
@json
def update_task(task_id):
# task = filter(lambda t: t['id'] == task_id, tasks)
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
#update task
for field in ['title', 'description', 'done']:
task[0][field] = request.json.get(field, task[0][field])
Error:
Traceback (most recent call last):
File "C:\AGR\Programming\LearningPython\FlaskLearning\flask_rest\app.py", line 156, in <module>
#validate_request('json')
File "C:\AGR\Programming\LearningPython\FlaskLearning\flask_rest\app.py", line 144, in validate_request
if req_type is 'json' and not request.json:
File "C:\Anaconda\lib\site-packages\werkzeug\local.py", line 338, in __getattr__
return getattr(self._get_current_object(), name)
File "C:\Anaconda\lib\site-packages\werkzeug\local.py", line 297, in _get_current_object
return self.__local()
File "C:\Anaconda\lib\site-packages\flask\globals.py", line 20, in _lookup_req_object
raise RuntimeError('working outside of request context')
RuntimeError: working outside of request context
How should this be done in a more idiomatic way?
This is what your decorator should look like:
def validate_request(f):
@functools.wraps(f)
def decorated_function(*args, **kwargs):
# Do something with your request here
data = flask.request.get_json()
if not data:
flask.abort(404)
return f(*args, **kwargs)
return decorated_function
and you would apply it like this:
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
@validate_request
def update_task(task_id):
# The rest of your code..
It's kind of an old post, but I think that it may benefit from a little correction:
decorated_function needs to return f(*args, **kws)
def validate_request(f):
@functools.wraps(f)
def decorated_function(*args, **kws):
# Do something with your request here
data = flask.request.get_json()
if not data:
flask.abort(404)
return f(*args, **kws)
return decorated_function
Otherwise you will encounter TypeError: The view function did not return a valid response. The function either returned None or ended without a return statement.
So I read here that decorated functions cannot be pickled. Indeed:
import multiprocessing as mp
def deco(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except:
print 'Exception caught!'
return wrapper
@deco
def f(x):
print x
raise OverflowError
if __name__ == '__main__':
pool = mp.Pool(processes=1)
for _ in pool.imap_unordered(f, range(10)):
pass
pool.close()
pool.join()
print 'All done'
Out:
Traceback (most recent call last):
File "deco0.py", line 19, in <module>
for _ in pool.imap_unordered(f, range(10)):
File "/Users/usualme/anaconda/lib/python2.7/multiprocessing/pool.py", line 659, in next
raise value
cPickle.PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
But now if I replace the Pool map with a plain Process:
import multiprocessing as mp
def deco(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except:
print 'Exception caught!'
return wrapper
@deco
def f(x):
print x
raise OverflowError
if __name__ == '__main__':
p = mp.Process(target=f, args=(1,))
p.start()
p.join()
print 'All done'
Out:
1
Exception caught!
All done
Why is it working? Doesn't Process need to pickle the decorated function as well?
It's working because you're running on Linux, which doesn't need to pickle f to call it in a child process via Process.__init__. This is because f gets inherited by the child via os.fork. If you run the same code on Windows (which lacks fork), or try to pass f to Pool.apply/Pool/map (both of which would need to pickle f to call it in a subprocess), you'll get an error.
This example will fail no matter what platform you use:
import multiprocessing as mp
def deco(f):
def wrapper(*args, **kwargs):
try:
f(*args, **kwargs)
except:
print 'Exception caught!'
return wrapper
@deco
def f(x):
print x
raise OverflowError
if __name__ == '__main__':
p = mp.Pool()
p.apply(f, args=(1,)) # f needs to be pickled here.
print 'All done'
Output:
1
Exception caught!
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 504, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 319, in _handle_tasks
put(task)
PicklingError: Can't pickle <type 'function'>: attribute lookup __builtin__.function failed
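(A side note, not from the original answer: a common way to make a module-level decorated function picklable is to apply functools.wraps to the wrapper. pickle serialises plain functions by reference, looking them up by module and name, and wraps copies f's __module__/__name__/__qualname__ onto the wrapper so that lookup succeeds. A sketch of the first example rewritten that way, in Python 3 syntax:)
import functools
import multiprocessing as mp

def deco(f):
    @functools.wraps(f)  # wrapper now masquerades as f, so pickle can find it by name
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            print('Exception caught!')
    return wrapper

@deco
def f(x):
    print(x)
    raise OverflowError

if __name__ == '__main__':
    pool = mp.Pool(processes=1)
    for _ in pool.imap_unordered(f, range(10)):
        pass
    pool.close()
    pool.join()
    print('All done')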