I am toying around with flags at the moment and came across some weird behavior when using tf.app.run(). The following code snippet should simply print the string given via the command line.
import tensorflow as tf

# Command-line flag: name, default value, help text.
tf.app.flags.DEFINE_string('mystring', 'Hello World!',
                           '''String to print to console.''')

FLAGS = tf.app.flags.FLAGS


# NOTE(review): this zero-argument signature is exactly what triggers the
# error discussed here -- tf.app.run() calls main(argv) with one positional
# argument, so Python raises "TypeError: main() takes 0 positional arguments
# but 1 was given". Kept as-is because the surrounding text analyzes this
# failure; the fix is to accept the argument, e.g. `def main(_):`.
def main():
    print(FLAGS.mystring)


if __name__ == '__main__':
    tf.app.run()
During execution, this error is thrown:
Traceback (most recent call last):
File "", line 1, in
runfile('/path/flags.py', wdir='/path')
File
"/home/abc/anaconda3/envs/tensorflow/lib/python3.5/site-packages/spyder/utils/site/sitecustomize.py",
line 710, in runfile
execfile(filename, namespace)
File
"/home/abc/anaconda3/envs/tensorflow/lib/python3.5/site-packages/spyder/utils/site/sitecustomize.py",
line 101, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/path/flags.py", line 19, in
tf.app.run()
File
"/home/abc/anaconda3/envs/tensorflow/lib/python3.5/site-packages/tensorflow/python/platform/app.py",
line 126, in run
_sys.exit(main(argv))
TypeError: main() takes 0 positional arguments but 1 was given
...which is strange because I do not give a single argument to main(). However, if I add an underscore def main(_):, it works without any errors.
I couldn't find a doc where this use of the underscore is described. Does anybody know what happens here? Thank you!
The error message I see in Pycharm IDE when I execute your code is clearer.
Traceback (most recent call last):
File "D:/PycharmProjects/TensorFlow/self.py", line 30, in <module>
tf.app.run()
File "D:\\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\platform\app.py",
line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
TypeError: main() takes 0 positional arguments but 1 was given
_sys.exit(main(_sys.argv[:1] + flags_passthrough)) is trying to call our main method with one argument.
This is the run method in app.py
A stripped down version of the run method can be used to test.
import tensorflow as tf
import sys as _sys
from tensorflow.python.platform import flags

# command line flags
tf.app.flags.DEFINE_string('mystring', 'Hello World!',
                           '''String to print to console.''')

FLAGS = tf.app.flags.FLAGS


def run(main=None, argv=None):
    """Runs the program with an optional 'main' function and 'argv' list.

    Stripped-down copy of tensorflow.python.platform.app.run, kept here to
    demonstrate how tf.app.run() ends up calling main() with one positional
    argument (the script name plus any passthrough flags).
    """
    f = flags.FLAGS
    # Extract the args from the optional `argv` list.
    args = argv[1:] if argv else None
    # Parse the known flags from that list, or from the command
    # line otherwise.
    # pylint: disable=protected-access
    flags_passthrough = f._parse_flags(args=args)
    # pylint: enable=protected-access
    main = main or _sys.modules['__main__'].main
    # _sys.argv[:1] is just the script name -- this is the single argument
    # the real app.run() forwards to main().
    print(_sys.argv[:1])
    # Call the main function, passing through any arguments
    # to the final program.
    #_sys.exit(main(_sys.argv[:1] + flags_passthrough))
    # Call the main function with no arguments
    #_sys.exit(main())


def main():
    print(FLAGS.mystring)


if __name__ == '__main__':
    #tf.app.run()
    run()
print(_sys.argv[:1]) prints ['D:/PycharmProjects/TensorFlow/self.py'] since
argv[0] is the script name passed to the interpreter.
Maybe you can find the answer at this link, which explains how app.py runs.
You can also define your main function as def main(argv=None): ..., or, as you did, def main(_): ...; either way it works because main now accepts the single argument that tf.app.run() passes to it.
I had a similar problem when using cProfile and calling script with
python -m cProfile train.py
Seems like the problem was that tf.app.run called main inside cProfile which wasn't ready for argument passing. In my case the solution was to specify main in tf.app.run():
tf.app.run(main=main)
Don't forget to add fake argument in main like this def main(_):.
Related
I'm trying to run these lines of code in atom and python3.6 :
from pycall import CallFile, Call, Application
import sys


def call():
    """Spool an Asterisk call file that plays 'hello-world' to SIP/200."""
    c = Call('SIP/200')
    a = Application('Playback', 'hello-world')
    cf = CallFile(c, a)
    # spool() validates the call file first; it raises
    # pycall.errors.ValidationError when validation fails (e.g. the spool
    # directory, /var/spool/asterisk/outgoing by default, is unusable).
    cf.spool()


if __name__ == '__main__':
    call()
But I receive this error:
Traceback (most recent call last):
File "/home/pd/gits/voiphone/main.py", line 12, in <module>
call()
File "/home/pd/gits/voiphone/main.py", line 9, in call
cf.spool()
File "/home/pd/telephonerelayEnv/lib/python3.6/site-packages/pycall/callfile.py", line 135, in spool
self.writefile()
File "/home/pd/telephonerelayEnv/lib/python3.6/site-packages/pycall/callfile.py", line 123, in writefile
f.write(self.contents)
File "/home/pd/telephonerelayEnv/lib/python3.6/site-packages/pycall/callfile.py", line 118, in contents
return '\n'.join(self.buildfile())
File "/home/pd/telephonerelayEnv/lib/python3.6/site-packages/pycall/callfile.py", line 100, in buildfile
raise ValidationError
pycall.errors.ValidationError
I would appreciate if you help me solving my problem.
thank you in advance
Looking at the source code for the validity check, it appears like the only check that could be catching you out is the one that verifies the spool directory. By default this is set to /var/spool/asterisk/outgoing but can be changed when you create the callfile:
cf = CallFile(c, a, spool_dir='/my/asterisk/spool/outgoing')
I discovered a strange error when using concurrent.futures to read from multiple text files.
Here is a small reproducible example:
import os
import concurrent.futures


def read_file(file):
    """Print every row of `file`, resolved against the global data_dir."""
    # NOTE(review): data_dir is assigned only inside the __main__ guard
    # below. Worker processes started with the "spawn" method (the default
    # on Windows) re-import this module without executing that guard, which
    # is what produces the NameError discussed here. Kept as-is because the
    # surrounding text analyzes this failure; the fix is to assign data_dir
    # at module level (before the guard).
    with open(os.path.join(data_dir, file), buffering=1000) as f:
        for row in f:
            try:
                print(row)
            except Exception as e:
                print(str(e))


if __name__ == '__main__':
    data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
    files = ['file1', 'file2']
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for file, _ in zip(files, executor.map(read_file, files)):
            pass
file1 and file2 are arbitrary text files in the data directory.
I am getting the following error (basically a process tries to read data_dir variable before it is assigned):
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\process.py", line 175, in _process_worker
r = call_item.fn(*call_item.args, **call_item.kwargs)
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\process.py", line 153, in _process_chunk
return [fn(*args) for args in chunk]
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\process.py", line 153, in <listcomp>
return [fn(*args) for args in chunk]
File "C:\Users\my_username\Downloads\example.py", line 5, in read_file
with open(os.path.join(data_dir, file),buffering=1000) as f:
NameError: name 'data_dir' is not defined
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "example.py", line 16, in <module>
for file,_ in zip(files,executor.map(read_file,files)):
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\_base.py", line 556, in result_iterator
yield future.result()
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\_base.py", line 405, in result
return self.__get_result()
File "C:\Users\my_username\AppData\Local\Continuum\Anaconda3\lib\concurrent\futures\_base.py", line 357, in __get_result
raise self._exception
NameError: name 'data_dir' is not defined
If I place data_dir assignment before if __name__ == '__main__': block, I don't get this error and the code executes as expected.
What is causing this error? Clearly, data_dir is assigned before any asynchronous calls should be made in both cases.
ProcessPoolExecutor spawns a new Python process, imports the right module and calls the function you provide.
As data_dir will only be defined when you run the module, not when you import it, the error is to be expected.
Providing the data_dir file descriptor as an argument to read_file might work, as I believe that processes inherit the file descriptors of their parents. You'd need to check, though.
If were to use a ThreadPoolExecutor however, your example should work, as the spawned threads share memory.
fork() is not available on Windows, so Python uses spawn to start a new process, which launches a fresh Python interpreter: no memory is shared, but Python tries to recreate the worker function's environment in the new process, which is why module-level variables work. See the doc for more detail.
I am writing a program and I need the program to return a integer say changed to remind the controller that there are something changed.But when I want to return this value, I kept receiving this message:
return 1
SyntaxError: 'return' outside function
I have read some posts online they all said it is the indent problem but I am sure My indent is right because I have tried such a simple program but the failure still exists.
# -*- coding: utf-8 -*-
# NOTE(review): `return` is only valid inside a function body. Even under
# the __main__ guard this is module-level code, so Python raises
# "SyntaxError: 'return' outside function" -- the error being asked about.
# To hand a status code back to the invoking shell, use sys.exit(1) instead.
if __name__ == "__main__":
return 1
here is the error message:
runfile('/home/iphyer/untitled1.py', wdir='/home/iphyer')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 699, in runfile
execfile(filename, namespace)
File "/usr/local/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 81, in execfile
builtins.execfile(filename, *where)
File "/home/iphyer/untitled1.py", line 9
return 1
SyntaxError: 'return' outside function
Although my real program is more complex than this, the structure is the same.
I am confused because if I comment the return statement all program can be run without any warning.
It is quite confusing. I guess I cannot use if __name__ == "__main__": to return a value?
Thank you!~
The body of the if __name__ == "__main__": executes when the file in question is being run as a standalone program. In that case, what you may want is exit() or sys.exit() which allow you to return limited information to the script that invoked this one. Typically you can either signal success with exit(0) or set of possible failures with a non-zero result (limited to 1 - 255). return is for subroutines/function, exit is for programs.
Because an if statement isn't a function. In Python you can only return from inside a function.
I am trying to make a very simple application that allows for people to define their own little python scripts within the application. I want to execute the code in a new process to make it easy to kill later. Unfortunately, Python keeps giving me the following error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.7/dist-packages/spyderlib/widgets/externalshell/sitecustomize.py", line 540, in runfile
execfile(filename, namespace)
File "/home/skylion/Documents/python_exec test.py", line 19, in <module>
code_process = Process(target=exec_, args=(user_input_code))
File "/usr/lib/python2.7/multiprocessing/process.py", line 104, in __init__
self._args = tuple(args)
TypeError: 'code' object is not iterable
>>>
My code is posted below
user_input_string = '''
import os
world_name='world'
robot_name='default_body + os.path.sep'
joint_names=['hingejoint0', 'hingejoint1', 'hingejoint2', 'hingejoint3', 'hingejoint4', 'hingejoint5', 'hingejoint6', 'hingejoint7', 'hingejoint8']
print(joint_names)
'''


def exec_(arg):
    # SECURITY NOTE(review): exec() runs the supplied code with full
    # interpreter privileges -- only acceptable for trusted input.
    exec(arg)


user_input_code = compile(user_input_string, 'user_defined', 'exec')

from multiprocessing import Process

# NOTE(review): `(user_input_code)` is NOT a tuple -- parentheses alone do
# not create one, so Process iterates the code object itself and raises
# "TypeError: 'code' object is not iterable". Kept as-is because this is
# the bug being asked about; the fix is a trailing comma:
# args=(user_input_code,).
code_process = Process(target=exec_, args=(user_input_code))
code_process.start()
What am I missing? Is there something wrong with my user_input_string? With my compile options? Any help would be appreciated.
I believe args must be a tuple. To create a single-element tuple, add a comma like so: args=(user_input_code,)
I am trying to write a script that will start an new engine.
Using some code from IPython source I have:
[engines.py]
def make_engine(**kwargs):
    """Launch a new IPython parallel engine in this process.

    Keyword arguments are forwarded to the engine application; the
    traceback below shows them reaching app.launch_new_instance(**kwargs).
    """
    from IPython.parallel.apps import ipengineapp as app
    # Forward the configuration keywords; without this pass-through the
    # call below would fail immediately with a TypeError.
    app.launch_new_instance(**kwargs)


if __name__ == '__main__':
    make_engine(file='./profiles/security/ipcontroller-engine.json',
                config='./profiles/e2.py')
if I run this with python engines.py in the command line I run into a configuration problem and my traceback is:
Traceback (most recent call last):
File "engines.py", line 30, in <module>
make_engine(file='./profiles/security/ipcontroller-engine.json', config='./profiles/e2.py')
File "engines.py", line 20, in make_engine
app.launch_new_instance(**kwargs)
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 562, in launch_instance
app = cls.instance(**kwargs)
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/configurable.py", line 354, in instance
inst = cls(*args, **kwargs)
File "<string>", line 2, in __init__
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 94, in catch_config_error
app.print_help()
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 346, in print_help
self.print_options()
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 317, in print_options
self.print_alias_help()
File "/Users/martin/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 281, in print_alias_help
cls = classdict[classname]
KeyError: 'BaseIPythonApplication'
if I do a super ugly hack like the following, it works:
def make_engine():
    from IPython.parallel.apps import ipengineapp as app
    app.launch_new_instance()


if __name__ == '__main__':
    import sys
    # OUCH this is ugly! Overwrite the process argument vector before the
    # launcher parses it. NOTE(review): it must be `sys.argv` that is
    # assigned -- `from sys import argv; argv = [...]` only rebinds a local
    # module name and leaves the real sys.argv untouched. argv[0] (the
    # program name) is preserved because command-line parsers skip it.
    sys.argv = [sys.argv[0],
                '--file=./profiles/security/ipcontroller-engine.json',
                '--config=./profiles/e2.py']
    make_engine()
Why can't I pass the keyword arguments in the launch_new_instance method?
What are the right keyword arguments?
Where can I get the entry point to entering my configuration options?
Thanks,
Martin
The way to instantiate a new ipengine using the IPEngineApp api is:
def make_engine():
    """Start an ipengine in-process via the IPEngineApp API."""
    from IPython.parallel.apps.ipengineapp import IPEngineApp

    lines1 = "a_command()"
    app1 = IPEngineApp()
    app1.url_file = './profiles/security/ipcontroller-engine.json'
    app1.cluster_id = 'e2'
    app1.startup_command = lines1
    app1.init_engine()
    # Takes over execution of this process: no statements after this call
    # will run until the engine exits, so only one engine can be started
    # this way per script.
    app1.start()
However, this starts a new ipengine process that takes control of the script execution process, so there is no way I can start multiple engines in the same script using this method.
Thus I had to fallback on the subprocess module to spawn all additional new ipengines:
import subprocess
import os

# Spawn additional engines as separate OS processes, since an in-process
# IPEngineApp takes control of the current interpreter.
pids = []
for num in range(1, 3):
    # Absolute paths let each engine resolve its config and connection
    # file regardless of its working directory.
    args = ["ipengine",
            "--config", os.path.abspath("./profiles/e%d.py" % num),
            "--file", os.path.abspath("./profiles/security/ipcontroller-engine.json")]
    pid = subprocess.Popen(args).pid
    pids.append(pid)