Can I configure Python to have MATLAB-like print?

Can I configure Python to have MATLAB-like printing, so that when I just have a bare function call
returnObject()
it simply prints the returned object without me having to wrap it in print()? I assume this is not easy, but the idea is something like: if a value does not get bound to some other variable, it should get printed, so that this would work.
a = 5 #prints nothing
b = getObject() #prints nothing
a #prints 5
b #prints getObject()
getObject() #prints the object

If you use an IPython notebook, individual cells work like this. By default, though, you can only display one object per cell by typing the object's name; to see multiple objects you'd need to call print, or use lots of cells.
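As a side note, IPython has a configuration option that displays every top-level expression in a cell rather than only the last one (setting name from memory, so double-check it against the IPython docs):
# in a notebook cell, or in an ipython_config.py
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

a = 5
a          # displayed
"hello"    # also displayed, not just the final expression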

You could write a script to modify the original script based on a set of rules that define what to print, then run the modified script.
A basic script to do this would be:
f = open('main.py', 'r')
p = open('modified.py', 'w')
p.write('def main():\n')
for line in f:
    temp = line.rstrip('\n')
    # wrap lines that consist of a single bare name/expression in print()
    if len(temp.split()) == 1 and not temp.startswith('print'):
        temp = 'print(' + temp + ')'
    p.write('\t' + temp + '\n')
f.close()
p.close()

from modified import main
main()
The script main.py would then look like this:
x = 236
x
output:
236

The idea is as follows: parse the AST of the Python code, replace every expression statement with a call to print that takes the expression as its argument, and then run the modified version. I'm not sure whether it works with every piece of code, but you can try. Save it as matlab.py and run your code as python3 -m matlab file.py.
#!/usr/bin/env python3
import ast
import sys


class PrintAdder(ast.NodeTransformer):
    def add_print(self, node):
        # wrap the bare expression in a print(...) call
        print_func = ast.Name("print", ast.Load())
        print_call = ast.Call(print_func, [node.value], [])
        print_statement = ast.Expr(print_call)
        return print_statement

    def visit_Expr(self, node):
        # leave explicit print(...) calls untouched
        if (isinstance(node.value, ast.Call)
                and isinstance(node.value.func, ast.Name)
                and node.value.func.id == 'print'):
            return node
        return self.add_print(node)


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
    args = parser.parse_args()

    with args.infile as infile:
        code = infile.read()
        file_name = args.infile.name

    tree = ast.parse(code, file_name, 'exec')
    tree = PrintAdder().visit(tree)
    tree = ast.fix_missing_locations(tree)
    bytecode = compile(tree, file_name, 'exec')
    exec(bytecode)


if __name__ == '__main__':
    main()
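As a quick sanity check (the file name is just an example), a file.py containing
x = 236
x
should print 236 when run as python3 -m matlab file.py, because the bare expression statement x gets rewritten to print(x).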

How to open a file passed in the command line arguments

I have my config encoded here:
@staticmethod
def getConfig(env):
    pwd = os.getcwd()
    if "win" in platform.system().lower():
        f = open(pwd + "\\config_" + env.lower() + "_data2.json")
    else:
        f = open(pwd + "/config_" + env.lower() + "_data2.json")
    config = json.load(f)
    f.close()
    return config

@staticmethod
def isWin():
    if "win" in platform.system().lower():
        return True
    else:
        return False
I have 2 JSON files I want my script to read, but as written above it only reads one of them. I want to know how to change it to something like:
f = open(pwd+"\config_"+env.lower()+"_data_f'{}'.json")
so it can read either dataset1.config or dataset2.config. I'm not sure if this is possible, but I want to be able to specify which file to use on the command line: python datascript.py -f dataset1.config or python datascript.py -f dataset2.config. Do I assign that entire open() call to a variable?
All you need to do is parse sys.argv to get the argument of the -f flag, then concatenate the strings and pass the result to open(). Try this:
import sys
### ... more code ...

@staticmethod
def getConfig(env):
    pwd = os.getcwd()
    file = None
    try:
        file = sys.argv[sys.argv.index('-f') + 1]
    except ValueError:
        file = "data2.json"
    if "win" in platform.system().lower():
        f = open(pwd + "\\config_" + env.lower() + "_" + file)
    else:
        f = open(pwd + "/config_" + env.lower() + "_" + file)
    config = json.load(f)
    f.close()
    return config
sys.argv.index('-f') gives the index of -f in the command-line arguments, so the next element must be the filename. The try-except provides a default value if no -f argument is given (index raises ValueError when -f is missing).
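If you'd rather not scan sys.argv by hand, here is a rough argparse-based sketch of the same idea (written as a plain function rather than the static method above; the flag name and default are just carried over):
import argparse
import json
import os
import platform

def getConfig(env):
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', default='data2.json', help='config data file to load')
    args, _ = parser.parse_known_args()  # ignore any other flags the script may take
    sep = "\\" if "win" in platform.system().lower() else "/"
    path = os.getcwd() + sep + "config_" + env.lower() + "_" + args.file
    with open(path) as fh:
        return json.load(fh)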

How to call input parameters into another python file using argparse?

I have the following files in a python pipeline
#in_para.py
nx = 31
c = 1

#solver.py
import numpy
import os

def simple(output):
    ny = nx + 5
    c_2 = c + 2
    with open(os.path.join(output, 'Output.txt'), 'w') as f:
        print("Hello stackoverflow!", file=f)
        print("I have a question.", file=f)

if __name__ == '__main__':
    simple()
#main.py
import os
import numpy
import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-C', '--Chk', type=str, help='Choose arg')
    parser.add_argument('-O', '--output', type=str, default="./Output", help='Provide the output Directory')
    args = vars(parser.parse_args())
    output = args['output']
    if not os.path.exists(output):
        os.makedirs(output)
    if args['Chk'] == 'compo1':
        simple(output)

if __name__ == '__main__':
    main()
I would like to pass the input file in_para.py through a command-line argument, such that
python3 main.py -I in_para -C compo1 -O Ouput_dir
gives me the desired output:
this is a simple test 36
this is a simple test2 3
I know that if I do from in_para import * it will solve the problem (in a different manner), but I would like to pass the input parameters from the command line as a positional argument and hand them on to solver.py. I haven't been able to find an example in the documentation similar to the task above. Besides, the above is just an example: in_para.py and solver.py have several input parameters and several lines of code, so I don't want the user of main.py to go into either file and modify it.
Change the implementation in solver.py
def simple(nx, c, output):  # provide parameters as function arguments
    ny = nx + 5
    c_2 = c + 2
    print("this is a simple test", ny)
    print("this is a simple test2", c_2)
    with open(os.path.join(output, 'Output.txt'), 'w') as f:
        print("Hello stackoverflow!", file=f)
        print("I have a question.", file=f)
In main.py, parse the parameters file:
... # other imports
import json

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-C', '--Chk', type=str, help='Choose arg')
    parser.add_argument('-I', '--input', type=argparse.FileType("r"), help='Input parameters')
    parser.add_argument('-O', '--output', type=str, help='output dir')
    args = parser.parse_args()

    output = args.output
    # implement any way of parsing the input file, e.g. a json file
    params = json.load(args.input)
    ... # do something else with the arguments
    if args.Chk == 'compo1':
        simple(output=output, **params)
    ... # further code lines
Your input file would then look like:
{"nx": 31, "c": 1}
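With that saved as, say, in_para.json, the call from the command line would then look roughly like:
python3 main.py -I in_para.json -C compo1 -O Output_dir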
Comment on the edit:
Either you use args = vars(parser.parse_args()), which results in a dictionary, or you use args = parser.parse_args() and select arguments by attribute. In your original post you used the first approach, which I adopted in the first version of this answer; however, I would prefer not to use vars.
Note that the code snippet creating the output directory does not add value to the actual question of the post, so I removed it to avoid distraction and added a placeholder instead.
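For illustration, the two access styles side by side (same parser as above):
args = parser.parse_args()
print(args.output)            # attribute access
print(vars(args)['output'])   # dictionary access after vars()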

How to modify file content using python?

The following code reads a file and uses its syntax tree to append a full stop to the docstrings of that file. How can I save the changes back to the file it was called on? I understand that the present code doesn't change the content of the original file, only the local variables holding it. Can you suggest changes, and if possible provide a learning resource as well?
astcode.py
import ast
import sys
import os

filename = sys.argv[1]

# """Getting all the functions"""
ast_filename = os.path.splitext(ast.__file__)[0] + '.py'

with open(filename) as fd:
    file_contents = fd.read()
module = ast.parse(file_contents)

# with open(filename, 'w') as file:

# module level
if(isinstance(module.body[0], ast.Expr)):
    docstr = module.body[0].value.s
    if(module.body[0].value.s not in '.'):
        docstr += '.'
    # ast.dump(module, include_attributes=True)
    print(docstr)

# function level
function_definitions = [node for node in module.body if isinstance(node, ast.FunctionDef)]
for function in function_definitions:
    # next_node = function_definitions[idx].body
    next_node = function.body
    for new_node in next_node:
        if(isinstance(new_node, ast.Expr)):
            if(isinstance(new_node.value, ast.Str)):
                # Docstring stored in docstr variable.
                docstr = new_node.value.s
                if(docstr[-1] not in '.'):
                    new_node.value.s += '.'
                # astString = ast.dump(new_node, annotate_fields=True, include_attributes=True)
                # print(astString)
                # compile(astString, filename, 'eval')
                # print(exec(astString))
                print(new_node.value.s)

# for line in module:
#     file.write(line)
Example
testfile.py
def readDictionaryFile(dictionary_filename):
    """readDictionaryfile doc string"""
    return []

def readTextFile(text_filename):
    """readTextfile doc string"""
    return []
$ python3 astcode.py testfile.py
Expected
testfile.py
def readDictionaryFile(dictionary_filename):
    """readDictionaryfile doc string."""
    return []

def readTextFile(text_filename):
    """readTextfile doc string."""
    return []
Note: Fullstop(.) appended.
Looking at the documentation link, I notice there's a NodeVisitor and NodeTransformer with a code example. I looked at how they unparse a function def, and it's basically the same as you've done in your original question, so I used that.
# https://docs.python.org/3/library/ast.html#ast.NodeTransformer
class MyDocstringTransformer(ast.NodeTransformer):
    def visit_FunctionDef(self, node):
        if len(node.body):
            if isinstance(node.body[0], ast.Expr):
                if isinstance(node.body[0].value, ast.Constant):
                    if isinstance(node.body[0].value.value, str):
                        docstring = node.body[0].value.value
                        node.body[0].value.value = docstring + '.'
        return node
Using python 3.9's ast module gets us https://docs.python.org/3/library/ast.html#ast.unparse which is about as close as we can get to changing the ast node and then rewriting the original file.
tree = ast.parse(file_contents)
new_tree = MyDocstringTransformer().visit(tree)
print(ast.unparse(new_tree))
Instead of just overwriting the same filename, you may want to write to a temp file, then let the OS rename the temp file to the old name, thus performing the replacement at the filesystem level.
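A minimal sketch of that write-then-replace step (reusing new_tree from above and the filename variable from astcode.py):
import os
import tempfile

# write the rewritten source next to the original, then swap it into place
fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(filename)), suffix='.py')
with os.fdopen(fd, 'w') as tmp:
    tmp.write(ast.unparse(new_tree))
os.replace(tmp_path, filename)  # rename the temp file over the original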

Python write both commands and their output to a file

Is there any way to write both commands and their output to an external file?
Let's say I have a script outtest.py :
import random
from statistics import median, mean
d = [random.random()**2 for _ in range(1000)]
d_mean = round(mean(d), 2)
print(f'mean: {d_mean}')
d_median = round(median(d), 2)
print(f'median: {d_median}')
Now if I want to capture its output only I know I can just do:
python3 outtest.py > outtest.txt
However, this will only give me an outtest.txt file with e.g.:
mean: 0.34
median: 0.27
What I'm looking for is a way to get an output file like:
import random
from statistics import median, mean
d = [random.random()**2 for _ in range(1000)]
d_mean = round(mean(d), 2)
print(f'mean: {d_mean}')
>> mean: 0.34
d_median = round(median(d), 2)
print(f'median: {d_median}')
>> median: 0.27
Or some other format (markdown, whatever). Essentially, something like jupyter notebook or Rmarkdown but with using standard .py files.
Is there any easy way to achieve this?
Here's a script I just wrote which quite comprehensively captures printed output and prints it alongside the code, no matter how it's printed or how much is printed in one go. It uses the ast module to parse the Python source, executes the program one statement at a time (kind of as if it was fed to the REPL), then prints the output from each statement. Python 3.6+ (but easily modified for e.g. Python 2.x):
import ast
import sys

if len(sys.argv) < 2:
    print(f"Usage: {sys.argv[0]} <script.py> [args...]")
    exit(1)

# Replace stdout so we can mix program output and source code cleanly
real_stdout = sys.stdout

class FakeStdout:
    ''' A replacement for stdout that prefixes # to every line of output, so it can be mixed with code. '''
    def __init__(self, file):
        self.file = file
        self.curline = ''

    def _writerow(self, row):
        self.file.write('# ')
        self.file.write(row)
        self.file.write('\n')

    def write(self, text):
        if not text:
            return
        rows = text.split('\n')
        self.curline += rows.pop(0)
        if not rows:
            return
        for row in rows:
            self._writerow(self.curline)
            self.curline = row

    def flush(self):
        if self.curline:
            self._writerow(self.curline)
            self.curline = ''

sys.stdout = FakeStdout(real_stdout)

class EndLineFinder(ast.NodeVisitor):
    ''' This class functions as a replacement for the somewhat unreliable end_lineno attribute.
    It simply finds the largest line number among all child nodes. '''
    def __init__(self):
        self.max_lineno = 0

    def generic_visit(self, node):
        if hasattr(node, 'lineno'):
            self.max_lineno = max(self.max_lineno, node.lineno)
        ast.NodeVisitor.generic_visit(self, node)

# Pretend the script was called directly
del sys.argv[0]

# We'll walk each statement of the file and execute it separately.
# This way, we can place the output for each statement right after the statement itself.
filename = sys.argv[0]
source = open(filename, 'r').read()
lines = source.split('\n')
module = ast.parse(source, filename)
env = {'__name__': '__main__'}
prevline = 0
endfinder = EndLineFinder()

for stmt in module.body:
    # note: end_lineno will be 1-indexed (but it's always used as an endpoint, so no off-by-one errors here)
    endfinder.visit(stmt)
    end_lineno = endfinder.max_lineno
    for line in range(prevline, end_lineno):
        print(lines[line], file=real_stdout)
    prevline = end_lineno
    # run a one-line "module" containing only this statement
    # (build it via ast.parse so required fields like type_ignores exist on newer Pythons)
    mod = ast.parse('')
    mod.body = [stmt]
    exec(compile(mod, filename, 'exec'), env)
    # flush any incomplete output (FakeStdout is "line-buffered")
    sys.stdout.flush()
Here's a test script:
print(3); print(4)
print(5)
if 1:
    print(6)
x = 3
for i in range(6):
    print(x + i)
import sys
sys.stdout.write('I love Python')
import pprint
pprint.pprint({'a': 'b', 'c': 'd'}, width=5)
and the result:
print(3); print(4)
# 3
# 4
print(5)
# 5
if 1:
    print(6)
# 6
x = 3
for i in range(6):
    print(x + i)
# 3
# 4
# 5
# 6
# 7
# 8
import sys
sys.stdout.write('I love Python')
# I love Python
import pprint
pprint.pprint({'a': 'b', 'c': 'd'}, width=5)
# {'a': 'b',
# 'c': 'd'}
You can run the script as a subprocess and inspect its output. You'll have to make some assumptions, though: the output comes only from stdout, only from lines containing the string "print", and each print produces only one line of output. That being the case, an example command to run it:
> python writer.py script.py
And the script would look like this:
from sys import argv
from subprocess import run, PIPE

script = argv[1]
# run the target script and capture its stdout
r = run(['python', script], stdout=PIPE)
out = r.stdout.decode().split('\n')

with open(script, 'r') as f:
    lines = f.readlines()

with open('out.txt', 'w') as f:
    i = 0
    for line in lines:
        f.write(line)
        if 'print' in line:
            f.write('>>> ' + out[i] + '\n')
            i += 1
And the output:
import random
from statistics import median, mean
d = [random.random()**2 for _ in range(1000)]
d_mean = round(mean(d), 2)
print(f'mean: {d_mean}')
>>> mean: 0.33
d_median = round(median(d), 2)
print(f'median: {d_median}')
>>> median: 0.24
For more complex cases with multiline output, or with other statements producing output, this won't work; I guess it would require some in-depth introspection.
My answer might be similar to @Felix's answer, but I'm trying to do it in a more pythonic way.
Just grab the source code (mycode), then execute it, capture the results, and finally write them back to the output file.
Remember that I've used exec for demonstration purposes only; you shouldn't use it in a production environment.
I also used this answer to capture stdout from exec.
import mycode
import inspect
import sys
from io import StringIO
import contextlib

source_code = inspect.getsource(mycode)
output_file = "output.py"

@contextlib.contextmanager
def stdoutIO(stdout=None):
    old = sys.stdout
    if stdout is None:
        stdout = StringIO()
    sys.stdout = stdout
    yield stdout
    sys.stdout = old

# execute code
with stdoutIO() as s:
    exec(source_code)

# capture stdout
stdout = s.getvalue().splitlines()[::-1]

# write to file
with open(output_file, "w") as f:
    for line in source_code.splitlines():
        f.write(line)
        f.write('\n')
        if 'print' in line:
            f.write(">> {}".format(stdout.pop()))
            f.write('\n')
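As an aside, the hand-written stdoutIO helper could likely be replaced by the standard library's contextlib.redirect_stdout; a rough equivalent of the capture step (using the imports already shown above) would be:
buf = StringIO()
with contextlib.redirect_stdout(buf):
    exec(source_code)
stdout = buf.getvalue().splitlines()[::-1]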
You can assign the result to a variable before printing it, and later write those values to a file.
Here is an example:
import random
from statistics import median, mean
d = [random.random()**2 for _ in range(1000)]
d_mean = round(mean(d), 2)
foo = f'mean: {d_mean}'
print(foo)
d_median = round(median(d), 2)
bar = f'median: {d_median}'
print(bar)
# save to file
with open("outtest.txt", "a+") as file:
    file.write(foo + "\n")
    file.write(bar + "\n")
The "a+" argument means the file is opened for appending, so new content is added to the end instead of overwriting the file if it already exists.

Is it possible to print a next line in a code?

Is it possible to make a method which prints the next line of code?
def print_next_line():
    sth
import fxx
print 'XXX'
print_next_line()
file.split('/')
....
>>> 'XXX'
>>> 'file.split('/')'
I was thinking that it could be somewhere in the stack, but I'm not sure, because it is the next line, not the previous one.
A straightforward approach: I use the inspect module to determine the file and line where print_next_line was called, then read the file to find the next line. You might want to add some error handling here (what if there is no next line in the file? and so on):
def print_next_line():
    def get_line(f, lineno):
        with open(f) as fp:
            lines = fp.readlines()
            return lines[lineno - 1]

    import inspect
    callerframerecord = inspect.stack()[1]
    frame = callerframerecord[0]
    info = inspect.getframeinfo(frame)
    line_ = info.lineno
    file_ = info.filename
    print(get_line(file_, line_ + 1))

print('XXX')
a = 1
print_next_line()
b = a*2
All you need is a profiling tool or just a debugger.
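To sketch that debugger idea, sys.settrace can report each line just before it runs (the demo function below is made up for illustration):
import sys
import linecache

def tracer(frame, event, arg):
    # a 'line' event fires right before each source line executes
    if event == 'line':
        src = linecache.getline(frame.f_code.co_filename, frame.f_lineno)
        print('about to run:', src.rstrip())
    return tracer

def demo():
    x = 1
    y = x + 2
    return y

sys.settrace(tracer)
demo()
sys.settrace(None)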
Use Python's inspect module:
import inspect

def print_next_line():
    lineno = inspect.currentframe().f_back.f_lineno
    with open(__file__) as f:
        print(f.readlines()[lineno].rstrip())
Well, you could open() your .py file, iterate to find the specific line, then print it.
