FastAPI, python, get - pass variable from different function

In file1 (the FastAPI file):

app = FastAPI(title='myapp', docs_url='/', description="some_desc")
app.store = "In file1"

def run_fastapi(context):
    uvicorn.run("api:app", reload=False)

@app.get("/api/some_value", responses=SOME_RESPONSES)
def get_parcels(response: Response):
    test = app.store
    print(test)
Then in file2 I want to be able to do:

import file1

if __name__ == "__main__":
    file1.app.store = "In file2"
    file1.app.run_fastapi()

But every time I get "In file1". How can I modify my code so that the value of app.store can be changed from inside file2?
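A likely explanation, assuming file1 is the module uvicorn imports as "api": passing the import string "api:app" makes uvicorn re-import that module, so a fresh app object is created with its default attribute and the assignment made in file2 is lost. One hedged sketch of a workaround is to pass the already-imported app object instead of the import string (the adjusted run_fastapi signature and the host/port values below are assumptions, not the original code):

# file1 (sketch)
def run_fastapi(context=None):
    # pass the app object itself, not "api:app", so the instance
    # mutated from file2 is the one uvicorn actually serves
    uvicorn.run(app, host="127.0.0.1", port=8000, reload=False)

# file2 (sketch)
import file1

if __name__ == "__main__":
    file1.app.store = "In file2"
    file1.run_fastapi()

Note that reload=True (and multi-worker setups) require the import-string form, in which case per-run state would have to be set at import time or read from configuration instead.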


Using tqdm progress bar in a if statement

Actually I have this code:

#!/usr/bin/env python3
import sys
import requests
import random
from multiprocessing.dummy import Pool
from pathlib import Path

requests.urllib3.disable_warnings()

print('Give name of txt file on _listeNDD directory (without .txt)')
file = str(input())

if Path('_listeNDD/' + file + '.txt').is_file():
    print('--------------------------------------------------------')
    print("Found")
    print('--------------------------------------------------------')
    print('Choose name for the output list (without .txt)')
    nomRez = str(input())
    filename = '_listeNDD/' + file + '.txt'
    domains = [i.strip() for i in open(filename, mode='r').readlines()]
else:
    print('--------------------------------------------------------')
    exit('No txt found with this name')

def check(domain):
    try:
        r = requests.get('https://' + domain + '/test', timeout=5, allow_redirects=False)
        if "[core]" in r.text:
            with open('_rez/' + nomRez + '.txt', "a+") as f:
                print('https://' + domain + '/test', file=f)
    except:
        pass

mp = Pool(100)
mp.map(check, domains)
mp.close()
mp.join()
exit('finished')
[Screenshot of the root directory]
With this code, it opens a text file from the directory "_listeNDD" and writes a new text file into the directory "_rez".
Obviously it's super fast for ten elements, but when the list gets bigger I would like a progress bar so I know whether I have time to make a coffee or not.
I personally tried using tqdm from GitHub, but unfortunately it shows a progress bar for every job it does, while I only want one for everything...
Any idea?
Thank you
EDIT: Using this post, I did not succeed with

if __name__ == '__main__':
    p = Pool(100)
    r = p.map(check, tqdm.tqdm(range(0, 30)))
    p.close()
    p.join()

I don't have a high enough Python level to master this, so I may have integrated it badly into my code.
I also saw:

if __name__ == '__main__':
    r = process_map(check, range(0, 30), max_workers=2)
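A common way to get a single bar for the whole pool is to let tqdm wrap the iterator returned by imap_unordered, so the bar advances once per finished job. A minimal sketch against the code above, keeping the existing check function and domains list (this chunk is an assumption, not a tested drop-in):

from multiprocessing.dummy import Pool
from tqdm import tqdm

mp = Pool(100)
# imap_unordered yields results as the workers finish them; total= is
# required because the iterator itself has no length
for _ in tqdm(mp.imap_unordered(check, domains), total=len(domains)):
    pass
mp.close()
mp.join()

This replaces the mp.map(check, domains) call; everything else in the script can stay as it is.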

Imported function is not working when used from another module

I have been trying to understand what is wrong with my code, with no success...
I have two .py files with some functions I have written: logs.py is supposed to write an input to a file, and monitor_mode.py uses those functions.
When running logs.py as main everything works fine and the file is created and written to; however, when trying to use the same function from monitor_mode.py nothing seems to be written to the files, and I have no idea why.
I did try to debug, and the code is directed to the right function and everything goes as expected, except there is no creation of the file or data written to it.
Thanks for any help.
logs.py

import datetime

serviceList = 'serviceList.txt'
statusLog = 'statusLog.txt'

def print_to_file(file_name, input):
    with open(file_name, 'a+') as write_obj:
        write_obj.write(input + '\n')
        write_obj.close()

def add_timestamp(input):
    timestamp = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + datetime.datetime.now().strftime(
        "%Y-%m-%d %H:%M:%S") + '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    input = timestamp + '\n' + input
    return input

if __name__ == "__main__":
    import services
    for i in range(3):
        proc = services.list_of_process()
        proc = add_timestamp(proc)
        print_to_file(serviceList, proc)
monitor_mode.py

import logs
import services

serviceList = 'serviceList.txt'
statusLog = 'statusLog.txt'

def updates_log():
    proc = services.list_of_process()
    proc = logs.add_timestamp(proc)
    logs.print_to_file(serviceList, proc)
    print('Updates Logs\n' + proc)

if __name__ == "__main__":
    for i in range(3):
        updates_log()
EDIT 1.1
The above code is running on Ubuntu 16.8; when running the code on a Win10 machine it works just fine.
services.list_of_process() returns a string.
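One hedged guess, since the question does not show how monitor_mode.py is started on Ubuntu: 'serviceList.txt' is a relative path, so the file is created in whatever the current working directory happens to be at runtime, which can differ between a terminal session, cron, or a service manager. A small sketch of making the output location independent of the working directory (the BASE_DIR variable is an illustrative addition, not part of the original code):

import os

# build absolute paths next to this module instead of relying on
# the process's current working directory
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
serviceList = os.path.join(BASE_DIR, 'serviceList.txt')
statusLog = os.path.join(BASE_DIR, 'statusLog.txt')

If the files then appear where expected, the original data was most likely being written to a different directory rather than not being written at all.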

Unittest for click module

I wrote a simple command line utility that accepts a text file and searches for a given word in it using the click module.
sfind.py
import click

@click.command()
@click.option('--name', prompt='Word or string')
@click.option('--filename', default='file.txt', prompt='file name')
@click.option('--param', default=1, prompt="Use 1 for save line and 2 for word, default: ")
def find(name, filename, param):
    """Simple program that finds a word or string in a text file and puts it in a new one"""
    try:
        with open(filename) as f, open('result.txt', 'w') as f2:
            count = 0
            for line in f:
                if name in line:
                    if param == 1:
                        f2.write(line + '\n')
                    elif param == 2:
                        f2.write(name + '\n')
                    count += 1
            print("Find: {} sample".format(count))
            return count
    except FileNotFoundError:
        print('WARNING! ' + 'File: ' + filename + ' not found')

if __name__ == '__main__':
    find()
Now I need to write a test using unittest (using unittest is required).
test_sfind.py
import unittest
import sfind

class SfindTest(unittest.TestCase):
    def test_sfind(self):
        self.assertEqual(sfind.find(), 4)

if __name__ == '__main__':
    unittest.main()
When I run the test:
python -m unittest test_sfind.py
I get an error
click.exceptions.UsageError: Got unexpected extra argument (test_sfind.py)
How can I test this click command?
You cannot simply call a click command and then expect it to return. The decorators applied to make a click command considerably change the behavior of the function. Fortunately, the click framework provides for this through the CliRunner class.
Your command can be tested via unittest with something like this:
import unittest
import sfind
from click.testing import CliRunner

class TestSfind(unittest.TestCase):
    def test_sfind(self):
        runner = CliRunner()
        result = runner.invoke(
            sfind.find, '--name url --filename good'.split(), input='2')
        self.assertEqual(0, result.exit_code)
        self.assertIn('Find: 3 sample', result.output)
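If the test should not depend on a real file named good already being on disk, CliRunner.isolated_filesystem() can run the command in a throwaway directory. A hedged variant of the test method above, meant to sit inside the same TestCase (the file contents and the expected count of 2 are made-up example values):

def test_sfind_isolated(self):
    runner = CliRunner()
    # everything below runs inside a temporary directory, so 'good'
    # and the generated 'result.txt' never touch the real filesystem
    with runner.isolated_filesystem():
        with open('good', 'w') as f:
            f.write('url one\nnothing here\nurl two\n')
        result = runner.invoke(
            sfind.find, '--name url --filename good'.split(), input='2')
        self.assertEqual(0, result.exit_code)
        self.assertIn('Find: 2 sample', result.output)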
For those wanting to test exceptions in a click command, I have found this way to do it:

def test_download_artifacts(
    self,
):
    runner = CliRunner()
    # test exception raised for invalid dir format
    result = runner.invoke(
        my_module.download_artifacts,
        '--bucket_name my_bucket \
        --artifact_dir artifact_dir'.split(),
        input='2')
    print(f"result.exception: {result.exception}")
    assert "Enter artifact_dir ending" in str(result.exception)

Can I configure Python to have MATLAB-like print?

Can I configure Python to have MATLAB-like printing, so that when I just have a function call

returnObject()

it simply prints that object without me having to wrap it in print()? I assume this is not easy, but something like: if an object does not get bound to some other variable, it should get printed, so that this would work:

a = 5            # prints nothing
b = getObject()  # prints nothing
a                # prints 5
b                # prints getObject()
getObject()      # prints the object
If you use an IPython notebook, individual cells work like this, but you can only view one object per cell by typing the object's name. To see multiple objects you'd need to call print, or use lots of cells.
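If IPython/Jupyter is acceptable, there is also a configuration option that makes a cell display every top-level expression rather than only the last one. A small sketch (the option name is real IPython configuration; the surrounding cell contents are just an example):

from IPython.core.interactiveshell import InteractiveShell

# display the value of every bare expression in a cell, not just the last one
InteractiveShell.ast_node_interactivity = "all"

a = 5
a              # displayed
'some text'    # also displayed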
You could write a script that modifies the original script based on a set of rules defining what to print, then run the modified script.
A basic script to do this would be:

f = open('main.py', 'r')
p = open('modified.py', 'w')
p.write('def main(): \n')
for line in f:
    temp = line
    # crude rule: a line holding a single bare name gets wrapped in print()
    if len(temp.strip()) == 1:
        temp = 'print(' + line + ')'
    p.write('\t' + temp)
p.close()

from modified import main
main()
The script main.py would then look like this:
x = 236
x
output:
236
The idea is as follows: parse the AST of the Python code, replace every expression statement with a call to print that takes the content of the expression as its argument, and then run the modified version. I'm not sure whether it works with every piece of code, but you might try. Save it as matlab.py and run your code as python3 -m matlab file.py.
#!/usr/bin/env python3
import ast
import os
import sys

class PrintAdder(ast.NodeTransformer):
    def add_print(self, node):
        print_func = ast.Name("print", ast.Load())
        print_call = ast.Call(print_func, [node.value], [])
        print_statement = ast.Expr(print_call)
        return print_statement

    def visit_Expr(self, node):
        # leave explicit print(...) calls alone
        if (isinstance(node.value, ast.Call)
                and isinstance(node.value.func, ast.Name)
                and node.value.func.id == 'print'):
            return node
        return self.add_print(node)

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=argparse.FileType(), nargs='?', default='-')
    args = parser.parse_args()

    with args.infile as infile:
        code = infile.read()
        file_name = args.infile.name
        tree = ast.parse(code, file_name, 'exec')
        tree = PrintAdder().visit(tree)
        tree = ast.fix_missing_locations(tree)
        bytecode = compile(tree, file_name, 'exec')
        exec(bytecode)

if __name__ == '__main__':
    main()
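As a quick check of the transformer above, a file containing an assignment and a bare expression (the file name is just an example):

# file.py
x = 236
x

Running python3 matlab.py file.py (or the -m form above) prints 236: the bare x expression statement is rewritten to print(x), while the assignment is left untouched.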

Need to generate a new text file and save it every time I run a script in python

I currently have a program that appends to an already existing file called "ConcentrationData.txt". However, I would like to create a new text file every time the program is run, preferably with a file name that has the date and time. This is what my current script looks like:
def measureSample(self):
    sys.stdout.flush()
    freqD1, trandD1, absoD1 = dev.getMeasurement(LED_TO_COLOR='D1')
    freqD2, trandD2, absoD2 = dev.getMeasurement(LED_TO_COLOR='D2')
    absoDiff = absoD1 - absoD2
    Coeff = 1
    Conc = absoDiff / Coeff
    Conc3SD = '{Value:1.{digits}f}'.format(Value=Conc, digits=3)
    self.textEdit.clear()
    self.textEdit.setText('Concentration is {0}'.format(Conc3SD))
    timeStr = time.strftime('%m-%d-%Y %H:%M:%S %Z')
    outFile = open('ConcentrationData.txt', 'a')
    outFile.write('{0} || Concentration: {1}'.format(timeStr, Conc3SD))
    outFile.close()
How would I go about doing that?
(Also, I'm pretty new to Python, so I'm sorry if this sounds like a silly question.)
You can do something along the lines of the following:

class my_class:
    _data_fd = None

    def __init__(self, create, filename):
        if create:
            self._data_fd = open(filename, 'w')

    def __del__(self):
        if self._data_fd is not None:
            self._data_fd.close()

    def measureSample(self):
        ## do something here
        outFile = self._data_fd
        outFile.write('{0} || Concentration: {1}'.format(timeStr, Conc3SD))

if __name__ == '__main__':
    timeStr = time.strftime('%m-%d-%Y_%H_%M_%S_%Z')  # use underscores instead of spaces
    filename = "{0}.{1}".format("Data.txt", timeStr)
    imy_class = my_class(1, filename)
    imy_class.measureSample()
    imy_class.measureSample()  ## call multiple times; the fd remains open for the lifetime of the object
    del imy_class  ### the file closes now and you will have multiple lines of data
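If a full class is more than needed, a minimal sketch of the direct change to the original code is just to build the file name from a timestamp computed once at program start (write_measurement is a hypothetical helper for illustration, and the strftime pattern is only an example; any pattern without characters that are illegal in file names works):

import time

# compute once at startup so every write from this run goes to the same file
timeStr = time.strftime('%m-%d-%Y_%H-%M-%S')
filename = 'ConcentrationData_{0}.txt'.format(timeStr)

def write_measurement(conc3sd):
    # mode 'a' still creates the file if it does not exist, and the
    # timestamped name makes it a new file for every run of the program
    with open(filename, 'a') as out_file:
        out_file.write('{0} || Concentration: {1}\n'.format(timeStr, conc3sd))

write_measurement('1.234')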
