Simplify multiple try and except statements - python

What my function does:
creates a war file by processing the folder contents (/tmp/A, /tmp/B and so on)
Does some file path and folder path manipulations to get the final version from the war file.
store the file name in one variable and the version in another.
Push the war file to the Repository using curl.
I'm using multiple try & except blocks to catch the exception for each action and looks very un-pythonic.
Is there an elegant and simple way to approach this ? thanks in advance.
import shutil
import traceback
import subprocess
import os
import glob
# NOTE(review): indentation was lost in this paste and the `def` line is
# missing its trailing ':' -- the snippet is pseudo-code rather than runnable.
def my_function(path_a, path_b, tmp_dir)
# Stage the input folders into the temp working directory.
try:
<shutil.copy to the tmp dir>
except:
# bare except -- swallows every error (see PEP 8); prints and carries on
traceback.print_exc()
# Build the war file, capturing both output streams.
try:
war_process = subprocess.run([WAR GENERATION COMMAND], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(war_process.stdout.decode("utf-8"))
except subprocess.CalledProcessError as e:
exit_code = e.returncode
stderror = e.stderr
print(exit_code, stderror)
# NOTE(review): if run() raised, war_process was never assigned -- this
# line would itself raise NameError inside the handler
print(war_process.stderr.decode("utf-8"))
try:
output_folder = os.path.join("/tmp/dir/work", FILE_PATH, ARTIFACT_DATE, FILE_WO_EXTENSION)
except:
traceback.print_exc()
try:
# NOTE(review): glob.glob() already returns a list -- file_name is a list here
file_name = list(glob.glob(os.path.join(output_folder, "*.war")))
except:
traceback.print_exc()
try:
# NOTE(review): os.path.join() with a list argument raises TypeError
file_path = os.path.join(output_folder, file_name)
except:
traceback.print_exc()
try:
# NOTE(review): split('war') splits on 'war' anywhere in the path, not just
# the extension -- fragile if any directory name contains 'war'
os.rename(file_path, file_path.split('war')[0] + ".tgz")
except:
traceback.print_exc()
try:
file_version = os.path.basename(file_path)
except:
traceback.print_exc()
# Upload the artifact to the repository with curl.
cmd = "curl -u username -T ....)"
try:
curl_output = subprocess.run([cmd], shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(curl_output.stdout.decode("utf-8"))
except subprocess.CalledProcessError as er:
# NOTE(review): proc_c is undefined -- presumably er.stderr was meant
print(proc_c.stderr.decode("utf-8"))
exit_c = er.returncode
std = er.stderr
print(exit_c, std)

You can write try once, then handle all the exceptions later:
# One try block covering the whole path-manipulation sequence; the
# FooException / BarException names are placeholders for concrete exceptions.
try:
output_folder = os.path.join("/tmp/dir/work", FILE_PATH, ARTIFACT_DATE, FILE_WO_EXTENSION)
# NOTE(review): glob.glob() already returns a list -- file_name is a list here
file_name = list(glob.glob(os.path.join(output_folder, "*.war")))
# NOTE(review): joining a path with a *list* raises TypeError; index a match first
file_path = os.path.join(output_folder, file_name)
os.rename(file_path, file_path.split('war')[0] + ".tgz")
except FooException:
print('foo')
except BarException:
print('bar')

First of all, never use a bare except in your code. Read bullets 6 to 11 in PEP 8: Programming Recommendations.
My suggestion is to use this code instead:
def my_function(path_a, path_b, tmp_dir):
    """Build a .war from the staged folders, rename it to .tgz and upload it.

    Fixes applied to the original snippet: the missing ':' on the def line,
    glob.glob() returning a list (take the first match before joining), and
    the upload handler referencing an undefined name (proc_c).
    """
    # Stage the input folders into the temp working directory.
    try:
        shutil.copy(path_a, tmp_dir)
        shutil.copy(path_b, tmp_dir)
    except OSError:
        traceback.print_exc()
    try:
        # WAR_GENERATION_COMMAND: placeholder for the actual argv list.
        war_process = subprocess.run(WAR_GENERATION_COMMAND, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(war_process.stdout.decode("utf-8"))
        output_folder = os.path.join("/tmp/dir/work", FILE_PATH, ARTIFACT_DATE, FILE_WO_EXTENSION)
        # glob.glob() already returns a list of matching paths; take the first.
        file_path = glob.glob(os.path.join(output_folder, "*.war"))[0]
        os.rename(file_path, file_path.split('war')[0] + ".tgz")
        file_version = os.path.basename(file_path)
    except subprocess.CalledProcessError as e:
        # War build failed: the exception itself carries exit code and stderr.
        print(e.returncode, e.stderr)
        print(e.stderr.decode("utf-8"))
    except Exception as e:
        print(f'The program caught an exception {e}')
        traceback.print_exc()
    # Upload the artifact to the repository.
    cmd = "curl -u username -T ....)"
    try:
        curl_output = subprocess.run([cmd], shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(curl_output.stdout.decode("utf-8"))
    except subprocess.CalledProcessError as er:
        # BUG FIX: the original printed proc_c.stderr -- proc_c is undefined.
        print(er.stderr.decode("utf-8"))
        print(er.returncode, er.stderr)
The second and the third try/except blocks must stay separated because both catch the same exception.
Also, if any of the blocks you created here can raise a specific exception from this list, you should handle it the same way you handle subprocess.CalledProcessError.
Best practice is to write one try block with multiple excepts in which each except block catches a specific exception.

You don't need to put a try/except block after every statement. It would be better to put multiple statements in a try/except block
def my_function(path_a, path_b, tmp_dir):
    """Group related statements under a few try blocks instead of one per line.

    Fixes applied to the original snippet: the missing ':' on the def line,
    glob.glob() returning a list (take the first match before joining), the
    bare except, and the upload handler's undefined name (proc_c).
    """
    try:
        # Stage the inputs, then build the war in one guarded sequence.
        shutil.copy(path_a, tmp_dir)
        shutil.copy(path_b, tmp_dir)
        # WAR_GENERATION_COMMAND: placeholder for the actual argv list.
        war_process = subprocess.run(WAR_GENERATION_COMMAND, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(war_process.stdout.decode("utf-8"))
    except subprocess.CalledProcessError as e:
        # Build failure: the exception carries the exit code and stderr.
        print(e.returncode, e.stderr)
        print(e.stderr.decode("utf-8"))
    try:
        output_folder = os.path.join("/tmp/dir/work", FILE_PATH, ARTIFACT_DATE, FILE_WO_EXTENSION)
        # glob.glob() already returns a list; take the first .war it found.
        file_path = glob.glob(os.path.join(output_folder, "*.war"))[0]
        os.rename(file_path, file_path.split('war')[0] + ".tgz")
        file_version = os.path.basename(file_path)
    except (OSError, IndexError):
        # IndexError: no .war produced; OSError: rename/path failures.
        traceback.print_exc()
    # Upload the artifact to the repository.
    cmd = "curl -u username -T ....)"
    try:
        curl_output = subprocess.run([cmd], shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(curl_output.stdout.decode("utf-8"))
    except subprocess.CalledProcessError as er:
        # BUG FIX: the original printed proc_c.stderr -- proc_c is undefined.
        print(er.stderr.decode("utf-8"))
        print(er.returncode, er.stderr)
```

Related

How to fine tune the redundant code in python

I am trying to create two sets of databases every time inside my python script, for which I have written the code below. It looks redundant to me, since I initialize the variable ext twice, so if anyone can suggest a better alternative, that would be really helpful.
def create_datasets(database, ext):
# Create one BigQuery dataset named database+ext via a shell command.
try:
dataset = "bq --location=US mk -d " + database + ext
try:
return_cd, out, err = run_sys_command(dataset)
except Exception as e:
# inner handler already swallows any failure from run_sys_command
print(e)
except Exception as e:
# NOTE(review): only reachable if the string concatenation itself raises
# (e.g. TypeError on a non-str argument) -- the inner handler catches the rest
print(e)
raise
# driver: once with no suffix, once for the staging dataset
ext = ''
create_datasets(database, ext)
ext = '_stg'
create_datasets(database, ext)
Use a loop?
# Drive both dataset creations from one loop instead of duplicating the calls.
for ext in ['', '_stg']:
create_datasets(database, ext)
About your function:
def create_datasets(database, ext):
# Same behaviour with a single try block -- one handler is enough here.
try:
dataset = f"bq --location=US mk -d {database}{ext}"
return_cd, out, err = run_sys_command(dataset)
except Exception as e: # <- you should catch sub exception!
print(e)
Any Exception raised in your function is caught and handled by the inner try block, so the outer one seems redundant.
def create_datasets(database, ext):
    """Create the BigQuery dataset named database+ext (ext is '' or '_stg')."""
    try:
        command = "bq --location=US mk -d " + database + ext
        return_cd, out, err = run_sys_command(command)
    except Exception as err:
        # Failures (bad arguments or a failing shell command) are reported,
        # not re-raised -- same contract as before.
        print(err)

Print tqdm progress bar from external python script called by subprocess

My main goal is to run an external python script (client script) by subprocess in another python script (caller script). The console of the caller script displays all output from the client script except the tqdm output - so it is not a general problem of displaying output by subprocess, but a specific problem related to subprocess interacting with tqdm.
My secondary goal is that I'd like to understand it :). So thoughtful explanations are much appreciated.
The client script (train.py) contains several tqdm calls. So far, I haven't seen much difference in outputs between various tqdm argument configurations, so let's use the simplest one.
In train.py:
...
from tqdm import tqdm
# Simplest tqdm setup: 10 steps, bar written straight to stdout.
# NOTE(review): indentation was lost in this paste; `sys` and `sleep`
# are presumably imported elsewhere in train.py -- confirm.
with tqdm(total = 10, ncols = 80,
file=sys.stdout, position = 0, leave = True,
desc='f5b: pbar.set_postfix') as pbar:
for i in range(10):
pbar.update(1)
postfix = {'loss': '{0:.4f}'.format(1+i)}
pbar.set_postfix(**postfix)
sleep(0.1)
The caller script experiment.py executes the function execute_experiment which calls train.py by the argument command_list:
def execute_experiment(command_list):
    """Run the training script given by command_list, echoing its stdout live.

    Returns the elapsed wall-clock seconds as a string when the child exits
    with status 0, and the int 1 otherwise (kept for caller compatibility).
    """
    tic = time.time()
    try:
        process = subprocess.Popen(
            command_list, shell=False,
            encoding='utf-8',
            bufsize=0,  # unbuffered: pass output through as it arrives
            stdin=subprocess.DEVNULL,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        # Poll process for new output until finished
        # Source: https://stackoverflow.com/q/37401654/7769076
        while process.poll() is None:
            nextline = process.stdout.readline()
            sys.stdout.write(nextline)
            sys.stdout.flush()
        # BUG FIX: drain whatever was still buffered when the child exited;
        # the original loop silently dropped the final lines of output.
        remainder = process.stdout.read()
        if remainder:
            sys.stdout.write(remainder)
            sys.stdout.flush()
    # BUG FIX: the original caught the bare name `CalledProcessError`, which
    # is never imported here and would raise NameError in the handler.
    except subprocess.CalledProcessError as err:
        print("CalledProcessError: {0}".format(err))
        sys.exit(1)
    except OSError as err:
        print("OS error: {0}".format(err))
        sys.exit(1)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    if (process.returncode == 0):
        toc = time.time()
        time1 = str(round(toc - tic))
        return time1
    else:
        # NOTE(review): int 1 on failure vs. str on success is an awkward
        # mixed return type, but callers may depend on it -- left unchanged.
        return 1
This script call to the above code snippet of train.py does return output, but the tqdm output stops after 0 seconds and looks like this:
f5b: pbar.set_postfix: 0%| | 0/10 [00:00<?, ?it/s]
f5b: pbar.set_postfix: 10%|█▊ | 1/10 [00:00<00:00, 22310.13it/s]
The script call to the original code of train.py returns all output except tqdm output:
Training default configuration
train.py data --use-cuda ...
device: cuda
...
Comments:
shell = False: As python script calls python script. When shell=True, the client script is not called at all
bufsize=0: To prevent buffering
The train.py call is preceded with sys.executable to ensure that the python interpreter of the corresponding conda environment is called when on local machine.
Questions:
Does tqdm.set_postfix prevent passing the progress bar output upstream? I know this happens when tqdm.set_description is invoked, e.g. by:
pbar.set_description('processed: %d' %(1 + i))
This code contains it:
def train(self, dataloader, max_batches=500, verbose=True, **kwargs):
# Run up to max_batches meta-training batches with a tqdm progress bar.
# **kwargs (desc, leave, ...) are forwarded to tqdm; verbose toggles the bar.
with tqdm(total=max_batches, disable=not verbose, **kwargs) as pbar:
for results in self.train_iter(dataloader, max_batches=max_batches):
pbar.update(1)
postfix = {'loss': '{0:.4f}'.format(results['mean_outer_loss'])}
if 'accuracies_after' in results:
postfix['accuracy'] = '{0:.4f}'.format(
np.mean(results['accuracies_after']))
pbar.set_postfix(**postfix)
# for logging
# NOTE(review): if train_iter yields nothing, `results` is unbound here
return results
Is the nested function call the reason why the progress bar is not shown?
The order of calls is experiment.py > train.py > nested.py.
train.py calls the train function in nested.py by:
# One meta-training pass per epoch; args.verbose controls the tqdm bar.
for epoch in range(args.num_epochs):
results_metatraining = metalearner.train(meta_train_dataloader,
max_batches=args.num_batches,
verbose=args.verbose,
desc='Training',
# leave=False
leave=True
)
Alternatives tried out with no success:
### try2
process = subprocess.Popen(command_list, shell=False, encoding='utf-8',
stdin=DEVNULL, stdout=subprocess.PIPE)
while True:
# NOTE(review): readline() blocks until it sees '\n'; tqdm redraws its bar
# with '\r', so the bar updates never complete a line and never appear here
output = process.stdout.readline().strip()
print('output: ' + output)
if output == '' and process.poll() is not None: # end of output
break
if output: # print output in realtime
# NOTE(review): each non-empty line is printed twice (here and above)
print(output)
else:
# NOTE(review): communicate() result is assigned but never used
output = process.communicate()
process.wait()
### try6
# NOTE(review): the `yield` below means this fragment belongs inside a
# generator function -- it is not valid at top level
process = subprocess.Popen(command_list, shell=False,
stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(process.stdout.readline, ""):
yield stdout_line
process.stdout.close()
return_code = process.wait()
print('return_code' + str(return_code))
if return_code:
raise subprocess.CalledProcessError(return_code, command_list)
### try7
with subprocess.Popen(command_list, stdout=subprocess.PIPE,
bufsize=1, universal_newlines=True) as p:
while True:
# same newline problem: readline() never returns tqdm's '\r' updates
line = p.stdout.readline()
if not line:
break
print(line)
exit_code = p.poll()
I think readline is waiting for '\n', and tqdm is not creating new lines, maybe this could help (I did not try):
import io
def execute_experiment(command_list):
    """Run command_list, streaming the child's output character by character.

    Reading one character at a time (instead of readline) lets tqdm's
    '\\r'-terminated progress-bar redraws through. Returns the elapsed
    seconds as a string on success, the int 1 otherwise (kept for caller
    compatibility).
    """
    tic = time.time()
    try:
        # BUG FIX: the original passed encoding='utf-8'/universal_newlines=True,
        # which makes process.stdout a *text* stream -- io.TextIOWrapper needs
        # the underlying byte stream. Open the pipe in binary and let the
        # wrapper do the decoding.
        process = subprocess.Popen(
            command_list, shell=False,
            bufsize=0,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        # Poll process for new output until finished
        # Source: https://stackoverflow.com/q/37401654/7769076
        reader = io.TextIOWrapper(process.stdout, encoding='utf8')
        while process.poll() is None:
            char = reader.read(1)
            sys.stdout.write(char)
            sys.stdout.flush()
        # Drain anything still buffered when the child exited.
        tail = reader.read()
        if tail:
            sys.stdout.write(tail)
            sys.stdout.flush()
    # BUG FIX: qualify the exception -- the bare name `CalledProcessError`
    # is undefined here (and Popen never raises it anyway).
    except subprocess.CalledProcessError as err:
        print("CalledProcessError: {0}".format(err))
        sys.exit(1)
    except OSError as err:
        print("OS error: {0}".format(err))
        sys.exit(1)
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
    if (process.returncode == 0):
        toc = time.time()
        time1 = str(round(toc - tic))
        return time1
    else:
        return 1

Waiting for the previous function to be completed in Python

is there any solution in python which lets a function execute after the previous one was finished?
Here is one of the ideas I'm using now. But it is not solving the problem when files are larger and the program needs more time.
def copy_to_jumphost(self):
# Start an scp on the jumphost that pulls the current log file over.
# NOTE(review): exec_command() only *starts* the remote command and returns
# immediately -- nothing here waits for the scp to finish (hence the race
# the question describes). Waiting on the channel's exit status would fix it.
# NOTE(review): '%s#%s' between username and hostname looks like it should
# be '%s@%s' -- confirm against a working invocation.
try:
if self.connect():
stdin, stdout, stderr = self.client.exec_command('sshpass -p %s scp -r %s#%s:%s/' % (self.password,self.username,self.hostname,self.log_path) + self.lista[self.file_number].rstrip() + ' ' + '/home/%s/' % (self.username) + self.lista[self.file_number].rstrip())
except (AttributeError, TypeError) as e:
print("Error occurred:", e)
# (review) body of the calling method -- its `def` line is missing from this paste
try:
if self.connect():
# NOTE(review): copy_to_jumphost() returns None, which is falsy, so this
# branch can never run -- the else branch below is always taken
if self.copy_to_jumphost():
ftp_client = self.client.open_sftp()
ftp_client.get(filepath, self.localpath)
print("Success! \nFile coppied to %s" %(self.localpath))
else:
# fixed sleep is no substitute for waiting on the scp's exit status
time.sleep(5)
ftp_client = self.client.open_sftp()
ftp_client.get(filepath, self.localpath)
print("Success but needed some time! \nFile coppied to %s" %(self.localpath))
except (AttributeError, TypeError) as e:
print("Error occurred:", e)
Perfect situation for me will be if in else statement there is a solution to wait for finishing the copy_to_jumphost() function, because time.sleep(5) will fail if I will need to copy larger files.

Extract Python subprocess.CalledProcessError argument list

Following is the code snippet from our code base
# global library function
def check_call_noout(params, acceptable_exit_codes = (0,), shellCommand=False):
# Run `params` with stdout/stderr discarded; raise CalledProcessError when
# the exit code is not in acceptable_exit_codes. (The question states this
# library function must not be modified.)
FNULL = open('/dev/null', 'w')
sts = 1
try:
if shellCommand:
p = subprocess.Popen(params, stdout=FNULL, stderr=FNULL,shell=True)
else:
p = subprocess.Popen(params, stdout=FNULL, stderr=FNULL)
# POSIX waitpid() status word: the exit code sits in the high byte,
# hence the `>> 8` below
sts = os.waitpid(p.pid, 0)[1]
except:
raise
finally:
FNULL.close()
exit_code = sts >> 8
if exit_code not in acceptable_exit_codes:
# `params` travels in the exception as e.cmd (second positional arg)
raise subprocess.CalledProcessError(exit_code, params)
# driver code
try:
    cmd = ["/bin/tar", "--acls", "--selinux", "--xattrs", "-czf a.tar.gz", "./a.xml", "--exclude","\"lost+found\""]
    check_call_noout(cmd,(0,1),False)
except subprocess.CalledProcessError as e:
    # BUG FIX: the original used the Python 2 print statement while the rest
    # of the snippet is Python 3. Also, e.output is None for a manually
    # raised CalledProcessError -- the params list the asker wants is e.cmd.
    print(e.cmd, e.returncode)
except Exception as e:
    print(type(e).__name__, e)
I want to print the params argument value passed into subprocess.CalledProcessError object which is raised inside the library function and caught in my driver code.
However, I cannot change anything in the library function check_call_noout()
If i understand correctly, getting the __dict__ attribute of subprocess.CalledProcessError class would do:
# Dump every attribute the exception carries (returncode, cmd, output, stderr).
try:
subprocess.run([...], check=True)
except subprocess.CalledProcessError as e:
print(e.__dict__)
You can also use vars function which will call __dict__ internally:
# vars(e) is equivalent -- it reads e.__dict__ for you.
try:
subprocess.run([...], check=True)
except subprocess.CalledProcessError as e:
print(vars(e))

Python 3 generating MP4 files via MP4Box

I overhauled my code to use popen and still not getting a new MP4 file.
If I manually type it out in terminal MP4Box works.
But when I run the code below it doesn't raise any error whatsoever. So I am puzzled as what is not working.
The two definition are for tkinter GUI buttons.
# NOTE(review): `global` at module level is a no-op; indentation of this
# paste appears to have been flattened.
global now
#put widgets here
def picapture():
# Start the camera preview and record to /home/pi/<timestamp>.h264.
try:
global now
debugLog.insert(0.0, "Date Initialization Done\n")
# NOTE(review): "%X" embeds ':' characters in the timestamp -- legal in
# Linux filenames but worth confirming it is intended
now = datetime.datetime.now().strftime("%F_%X")
debugLog.insert(0.0, now + "\n")
camera.start_preview(fullscreen=False,window = (200,0,1100,640))
camera.start_recording('/home/pi/' + now + '.h264')
except:
# bare except: logs a traceback for any failure (camera or widget errors)
print(traceback.format_exc(limit=10))
def stopcapture():
    """Stop the recording and convert the .h264 into an .mp4 with MP4Box."""
    try:
        global now
        camera.stop_recording()
        camera.stop_preview()
        # NOTE(review): '-0' is kept as-is but looks like a typo -- confirm
        # against a working MP4Box invocation; if MP4Box rejects it, the
        # RuntimeError below will now surface that.
        MP4Box_Command = ['MP4Box', '-add', now + '.h264', '-0', now + '.mp4']
        convert = subprocess.Popen(MP4Box_Command, stdout=PIPE, bufsize=1, universal_newlines=True)
        # BUG FIX: Popen returns immediately and never raises
        # CalledProcessError (that handler in the original was dead code),
        # which is why no error and no MP4 ever appeared. Wait for MP4Box
        # and check its exit status explicitly.
        out, _ = convert.communicate()
        if convert.returncode != 0:
            # BUG FIX: was misspelled RunTimeError (a NameError if it ever ran)
            raise RuntimeError("Command '{}' returned with error (code {}): {}".format(
                MP4Box_Command, convert.returncode, out))
    except:
        print(traceback.format_exc(limit=10))

Categories