Does using funcName when logging in Python have any performance impact?

Was just looking at the logging docs in Python and came across funcName as a parameter in the log formatter.
It looks handy, and it's obviously a great way to see exactly where a log message is coming from, but someone raised a concern that it might need to generate a stack trace, which would be a performance hit.
I assume it uses something like sys._getframe() rather than the inspect module, which would have a real performance impact.
Is funcName something we could use in a production environment, or should we stay away from it?

Resist the temptation to guess: the source for logging is available to you as part of your Python distribution.
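For example, a quick way to find it on your machine (a minimal sketch; the exact path will vary by installation):

import logging
print(logging.__file__)  # path to logging/__init__.py (or its .pyc) in your distribution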
How it finds the function name (logging/__init__.py):
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'): #support for py2exe
    _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:])
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)

# next bit filched from 1.5.2's inspect.py
def currentframe():
    """Return the frame object for the caller's stack frame."""
    try:
        raise Exception
    except:
        return sys.exc_info()[2].tb_frame.f_back

if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
and then later:
def findCaller(self):
    """
    Find the stack frame of the caller so that we can note the source
    file name, line number and function name.
    """
    f = currentframe()
    #On some versions of IronPython, currentframe() returns None if
    #IronPython isn't run with -X:Frames.
    if f is not None:
        f = f.f_back
    rv = "(unknown file)", 0, "(unknown function)"
    while hasattr(f, "f_code"):
        co = f.f_code
        filename = os.path.normcase(co.co_filename)
        if filename == _srcfile:
            f = f.f_back
            continue
        rv = (filename, f.f_lineno, co.co_name)
        break
    return rv
Also, there's no need to worry about the overhead: findCaller() works out the function name before the formatter decides whether you asked for it, so you might as well use it.
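For reference, a minimal sketch of wiring funcName into a formatter (the format string uses standard logging attributes; the function name here is just illustrative):

import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(funcName)s: %(message)s",
)

def do_work():
    # The record's funcName attribute is filled in by findCaller()
    logging.getLogger(__name__).info("hello")  # -> "... do_work: hello"

do_work()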

Here is a test app showing that fetching and writing the filename and line number to a file costs around 1 second per 500,000 calls on my local machine.
#!/usr/bin/env python
import sys
import time

def writeinfo(f, on=True):
    # give the function something to do
    s = sum(range(1000))
    if on:
        fr = sys._getframe(1)
        s = "%s (line %s) " % (fr.f_code.co_filename, fr.f_lineno)
        f.write(s)

cnt = 50000

t1 = time.time()
f = open('tempfile.log', 'w')
for i in range(cnt):
    writeinfo(f)
f.close()
t2 = time.time()
for i in range(cnt):
    writeinfo(f, on=False)
t3 = time.time()

print "Test time with file write: %s" % (t2-t1)
print "Test time without file write: %s" % (t3-t2)
Results:
Test time with file write: 1.17307782173
Test time without file write: 1.08166718483
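If you want to measure the frame-fetch cost yourself, here is a rough micro-benchmark (a sketch assuming CPython; absolute numbers will vary by machine):

import inspect
import sys
import timeit

# sys._getframe is a cheap C-level call...
print(timeit.timeit(lambda: sys._getframe(0), number=100000))
# ...while inspect.stack() also reads source lines for every frame,
# so it is orders of magnitude more work per call.
print(timeit.timeit(lambda: inspect.stack(), number=1000))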

Related

Class for file creation and directory validation

After reading some texts about file creation in Python, I decided to create this class, which creates a new file in a directory and makes a backup in another directory if the file already exists (and is older than x hours).
The main reason I opened this question is to find out whether this is a correct way to write a class using try/except, because I'm getting a little confused about when to prefer try/except over if/else.
Below is the working example:
import os
import datetime

class CreateXML():
    def __init__(self, path, filename):
        self.path = path
        self.bkp_path = "%s\\backup" % path
        self.filename = filename
        self.bkp_file = "%s.previous" % filename
        self.create_check = datetime.datetime.now() - datetime.timedelta(hours=8)

    @staticmethod
    def create_dir(path):
        try:
            os.makedirs(path)
            return True
        except:
            return False

    @staticmethod
    def file_check(file):
        try:
            open(file)
            return True
        except:
            return False

    def create_file(self, target_dir, target_file):
        try:
            target = "%s\\%s" % (target_dir, target_file)
            open(target, 'w')
        except:
            return False

    def start_creation(self):
        try:
            # Check if file exists
            if self.file_check("%s\\%s" % (self.path, self.filename)):
                self.create_dir(self.bkp_path)
                creation = os.path.getmtime("%s\\%s" % (self.path, self.filename))
                fcdata = datetime.datetime.fromtimestamp(creation)
                # File exists and it's older than 8 hours
                if fcdata < self.create_check:
                    bkp_file_path = "%s\\%s" % (self.bkp_path, self.bkp_file)
                    new_file_path = "%s\\%s" % (self.path, self.filename)
                    # If backup file exists, erase current backup file,
                    # move existing file to backup and create new file.
                    if self.file_check("%s\\%s" % (self.bkp_path, self.bkp_file)):
                        os.remove(bkp_file_path)
                        os.rename(new_file_path, bkp_file_path)
                        self.create_file(self.path, self.filename)
                    # No backup file, create new one.
                    else:
                        self.create_file(self.bkp_path, self.bkp_file)
            else:
                # Fresh creation
                self.create_dir(self.path)
                self.create_file(self.path, self.filename)
        except OSError, e:
            print e

if __name__ == '__main__':
    path = 'c:\\tempdata'
    filename = 'somefile.txt'
    cx = CreateXML(path, filename)
    cx.start_creation()
So, basically the real questions here are:
- With the example above, is the usage of try/except correct?
- Is it correct to perform the validations with try/except to check whether a file or directory already exists, instead of using a simplified version like this one:
import os

# Simple method of doing it
path = 'c:\\tempdata'
filename = 'somefile.txt'
bkp_path = 'c:\\tempdata\\backup'
bkp_file = 'somefile.txt.bkp'
new_file_path = "%s\\%s" % (path, filename)
bkp_file_path = "%s\\%s" % (bkp_path, bkp_file)

if not os.path.exists(path):
    print "create path"
    os.makedirs(bkp_path)

if not os.path.isfile(new_file_path):
    print "create new file"
    open(new_file_path, 'w')
else:
    print "file exists, moving to backup folder"
    # check if backup file exists
    if not os.path.isfile(bkp_file_path):
        print "New backup file created"
        open(bkp_file_path, 'w')
    else:
        print "backup exists, removing backup, backup the current, and creating newfile"
        os.remove(bkp_file_path)
        os.rename(new_file_path, bkp_file_path)
        open(bkp_file_path, 'w')
- If the usage of try/except is correct, is it recommended to write a big class to create a file when it's possible to write a short version like the one above?
Please do not close this thread; I'm really confused about what the most correct, Pythonic way to do it is.
Thanks in advance.
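For reference, the EAFP ("easier to ask forgiveness than permission") idiom the question is circling around normally catches a specific exception rather than using a bare except; a minimal sketch (the helper name is hypothetical):

import errno
import os

def ensure_dir(path):
    """Create path if it does not exist yet, EAFP style."""
    try:
        os.makedirs(path)
    except OSError as e:
        # Only swallow "already exists"; re-raise anything unexpected.
        if e.errno != errno.EEXIST:
            raise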

LRU cache on hard drive python

I want to be able to decorate a function as you would do with functools.lru_cache; however, I want the results to be cached on the hard drive and not in memory. Looking around, I get the feeling this is a solved problem, and I was wondering if anyone could point me in the right direction (or at least give me a few more keywords to try googling).
I don't know if this will help or if it matters, but the function is computing images from unique filenames.
Here's some code to get you started:
from pathlib import Path
import pickle
import hashlib
import os

class LRU_Cache:

    def __init__(self, directory, original_function, maxsize=10):
        self.directory = directory
        self.original_function = original_function
        self.maxsize = maxsize
        try:
            os.mkdir(directory)
        except OSError:
            pass

    def __call__(self, *args):
        filename = hashlib.sha1(pickle.dumps(args)).hexdigest()
        fullname = os.path.join(self.directory, filename)
        try:
            # Cache hit: unpickle the stored value and refresh the mtime
            with open(fullname, 'rb') as f:
                value = pickle.load(f)
            Path(fullname).touch()
            return value
        except FileNotFoundError:
            pass
        # Cache miss: compute, store, then evict the oldest entry if over size
        value = self.original_function(*args)
        with open(fullname, 'wb') as f:
            pickle.dump(value, f)
        filenames = os.listdir(self.directory)
        if len(filenames) > self.maxsize:
            fullnames = [os.path.join(self.directory, filename)
                         for filename in filenames]
            oldest = min(fullnames, key=lambda fn: os.stat(fn).st_mtime)
            os.remove(oldest)
        return value
It hashes the arguments to create a unique filename for each function call. The function's return value is pickled to a file with that name.
Cache hits unpickle the stored result and update the file modification time.
If the cache directory exceeds a target size, the oldest cache file is removed.
Use it like this:
def square(x):
    print('!')
    return x ** 2

sqr = LRU_Cache('square_cache', square, 10)
Now call sqr normally and results will be cached to disk.
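For illustration, a session might look like this (assuming the cache directory starts empty):

print(sqr(5))  # cache miss: square() runs, prints '!', and 25 is pickled to disk
print(sqr(5))  # cache hit: no '!', 25 is unpickled from square_cache/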

Python MultiProcessing and Directory Creation

I am using Python Multiprocessing module to scrape a website. Now this website has over 100,000 pages. What I am trying to do is to put every 500 pages I retrieve into a separate folder. The problem is that though I successfully create a new folder, my script only populates the previous folder. Here is the code:
import os
import time
from multiprocessing import Pool

a = 1
b = 500

def fetchAfter(y):
    global a
    global b
    strfile = "E:\\A\\B\\" + str(a) + "-" + str(b) + "\\" + str(y) + ".html"
    if not os.path.exists(strfile):
        f = open(strfile, "w")

if __name__ == '__main__':
    start = time.time()
    for i in range(1, 3):
        os.makedirs("E:\\Results\\Class 9\\" + str(a) + "-" + str(b))
        pool = Pool(processes=12)
        pool.map(fetchAfter, range(a, b))
        pool.close()
        pool.join()
        a = b
        b = b + 500
    print time.time() - start
It is best for the worker function to only rely on the single argument it gets for determining what to do. Because that is the only information it gets from the parent process every time it is called. This argument can be almost any Python object (including a tuple, dict, list) so you're not really limited in the amount of information you pass to a worker.
So make a list of 2-tuples. Each 2-tuple should consist of (1) the file to get and (2) the directory where to stash it. Feed that list of tuples to map(), and let it rip.
I'm not sure if it is useful to specify the number of processes you want to use. Pool generally uses as many processes as your CPU has cores. That is usually enough to max out all the cores. :-)
BTW, you should only call map() once. And since map() blocks until everything is done, there is no need to call join().
Edit: Added example code below.
import multiprocessing
import os

import requests

def processfile(arg):
    """Worker function to scrape a page and write it to a file.

    Keyword arguments:
    arg -- 2-tuple containing the URL of the page and the directory
           where to save it.
    """
    # Unpack the arguments
    url, savedir = arg
    # It might be a good idea to put a random delay of a few seconds here,
    # so we don't hammer the webserver!
    # Scrape the page. Requests rules ;-)
    r = requests.get(url)
    # Write it, keep the original HTML file name.
    fname = url.split('/')[-1]
    with open(savedir + '/' + fname, 'w+') as outfile:
        outfile.write(r.text)

def main():
    """Main program."""
    # This list of tuples should hold all the pages...
    # Up to you how to generate it, this is just an example.
    worklist = [('http://www.foo.org/page1.html', 'dir1'),
                ('http://www.foo.org/page2.html', 'dir1'),
                ('http://www.foo.org/page3.html', 'dir2'),
                ('http://www.foo.org/page4.html', 'dir2')]
    # Create output directories
    dirlist = ['dir1', 'dir2']
    for d in dirlist:
        os.makedirs(d)
    p = multiprocessing.Pool()
    # Let'er rip!
    p.map(processfile, worklist)
    p.close()

if __name__ == '__main__':
    main()
Multiprocessing, as the name implies, uses separate processes. The processes you create with your Pool do not have access to the original values of a and b that you are adding 500 to in the main program. See this previous question.
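A minimal sketch of that pitfall (assuming Windows's "spawn" start method, which matches the E:\ paths in the question; under fork on Unix the workers would instead inherit a snapshot of the parent's state):

import multiprocessing as mp

counter = 0  # module-level value; this is what spawned workers will see

def report(i):
    # Each spawned worker re-imports this module, so it sees counter == 0
    # no matter what the parent assigned afterwards.
    return counter

if __name__ == '__main__':
    counter = 500  # reassigned in the parent process only
    pool = mp.Pool(2)
    print(pool.map(report, range(3)))  # [0, 0, 0] on Windows
    pool.close()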
The easiest solution is to just refactor your code so that you pass a and b to fetchAfter (in addition to passing y).
Here's one way to implement it:
#!/usr/bin/env python
import logging
import multiprocessing as mp
import os
import urllib

def download_page(url_path):
    try:
        urllib.urlretrieve(*url_path)
        mp.get_logger().info('done %s' % (url_path,))
    except Exception as e:
        mp.get_logger().error('failed %s: %s' % (url_path, e))

def generate_url_path(rootdir, urls_per_dir=500):
    for i in xrange(100*1000):
        if i % urls_per_dir == 0:  # make new dir
            dirpath = os.path.join(rootdir, '%d-%d' % (i, i + urls_per_dir))
            if not os.path.isdir(dirpath):
                os.makedirs(dirpath)  # stop if it fails
        url = 'http://example.com/page?' + urllib.urlencode(dict(number=i))
        path = os.path.join(dirpath, '%d.html' % (i,))
        yield url, path

def main():
    mp.log_to_stderr().setLevel(logging.INFO)
    pool = mp.Pool(4)  # number of processes is unrelated to the number
                       # of CPUs because the task is IO-bound
    for _ in pool.imap_unordered(download_page, generate_url_path(r'E:\A\B')):
        pass

if __name__ == '__main__':
    main()
See also Python multiprocessing pool.map for multiple arguments, and the code that brute-forces basic HTTP authorization using httplib and multiprocessing, from How to make HTTP in Python faster?

Problems with variable referenced before assignment when using os.path.walk

OK. I have some background in Matlab and I'm now switching to Python.
I have this bit of code, under Python 2.6.5 on 64-bit Linux, which walks through directories, finds files named 'GeneralData.dat', retrieves some data from them, and stitches the pieces into a new data set:
import pylab as p
import os, re
import linecache as ln

def LoadGenomeMeanSize(arg, dirname, files):
    for file in files:
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1,4] != 0.0:  # checking if data set is OK
                data_chopped = data[1000:-1,:]  # removing some of data
                Grand_mean = data_chopped[:,2].mean()
                Grand_STD = p.sqrt((sum(data_chopped[:,4]*data_chopped[:,3]**2) + sum((data_chopped[:,2]-Grand_mean)**2))/sum(data_chopped[:,4]))
            else:
                break
        if filepath == os.path.join(dirname, 'ModelParams.dat'):
            l = re.split(" ", ln.getline(filepath, 6))
            turb_param = float(l[2])
            arg.append((Grand_mean, Grand_STD, turb_param))

GrandMeansData = []
os.path.walk(os.getcwd(), LoadGenomeMeanSize, GrandMeansData)
GrandMeansData = sorted(GrandMeansData, key=lambda data_sort: data_sort[2])

TheMeans = p.zeros((len(GrandMeansData), 3))
i = 0
for item in GrandMeansData:
    TheMeans[i,0] = item[0]
    TheMeans[i,1] = item[1]
    TheMeans[i,2] = item[2]
    i += 1

print TheMeans  # just checking...
# later do some computation on TheMeans in NumPy
And it throws me this (though I would swear it was working a month ago):
Traceback (most recent call last):
  File "/home/User/01_PyScripts/TESTtest.py", line 29, in <module>
    os.path.walk(os.getcwd(), LoadGenomeMeanSize, GrandMeansData)
  File "/usr/lib/python2.6/posixpath.py", line 233, in walk
    walk(name, func, arg)
  File "/usr/lib/python2.6/posixpath.py", line 225, in walk
    func(arg, top, names)
  File "/home/User/01_PyScripts/TESTtest.py", line 26, in LoadGenomeMeanSize
    arg.append((Grand_mean, Grand_STD, turb_param))
UnboundLocalError: local variable 'Grand_mean' referenced before assignment
All right... so I went and did some reading and came up with this global-variable version:
import pylab as p
import os, re
import linecache as ln

Grand_mean = p.nan
Grand_STD = p.nan

def LoadGenomeMeanSize(arg, dirname, files):
    for file in files:
        global Grand_mean
        global Grand_STD
        filepath = os.path.join(dirname, file)
        if filepath == os.path.join(dirname, 'GeneralData.dat'):
            data = p.genfromtxt(filepath)
            if data[-1,4] != 0.0:  # checking if data set is OK
                data_chopped = data[1000:-1,:]  # removing some of data
                Grand_mean = data_chopped[:,2].mean()
                Grand_STD = p.sqrt((sum(data_chopped[:,4]*data_chopped[:,3]**2) + sum((data_chopped[:,2]-Grand_mean)**2))/sum(data_chopped[:,4]))
            else:
                break
        if filepath == os.path.join(dirname, 'ModelParams.dat'):
            l = re.split(" ", ln.getline(filepath, 6))
            turb_param = float(l[2])
            arg.append((Grand_mean, Grand_STD, turb_param))

GrandMeansData = []
os.path.walk(os.getcwd(), LoadGenomeMeanSize, GrandMeansData)
GrandMeansData = sorted(GrandMeansData, key=lambda data_sort: data_sort[2])

TheMeans = p.zeros((len(GrandMeansData), 3))
i = 0
for item in GrandMeansData:
    TheMeans[i,0] = item[0]
    TheMeans[i,1] = item[1]
    TheMeans[i,2] = item[2]
    i += 1

print TheMeans  # just checking...
# later do some computation on TheMeans in NumPy
It does not give error messages; it even produces a file with data... but the data are wrong! I checked some of them manually by running these commands:
import pylab as p

data = p.genfromtxt(filepath)
data_chopped = data[1000:-1,:]
Grand_mean = data_chopped[:,2].mean()
Grand_STD = p.sqrt((sum(data_chopped[:,4]*data_chopped[:,3]**2)
                    + sum((data_chopped[:,2]-Grand_mean)**2))/sum(data_chopped[:,4]))
on selected files. The results are different :-(
1) Can anyone explain to me what's wrong?
2) Does anyone know a solution?
I'll be grateful for help :-)
Cheers,
PTR
I would say this condition is not passing:
if filepath == os.path.join(dirname, 'GeneralData.dat'):
which means you are not getting GeneralData.dat before ModelParams.dat. Maybe you need to sort the file list alphabetically, or the file is simply not there.
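For instance, iterating over sorted(files) inside LoadGenomeMeanSize would enforce that ordering, since 'G' sorts before 'M'; a tiny self-contained illustration:

files = ['ModelParams.dat', 'GeneralData.dat', 'other.txt']
for name in sorted(files):
    print(name)  # GeneralData.dat, then ModelParams.dat, then other.txt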
I see one issue with the code and the solution that you have provided.
Never hide a "variable referenced before assignment" issue by just making the variable visible. Try to understand why it happened.
Prior to creating the global variable Grand_mean, you were getting an error because you accessed Grand_mean before any value was assigned to it. Initializing the variable outside the function and marking it as global only serves to hide the issue.
You see erroneous results because, although you have made the variable visible by making it global, the underlying issue still exists: your Grand_mean was never set to the correct data. That means the section of code under "if filepath == os.path.join(dirname, 'GeneralData.dat'):" was never executed.
Using global is not the right solution. That only makes sense if you do in fact want to reference and assign to the global Grand_mean name. The need for disambiguation comes from the way the interpreter prescans for assignment operators in function bodies.
You should start by assigning a default value to Grand_mean within the scope of LoadGenomeMeanSize(). Only one of the branches actually assigns a value to Grand_mean that has correct semantic meaning within one loop iteration. You are likely running into a case where "if filepath == os.path.join(dirname, 'ModelParams.dat'):" is true, but either "if filepath == os.path.join(dirname, 'GeneralData.dat'):" or "if data[-1,4] != 0.0:" is not; it's likely the second condition that is failing for you.
The quick and dirty answer is that you probably need to rearrange your code like this:
...
if filepath == os.path.join(dirname, 'GeneralData.dat'):
    data = p.genfromtxt(filepath)
    if data[-1,4] != 0.0:  # checking if data set is OK
        data_chopped = data[1000:-1,:]  # removing some of data
        Grand_mean = data_chopped[:,2].mean()
        Grand_STD = p.sqrt((sum(data_chopped[:,4]*data_chopped[:,3]**2) + sum((data_chopped[:,2]-Grand_mean)**2))/sum(data_chopped[:,4]))
if filepath == os.path.join(dirname, 'ModelParams.dat'):
    l = re.split(" ", ln.getline(filepath, 6))
    turb_param = float(l[2])
    arg.append((Grand_mean, Grand_STD, turb_param))
else:
    break
...

Python - Check network map

I'm looking for some help on logic; the code is not very Pythonic, as I'm still learning. We map the Z: drive to different locations all the time. Here is what I'm trying to accomplish:
1: Check for an old map on Z:, say \\192.168.1.100\old
2: Map the new location to Z:, say \\192.168.1.200\new
3: Make sure the new Z: mapping exists and is still connected
4: If it gets disconnected or unmapped reconnect it and log it
90% of the code works. If I run it as-is, it unmaps the old drive and maps the new drive, but the label of the old drive stays the same, even though it's mapped to the new location and I can browse it. The other problem is that I only want to run checkOldDrive one time and then just let checkDrive run. Any advice is appreciated.
#!/usr/bin/python
import pywintypes
import win32com.client
import os.path
import sys
import string
import fileinput
import time
import win32net

##################################################################
# Check for old Z: map and remove it
# Map the new instance of Z:
# Check if the Z: drive exists
#   if the drive exists report to status.log we are working
#   if the drive DOES NOT exist map it and report errors to the log
###################################################################

def checkDrive():
    if os.path.exists('z:'):
        saveout = sys.stdout
        fsock = open('status.log', 'a')
        sys.stdout = fsock
        print os.getenv("COMPUTERNAME"), " - ", time.ctime(), " - Connected"
        sys.stdout = saveout
        fsock.close()
    else:
        ivvinetwork = win32com.client.Dispatch('Wscript.Network')
        network_drives = ivvinetwork.EnumNetworkDrives()
        for mapped_drive in [network_drives.Item(i)
                             for i in range(0, network_drives.Count() - 1, 2)
                             if network_drives.Item(i)]:
            ivvinetwork.RemoveNetworkDrive(mapped_drive, True, True)
        drive_mapping = [
            ('z:', '\\\\192.168.1.100\\newmap', 'someuser', 'somepass')]
        for drive_letter, network_path, user_name, user_pass in drive_mapping:
            try:
                ivvinetwork.MapNetworkDrive(drive_letter, network_path, True, user_name, user_pass)
                saveout = sys.stdout
                fsock = open('status.log', 'a')
                sys.stdout = fsock
                print os.getenv("COMPUTERNAME"), " - ", time.ctime(), " - ", drive_mapping, "Drive Has Been Mapped"
                sys.stdout = saveout
                fsock.close()
            except Exception, err:
                saveout = sys.stdout
                fsock = open('status.log', 'a')
                sys.stdout = fsock
                print os.getenv("COMPUTERNAME"), " - ", time.ctime(), " - ", err
                sys.stdout = saveout
                fsock.close()

def checkOldDrive():
    if os.path.exists('z:'):
        ivvinetwork = win32com.client.Dispatch('Wscript.Network')
        network_drives = ivvinetwork.EnumNetworkDrives()
        for mapped_drive in [network_drives.Item(i)
                             for i in range(0, network_drives.Count() - 1, 2)
                             if network_drives.Item(i)]:
            ivvinetwork.RemoveNetworkDrive(mapped_drive, True, True)

checkOldDrive()
checkDrive()
I've put together a script based on the one you laid out which I believe accomplishes what you have described.
I've tried to do it in a way that's both Pythonic and follows good programming principles.
In particular, I've done the following:
- modularized much of the functionality into reusable functions
- avoided repetition as much as possible (I did not factor out the hard-coded 'Z:' drive; I leave that to you as an exercise, as you see fit)
- factored the logging definition into one location (so the format, etc. are consistent and not repeated); the logging module made this easy
- moved all code out of the top-level scope (except for some global constants), which allows the script to be run directly or imported by another script as a module
- added some documentation strings to help document what each function does
- kept each function short and succinct, so it can be read more easily on a single screen and in an isolated context
Surely, there is still room for some improvement, but I have tested this script and it is functional. It should provide some good lessons while also helping you accomplish your task. Enjoy.
#!/usr/bin/env python
import os
import time
import win32com.client
import logging

old_mappings = [
    r'\\192.168.1.100\old',
]
new_mapping = r'\\192.168.1.200\new'

LOG_FILENAME = 'status.log'

def main():
    """
    Check to see if Z: is mapped to the old server; if so remove it and
    map the Z: to the new server.
    Then, repeatedly monitor the Z: mapping. If the Z: drive exists,
    report to status.log that we are working. Otherwise, re-map it and
    report errors to the log.
    """
    setupLogging()
    replaceMapping()
    monitorMapping()

def replaceMapping():
    if removeMapping():
        createNewMapping()

def setupLogging():
    format = os.environ['COMPUTERNAME'] + " - %(asctime)s - %(message)s"
    logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format=format)

def getCredentials():
    """
    Return one of three things:
    - an empty tuple
    - a tuple containing just a username (if a password is not required)
    - a tuple containing username and password
    """
    return ('someuser', 'somepass')

def createNewMapping():
    network = win32com.client.Dispatch('WScript.Network')
    params = (
        'Z:',         # drive letter
        new_mapping,  # UNC path
        True,         # update profile
    )
    params += getCredentials()
    try:
        network.MapNetworkDrive(*params)
        msg = '{params} - Drive has been mapped'
        logging.getLogger().info(msg.format(**vars()))
    except Exception as e:
        msg = 'error mapping {params}'
        logging.getLogger().exception(msg.format(**vars()))

def monitorMapping():
    while True:
        # only check once a minute
        time.sleep(60)
        checkMapping()

def checkMapping():
    if getDriveMappings()['Z:'] == new_mapping:
        msg = 'Drive is still mapped'
        logging.getLogger().info(msg.format(**vars()))
    else:
        replaceMapping()

# From Python 2.6.4 docs
from itertools import izip_longest

def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *args)

def getDriveMappings():
    """
    Return a dictionary of drive letter to UNC paths as mapped on the
    system.
    """
    network = win32com.client.Dispatch('WScript.Network')
    # http://msdn.microsoft.com/en-us/library/t9zt39at%28VS.85%29.aspx
    drives = network.EnumNetworkDrives()
    # EnumNetworkDrives returns an even-length array of drive/unc pairs.
    # Use grouper to convert this to a dictionary.
    result = dict(grouper(2, drives))
    # Potentially several UNC paths will be connected but not assigned
    # to any drive letter. Since only the last will be in the
    # dictionary, remove it.
    if '' in result:
        del result['']
    return result

def getUNCForDrive(drive):
    """
    Get the UNC path for a mapped drive.
    Throws a KeyError if no mapping exists.
    """
    return getDriveMappings()[drive.upper()]

def removeMapping():
    """
    Remove the old drive mapping. If it is removed, or was not present,
    return True.
    Otherwise, return False or None.
    """
    mapped_drives = getDriveMappings()
    drive_letter = 'Z:'
    if not drive_letter in mapped_drives:
        return True
    if mapped_drives[drive_letter] in old_mappings:
        network = win32com.client.Dispatch('WScript.Network')
        force = True
        update_profile = True
        network.RemoveNetworkDrive(drive_letter, force, update_profile)
        return True
    # return None

if __name__ == '__main__':
    main()
