Reloading a Python file once a minute

I have an IRC bot and I'm parsing data, but to refresh the data I have to reload the plugin. I've noticed it works once I change something in the file, or just open and save it again; once it reloads, it gets the right info. This is the file that reloads the plugins:
reload.py
import collections
import glob
import os
import re
import sys
import traceback

if 'mtimes' not in globals():
    mtimes = {}

if 'lastfiles' not in globals():
    lastfiles = set()

def make_signature(f):
    return f.func_code.co_filename, f.func_name, f.func_code.co_firstlineno

def format_plug(plug, kind='', lpad=0, width=40):
    out = ' ' * lpad + '%s:%s:%s' % make_signature(plug[0])
    if kind == 'command':
        out += ' ' * (50 - len(out)) + plug[1]['name']
    if kind == 'event':
        out += ' ' * (50 - len(out)) + ', '.join(plug[1]['events'])
    if kind == 'regex':
        out += ' ' * (50 - len(out)) + plug[1]['regex']
    return out

def reload(init=False):
    changed = False

    if init:
        bot.plugs = collections.defaultdict(list)
        bot.threads = {}

    core_fileset = set(glob.glob(os.path.join("core", "*.py")))

    for filename in core_fileset:
        mtime = os.stat(filename).st_mtime
        if mtime != mtimes.get(filename):
            mtimes[filename] = mtime
            changed = True
            try:
                eval(compile(open(filename, 'U').read(), filename, 'exec'),
                     globals())
            except Exception:
                traceback.print_exc()
                if init:        # stop if there's an error (syntax?) in a core
                    sys.exit()  # script on startup
                continue

            if filename == os.path.join('core', 'reload.py'):
                reload(init=init)
                return

    fileset = set(glob.glob(os.path.join('plugins', '*.py')))

    # remove deleted/moved plugins
    for name, data in bot.plugs.iteritems():
        bot.plugs[name] = [x for x in data if x[0]._filename in fileset]

    for filename in list(mtimes):
        if filename not in fileset and filename not in core_fileset:
            mtimes.pop(filename)

    for func, handler in list(bot.threads.iteritems()):
        if func._filename not in fileset:
            handler.stop()
            del bot.threads[func]

    # compile new plugins
    for filename in fileset:
        mtime = os.stat(filename).st_mtime
        if mtime != mtimes.get(filename):
            mtimes[filename] = mtime
            changed = True
            try:
                code = compile(open(filename, 'U').read(), filename, 'exec')
                namespace = {}
                eval(code, namespace)
            except Exception:
                traceback.print_exc()
                continue

            # remove plugins already loaded from this filename
            for name, data in bot.plugs.iteritems():
                bot.plugs[name] = [x for x in data
                                   if x[0]._filename != filename]

            for func, handler in list(bot.threads.iteritems()):
                if func._filename == filename:
                    handler.stop()
                    del bot.threads[func]

            for obj in namespace.itervalues():
                if hasattr(obj, '_hook'):  # check for magic
                    if obj._thread:
                        bot.threads[obj] = Handler(obj)
                    for type, data in obj._hook:
                        bot.plugs[type] += [data]
                        if not init:
                            print '### new plugin (type: %s) loaded:' % \
                                type, format_plug(data)

    if changed:
        bot.commands = {}
        for plug in bot.plugs['command']:
            name = plug[1]['name'].lower()
            if not re.match(r'^\w+$', name):
                print '### ERROR: invalid command name "%s" (%s)' % (name,
                    format_plug(plug))
                continue
            if name in bot.commands:
                print "### ERROR: command '%s' already registered (%s, %s)" % \
                    (name, format_plug(bot.commands[name]),
                     format_plug(plug))
                continue
            bot.commands[name] = plug

        bot.events = collections.defaultdict(list)
        for func, args in bot.plugs['event']:
            for event in args['events']:
                bot.events[event].append((func, args))

    if init:
        print '  plugin listing:'
        if bot.commands:
            # hack to make commands with multiple aliases
            # print nicely
            print '    command:'
            commands = collections.defaultdict(list)
            for name, (func, args) in bot.commands.iteritems():
                commands[make_signature(func)].append(name)
            for sig, names in sorted(commands.iteritems()):
                names.sort(key=lambda x: (-len(x), x))  # long names first
                out = ' ' * 6 + '%s:%s:%s' % sig
                out += ' ' * (50 - len(out)) + ', '.join(names)
                print out
        for kind, plugs in sorted(bot.plugs.iteritems()):
            if kind == 'command':
                continue
            print '    %s:' % kind
            for plug in plugs:
                print format_plug(plug, kind=kind, lpad=6)
        print
Let's say the plugin I want to reload once a minute is called flightsinfo.py. How can I do that?

The important code looks like it's here:
mtime = os.stat(filename).st_mtime
if mtime != mtimes.get(filename):
    mtimes[filename] = mtime
    changed = True
    try:
        code = compile(open(filename, 'U').read(), filename, 'exec')
        namespace = {}
        eval(code, namespace)
    except Exception:
        traceback.print_exc()
        continue
If the modification time of the file has changed (e.g. when you open and save it), the compile/exec functionality is called and the plugin is reloaded.
There are a couple of ways to solve this, depending on your situation:
Periodically update the mtime of the file. For example, on Linux you might run a cron job once a minute to touch /path/to/flightsinfo.py.
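If you'd rather stay in Python than use cron, os.utime(path, None) sets the file's mtime to "now", so the existing mtime check in reload.py will see the file as changed. A minimal sketch (the path is assumed):

import os
import time

while True:
    os.utime('/path/to/flightsinfo.py', None)  # same effect as `touch`
    time.sleep(60)  # once a minute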
Alternatively, in reload.py refactor the reload functionality into a function and call that from your own Python code:
def reload(filename):
    try:
        code = compile(open(filename, 'U').read(), filename, 'exec')
        namespace = {}
        eval(code, namespace)
        ...
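Once the reload logic is a plain function, a simple way to trigger it once a minute is a self-rearming threading.Timer. This is only a sketch: it assumes the refactored reload(filename) above is in scope and that calling it from a timer thread is safe in your bot.

import threading

RELOAD_INTERVAL = 60  # seconds

def reload_flightsinfo():
    reload('plugins/flightsinfo.py')  # the refactored function from above
    # re-arm the timer so the reload runs again in a minute
    t = threading.Timer(RELOAD_INTERVAL, reload_flightsinfo)
    t.daemon = True  # don't keep the process alive just for reloads
    t.start()

reload_flightsinfo()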

Related

separate line output by groups

My python script checks mysqldump files, and if there are any problems the script prints:
Dump is old for db;
Dump is not complete for db;
Dump is empty for db;
MySQL dump does not exist for db;
The script logs these records to the file line by line.
My question: is there a way to format the output in the file like this:
Dump is old for db;
Dump is old for db;
Dump is old for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is empty for db;
Dump is empty for db;
Dump is empty for db;
Because now my file looks like:
Dump is old for db;
Dump is empty for db;
Dump is old for db;
MySQL dump does not exist for db;
...
etc
Here is my small script :)
#!/bin/env python
import psycopg2
import sys, os
from subprocess import Popen, PIPE
from datetime import datetime
import smtplib

con = None
today = datetime.now().strftime("%Y-%m-%d")
log_dump_fail = '/tmp/mysqldump_FAIL'
log_fail = open(log_dump_fail, 'w').close()  # truncate the log file
log_fail = open(log_dump_fail, 'a')
sender = 'PUT_SENDER_NAME_HERE'
receiver = ['receiver_name']
smtp_daemon_host = 'localhost'

def db_backup_file_does_not_exist(db_backup_file):
    if not os.path.exists(db_backup_file): return True
    else: return False

def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        log_fail.write("\nDB is old for " + str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        log_fail.write("\nDump is not complete for " + str(db) + str(file_name) + " , end of file is not correct")
        log_fail.write("\n-------------------------------------------")

suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

def humansize(nbytes):
    if nbytes == 0: return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(suffixes) - 1:
        nbytes /= 1024.
        i += 1
    f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (f, suffixes[i])

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        log_fail.write("\nDump is empty for " + str(db) + "\n" + "\t" + str(file_name) + ", file size is " + str(human_readable))
        log_fail.write("\n-------------------------------------------")

def report_to_noc(subject, text):
    TEXT = text
    SUBJECT = subject
    message = 'Subject: %s\n\n%s' % (SUBJECT, TEXT)
    server = smtplib.SMTP(smtp_daemon_host)
    server.sendmail(sender, receiver, message)
    server.quit()

try:
    con = psycopg2.connect(database='**', user='***', password='***', host='****')
    cur = con.cursor()
    cur.execute("""\
        select ad.servicename, (select name from servers where id = ps.server_id) as servername
        from packages as p, account_data as ad, package_servers as ps
        where p.id=ad.package_id and
              p.date_deleted IS NULL and
              p.id=ps.package_id and
              p.aktuel IS NULL and
              p.pre_def_package_id = 4 and
              p.mother_package_id != 0 and
              ps.subservice_id = 5 and
              p.mother_package_id NOT IN (select id from packages where date_deleted IS NOT NULL)
        ORDER BY servername;
        """)
    while (1):
        row = cur.fetchone()
        if row == None:
            break
        db = row[0]
        server_name = str(row[1])
        if (''.join(server_name) == 'SKIP_THIS') or (''.join(server_name) == 'SKIP_THIS'):
            continue
        else:
            db_backup_file = '/storage/backup/db/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_backup_file2 = '/storage/backup/' + str(''.join(server_name.split("DB"))) + '/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_file_does_not_exist = False
            db_file2_does_not_exist = False
            if db_backup_file_does_not_exist(db_backup_file):
                db_file_does_not_exist = True
            if db_backup_file_does_not_exist(db_backup_file2):
                db_file2_does_not_exist = True
            if db_file_does_not_exist and db_file2_does_not_exist:
                log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
                log_fail.write("\n-------------------------------------------")
                continue
            elif (db_file_does_not_exist) and not (db_file2_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file2], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file2, db)
                dump_size(db_backup_file2, db_backup_file2, db)
            elif (db_file2_does_not_exist) and not (db_file_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file, db)
                dump_size(db_backup_file, db_backup_file, db)
    con.close()
except psycopg2.DatabaseError, e:
    print 'Error %s' % e
    sys.exit(1)

log_fail.close()

if os.path.getsize(log_dump_fail) > 0:
    subject = "Not all MySQL dumps completed successfully. Log file backup:" + str(log_dump_fail)
    fh = open(log_dump_fail, 'r')
    text = fh.read()
    fh.close()
    report_to_noc(subject, text)
else:
    subject = "MySQL dump completed successfully for all DBs, listed in PC"
    text = "Hello! \nI am notifying you that I checked mysqldump files this morning.\nThere is nothing to worry about. :)"
    report_to_noc(subject, text)
You can process your log file after it has been written.
One option is to read your file and sort the lines:
lines = open('log.txt').readlines()
lines.sort()
open('log_sorted.txt', 'w').write("".join(lines))
This won't emit an empty line between log types.
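If you do want a blank line between the groups, itertools.groupby over the sorted lines gives you the group boundaries; a sketch:

from itertools import groupby

lines = sorted(open('log.txt').readlines())

out_file = open('log_sorted.txt', 'w')
for key, group in groupby(lines):
    for line in group:
        out_file.write(line)  # each line already ends with '\n'
    out_file.write('\n')      # blank line between message types
out_file.close()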
Another option is to use a Counter:
from collections import Counter

lines = open('log.txt').readlines()
counter = Counter()
for line in lines:
    counter[line] += 1

out_file = open('log_sorted.txt', 'w')
for line, num in counter.iteritems():
    out_file.write(line * num + "\n")
Looks like you want to group the output of the script, rather than log the info as it comes while searching.
Easiest would be to maintain 4 lists, one each for old, empty, not complete and does not exist. In the script, add the db names to the appropriate list instead of logging, and then dump the lists one by one into the file with appropriate prefixes ("not empty for" + dbname).
For example, remove all the log_fail.write() calls from the functions and replace them with list.append(), and write a separate function that writes to the log file as you like:
Add lists:
db_dump_is_old_list = []
db_dump_is_empty_list = []
db_dump_is_not_complete_list = []
db_dump_does_not_exist_list = []
Modify the Functions:
def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        db_dump_is_old_list.append(str(db))
        #log_fail.write("\nDB is old for "+ str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        #log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        db_dump_is_not_complete_list.append(str(db))
        #log_fail.write("\nDump is not complete for "+str(db) + str(file_name) + " , end of file is not correct")
        #log_fail.write("\n-------------------------------------------")

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        db_dump_is_empty_list.append(str(db))
        #log_fail.write("\nDump is empty for " +str(db) + "\n" +"\t" + str(file_name)+", file size is " + str(human_readable))
        #log_fail.write("\n-------------------------------------------")

And in the main loop:

if db_file_does_not_exist and db_file2_does_not_exist:
    db_dump_does_not_exist_list.append(str(db))
    #log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
    #log_fail.write("\n-------------------------------------------")
    continue
And add a logger function:
def dump_info_to_log_file():
    log_dump_fail = '/tmp/mysqldump_FAIL'
    log_fail = open(log_dump_fail, 'w').close()
    log_fail = open(log_dump_fail, 'a')
    for dbname in db_dump_is_old_list:
        log_fail.write("Dump is Old for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_is_empty_list:
        log_fail.write("Dump is Empty for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_is_not_complete_list:
        log_fail.write("Dump is Not Complete for " + str(dbname) + "\n")
    log_fail.write("\n\n")
    for dbname in db_dump_does_not_exist_list:
        log_fail.write("Dump Does Not Exist for " + str(dbname) + "\n")
    log_fail.close()
Or you could simply log as you are doing, and then read in the file, sort and write back the file.
Thank you all for the interesting ideas.
I have really tried all the options :)
To my mind:
With the Counter object the pro is very few lines of code, but the con is many read/write operations. The log file is not big; however, I decided to reduce the number of reads and writes.
With arrays the con is more lines of code :) but the pro is that the file is written only once.
So I implemented arrays :)
Thank you guys!!!

Python script that performs line matching over stale files generates inconsistent output

I created a python script to parse mail (exim) logfiles and do pattern matching in order to get a top-100 list of the domains my smtp servers send to most.
However, every time I execute the script I get a different count.
These are stale logfiles (nothing is writing to them any more), and I cannot find a functional flaw in my code.
Example output:
1:
70353 gmail.com
68337 hotmail.com
53657 yahoo.com
2:
70020 gmail.com
67741 hotmail.com
54397 yahoo.com
3:
70191 gmail.com
67917 hotmail.com
54438 yahoo.com
Code:
#!/usr/bin/env python
import os
import datetime
import re
from collections import defaultdict

class DomainCounter(object):
    def __init__(self):
        self.base_path = '/opt/mail_log'
        self.tmp = []
        self.date = datetime.date.today() - datetime.timedelta(days=14)
        self.file_out = '/var/tmp/parsed_exim_files-' + str(self.date.strftime('%Y%m%d')) + '.decompressed'

    def parse_log_files(self):
        sub_dir = os.listdir(self.base_path)
        for directory in sub_dir:
            if re.search('smtp\d+', directory):
                fileInput = self.base_path + '/' + directory + '/maillog-' + str(self.date.strftime('%Y%m%d')) + '.bz2'
                if not os.path.isfile(self.file_out):
                    os.popen('touch ' + self.file_out)
                proccessFiles = os.popen('/bin/bunzip2 -cd ' + fileInput + ' > ' + self.file_out)
                accessFileHandle = open(self.file_out, 'r')
                readFileHandle = accessFileHandle.readlines()
                print "Proccessing %s." % fileInput
                for line in readFileHandle:
                    if '<=' in line and ' for ' in line and '<>' not in line:
                        distinctLine = line.split(' for ')
                        recipientAddresses = distinctLine[1].strip()
                        recipientAddressList = recipientAddresses.strip().split(' ')
                        if len(recipientAddressList) > 1:
                            for emailaddress in recipientAddressList:
                                # Since syslog messages are transmitted over UDP some messages are dropped and need to be filtered out.
                                if '#' in emailaddress:
                                    (login, domein) = emailaddress.split("#")
                                    self.tmp.append(domein)
                                continue
                        else:
                            try:
                                (login, domein) = recipientAddressList[0].split("#")
                                self.tmp.append(domein)
                            except Exception as e:
                                print e, '<<No valid email address found, skipping line>>'
                accessFileHandle.close()
                os.unlink(self.file_out)
        return self.tmp

if __name__ == '__main__':
    domainCounter = DomainCounter()
    result = domainCounter.parse_log_files()
    domainCounts = defaultdict(int)
    top = 100
    for domain in result:
        domainCounts[domain] += 1
    sortedDict = dict(sorted(domainCounts.items(), key=lambda x: x[1], reverse=True)[:int(top)])
    for w in sorted(sortedDict, key=sortedDict.get, reverse=True):
        print '%-3s %s' % (sortedDict[w], w)
proccessFiles = os.popen('/bin/bunzip2 -cd ' + fileInput + ' > ' + self.file_out)
This line is non-blocking: it starts the command, but the next few lines are already reading the output file while bunzip2 may still be writing it. This is basically a concurrency issue, a race between the writer and the reader, which is why the counts vary between runs. Wait for the command to complete before reading the file.
Also see:
"Python popen command. Wait until the command is finished", since os.popen has been deprecated since python 2.6 (depending on which version you are using).
Sidenote - The same happens to the line below. The file may, or may not, exist after executing the following line:
os.popen('touch ' + self.file_out)
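For instance, subprocess.call blocks until the child process exits (and works for the touch as well), so the decompressed file is guaranteed to be complete before you read it. A sketch, with file_input and file_out as hypothetical stand-ins for the question's fileInput and self.file_out:

from subprocess import call

file_input = '/opt/mail_log/smtp1/maillog-20130101.bz2'   # hypothetical path, per the question's layout
file_out = '/var/tmp/parsed_exim_files.decompressed'

out_handle = open(file_out, 'w')
# call() waits for bunzip2 to finish, unlike os.popen()
status = call(['/bin/bunzip2', '-cd', file_input], stdout=out_handle)
out_handle.close()
if status != 0:
    print "bunzip2 exited with status %d" % status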

Trying to get output of ffprobe into variable

I am trying to grab the ffprobe values from a video file into a variable that I can compare against others or move into a database. The question I have: is there a better way of doing it than below?
I don't like the multiple if/elif/line.startswith statements, and I am not sure split is the best way of getting the ffprobe values.
#!/usr/bin/python
import os, sys, subprocess, shlex, re, fnmatch
from subprocess import call

videoDrop_dir = "/mnt/VoigtKampff/Temp/_Jonatha/test_drop"

for r, d, f in os.walk(videoDrop_dir):
    for files in f:
        print "Files: %s" % files
        if files.startswith(('._', '.')):
            print "This file: %s is not valid" % files
        elif files.endswith(('.mov', '.mpg', '.mp4', '.wmv', '.mxf')):
            fpath = os.path.join(r, files)

            def probe_file(fpath):
                cmnd = ['ffprobe', '-show_format', '-show_streams', '-pretty', '-loglevel', 'quiet', fpath]
                p = subprocess.Popen(cmnd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                print files
                out, err = p.communicate()
                print "===============================OUTPUT START: %s ===============================" % files
                print out
                for line in out.split('\n'):
                    line = line.strip()
                    if line.startswith('codec_name='):
                        s = line
                        codec_name = s.split('codec_name=', 1)
                        print "Codec is: %s" % codec_name[1]
                        codec_1 = codec_name[1]
                    elif line.startswith('codec_type='):
                        s = line
                        codec_type = s.split('codec_type=', 1)
                        print "Codec type is: %s" % codec_type[1]
                        codec_type1 = codec_type[1]
                    elif line.startswith('codec_long_name='):
                        s = line
                        codec_long_name = s.split('codec_long_name=', 1)
                        print "Codec long name: %s" % codec_long_name[1]
                        codec_long_name = codec_long_name[1]
                    elif line.startswith('format_long_name='):
                        s = line
                        format_long_name = s.split('format_long_name=', 1)
                        print "Format long name: %s" % format_long_name[1]
                        format_long_name = format_long_name[1]
                    elif line.startswith('width='):
                        s = line
                        width = s.split('width=', 1)
                        print "Video pixel width is: %s" % width[1]
                        p_width = width[1]
                    elif line.startswith('height='):
                        s = line
                        height = s.split('height=', 1)
                        print "Video pixel height is: %s" % height[1]
                        p_height = height[1]
                    elif line.startswith('bit_rate='):
                        s = line
                        bit_rate = s.split('bit_rate=', 1)
                        print "Bit rate is: %s" % bit_rate[1]
                        bit_rate1 = bit_rate[1]
                    elif line.startswith('display_aspect_ratio='):
                        s = line
                        display_aspect_ratio = s.split('display_aspect_ratio=', 1)
                        print "Display aspect ratio: %s" % display_aspect_ratio[1]
                        display_aspect_ratio1 = display_aspect_ratio[1]
                    elif line.startswith('avg_frame_rate='):
                        s = line
                        avg_frame_rate = s.split('avg_frame_rate=', 1)
                        print "Average Frame Rate: %s" % avg_frame_rate[1]
                        avg_frame_rate1 = avg_frame_rate[1]
                print "===============================OUTPUT FINISH: %s ===============================" % files
                if err:
                    print "===============================ERROR: %s ===============================" % files
                    print err

            probe_file(fpath)
        else:
            if not files.startswith(('.mov', '.mpg', '.mp4', '.wmv', '.mxf')):
                print "This file: %s is not a valid video file" % files
This is a bit late, but hopefully it helps others searching for a similar answer.
import json, subprocess

# grab info about video_file (v holds the path to the video file)
ffprobe_cmd = '/home/ubuntu/bin/ffprobe -v quiet -print_format json -show_format -show_streams -i ' + v + ' 2>&1'
# print ffprobe_cmd
s = subprocess.Popen(ffprobe_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
ffprobe_out, err = s.communicate()
ffprobe_dict = json.loads(ffprobe_out)
From here, I re-use a common method, search_dict, which can be used like:
search_dict(ffprobe_dict, 'height')
def search_dict(my_dict, field):
    """Takes a dict with nested lists and dicts,
    and searches all dicts for a key of the field
    provided.
    """
    fields_found = []
    for key, value in my_dict.iteritems():
        if key == field:
            fields_found.append(value)
        elif isinstance(value, dict):
            results = search_dict(value, field)
            for result in results:
                fields_found.append(result)
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    more_results = search_dict(item, field)
                    for another_result in more_results:
                        fields_found.append(another_result)
    return fields_found
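As a quick usage sketch, assuming ffprobe_dict came back from the json.loads call above:

heights = search_dict(ffprobe_dict, 'height')      # typically one entry per video stream
codecs = search_dict(ffprobe_dict, 'codec_name')   # codec of each stream, audio and video
print 'heights: %s, codecs: %s' % (heights, codecs)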
You should ask this question on https://codereview.stackexchange.com/

Can't get Python picture renaming program to work right

This program is supposed to run from the command line like this:
python Filename Folder_photos_are_in New_Prefix
It should just rename the files, but it wasn't working, so I had it print the output of each function as it runs. It seems to work all right until the sortByMTime function, at which point all of the files disappear from my list except the last one.
Here is the code:
import sys
import os
import random

def filterByExtension(root, allfiles, extensions):
    files = []
    for f in allfiles:
        hasExt = f.rfind('.')
        if(hasExt > 0):
            ext = f[hasExt+1::].lower()
            if(ext in extensions):
                f2 = os.path.join(root, f)
                if(os.path.isfile(f2)):
                    files.append(f)
                else:
                    print "Matching irregular file " + f
    return files

def sortByMTime(root, matching):
    photos = []
    for f in matching:
        path = os.path.join(root, f)
        mtime = os.path.getmtime(path)
        photos.append((mtime, f))
    photos.sort()
    return photos

def assignNames(prefix, inorder):
    kount = str(len(inorder))
    digits = len(kount)
    template = '%%0%dd' % digits
    newnames = {}
    kount = 0
    for i in inorder:
        kount += 1
        s = template % kount
        newnames[i[1]] = prefix + s + '.' + i[1].split('.')[1]
    return newnames
    print newnames

def makeTempName(allfiles):
    r = random.randrange(0, 1000000000)
    name = "__temp%i__" % r
    while name in allfiles:
        r += 1
        name = "__temp%i__" % r
    return name

def makeScript(inorder, newnames, tempname):
    chain = []
    inthechain = {}
    script = []
    for i in inorder:
        if i not in newnames:
            continue
        if newnames[i] == id:
            del newnames[i]
            continue
        if newnames[i] not in newnames:
            target = newnames[i]
            script.append( (i, target) )
            del newnames[i]
            continue
        else:
            link = i
            while True:
                target = newnames[i]
                chain.append( (link, target) )
                inthechain[link] = True
                link = target
                if link not in newnames:
                    break
            chain.reverse()
            for (a, b) in chain:
                print "This is in the chain: "
                print chain
                script.append(a, b)
                del newnames[a]
    return script

def doRenames(root, script):
    for (old, new) in script:
        print "%s -> %s" % (old, new)
        fulloldpath = os.path.join(root, old)
        fullnewpath = os.path.join(root, new)
        if os.path.exists(fullnewpath):
            print "File already exists"
            os.exit(1)
        else:
            os.rename(fulloldpath, fullnewpath)

def main():
    usrdir = []
    allfiles = []
    path = []
    prefix = ''
    args = sys.argv
    args.pop(0)  # remove first thing from list
    if len(args) == 2:  # Directory and Prefix are provided
        print "Directory: ", args[0]
        print "Prefix: ", args[1]
        usrdir = args[0]
        path = os.path.abspath(usrdir)
        prefix = os.path.basename(path)
    if len(args) == 1:  # Only directory is provided
        args.append(args[0])  # Makes the directory name the prefix as well
        print "Directory: ", args[0]
        print "Prefix: ", args[1]
        usrdir = args[0]
        path = os.path.abspath(usrdir)
        prefix = os.path.basename(path)
    if len(args) == 0 or len(args) > 2:  # ends the program because wrong number of arguments
        print "INVALID Number of Arguments:"
        print "Usage: python bulkrename.py <directory> [<prefix>]"
        exit(1)
    allfiles = os.listdir(usrdir)
    print "Printout of allfiles"
    print allfiles
    print
    root = os.path.abspath(args[0])
    print "root: ", root
    print
    extensions = ['jpeg', 'jpg', 'png', 'gif']
    print "What Extensions should be able to be used: "
    print extensions
    print
    matching = filterByExtension(root, allfiles, extensions)
    print "What comes out of filterByExtension"
    print matching
    print
    inorder = sortByMTime(path, matching)
    print "What comes out of sortByMTime"
    print inorder
    print
    newnames = assignNames(prefix, inorder)
    print "What comes out of assignNames"
    print newnames
    print
    tempname = makeTempName(allfiles)
    print "What comes out of makeTempName"
    print tempname
    print
    script = makeScript(inorder, newnames, tempname)
    print "What comes out of makeScript"
    print script
    print
    doRenames(path, script)
    print "What comes out of doRenames"
    print doRenames
    print

main()
And here is the output from the terminal:
virus-haven:CS1410 chrislebaron$ python bulkrenamer.py bulkrename test
Directory: bulkrename
Prefix: test
Printout of allfiles
['.DS_Store', '20120902Snow_Canyon02.JPG', '20120902Snow_Canyon03.JPG', '20120902Snow_Canyon05.JPG', '20120902Snow_Canyon06.JPG', '20120902Snow_Canyon08.JPG', '20120902Snow_Canyon09.JPG', '20120902Snow_Canyon11.JPG', '20120902Snow_Canyon12.JPG', 'airplane.png', 'BackNoText.jpg', 'blah', 'FrontNoText.jpg', 'glitchbusters.jpg', 'IMG_7663.JPG', 'IMG_7664.JPG', 'Pomegranates.jpg', 'rccar.png']
root: /Users/chrislebaron/Documents/School/CS1410/bulkrename
What Extensions should be able to be used:
['jpeg', 'jpg', 'png', 'gif']
What comes out of filterByExtension
['20120902Snow_Canyon02.JPG', '20120902Snow_Canyon03.JPG', '20120902Snow_Canyon05.JPG', '20120902Snow_Canyon06.JPG', '20120902Snow_Canyon08.JPG', '20120902Snow_Canyon09.JPG', '20120902Snow_Canyon11.JPG', '20120902Snow_Canyon12.JPG', 'airplane.png', 'BackNoText.jpg', 'FrontNoText.jpg', 'glitchbusters.jpg', 'IMG_7663.JPG', 'IMG_7664.JPG', 'Pomegranates.jpg', 'rccar.png']
What comes out of sortByMTime
[(1322960835.0, 'rccar.png')]
What comes out of assignNames
{'rccar.png': 'bulkrename1.png'}
What comes out of makeTempName
__temp55210675__
What comes out of makeScript
[]
What comes out of doRenames
<function doRenames at 0x100dede60>
virus-haven:CS1410 chrislebaron$
You've goofed your indentation by mixing spaces and tabs. When tabs and spaces are mixed, a line like the photos.append(...) call can sit inside the loop in your editor but outside the loop as the interpreter sees it, which is exactly how only the last file would survive sortByMTime. Run the script with python -tt to verify.

FTP and python question

Can someone help me? Why is this not working?
import ftplib
import os

def readList(request):
    machine = []
    login = []
    password = []
    for line in open("netrc"):  # read netrc file
        old = line.strip()
        line = line.strip().split()
        if old.startswith("machine"): machine.append(line[-1])
        if old.startswith("login"): login.append(line[-1])
        if old.startswith("password"): password.append(line[-1])
    connectFtp(machine, login, password)

def connectFtp(machine, login, password):
    for i in range(len(machine)):
        try:
            ftp = ftplib.FTP(machine[i])
            print 'conected to ' + machine[i]
            ftp.login(login[i], password[i])
            print 'login - ' + login[i] + ' pasword -' + password[i]
        except Exception, e:
            print e
        else:
            ftp.cwd("PublicFolder")
            print 'PublicFolder'

def upload(filename, file):
    readList()
    ext = os.path.splitext(file)[1]
    if ext in (".txt", ".htm", ".html"):
        ftp.storlines("STOR " + filename, open(file))
    else:
        ftp.storbinary("STOR " + filename, open(file, "rb"), 1024)
    print 'success... yra'

upload('test4.txt', r'c:\example2\media\uploads\test4.txt')
It was working when everything was together, but when I separated it into functions something happened and I can't understand what.
(Apart from the horrid indentation problems, which are presumably due to botched copy and paste, otherwise you'd get syntax errors up the wazoo...!):
Scoping problem, first: connectFtp makes a local variable ftp, so that variable goes away as soon as the function's done. Then upload tries using the variable, but of course it isn't there any more.
Add a return ftp at the end of connectFtp, a yield of the connected ftp instead of a plain call to connectFtp in readList, and use a for ftp in readList(): loop in upload.
Something like this?
import ftplib
import os

def readList(request):
    machine = []
    login = []
    password = []
    for line in open("netrc"):  # read netrc file
        old = line.strip()
        line = line.strip().split()
        if old.startswith("machine"): machine.append(line[-1])
        if old.startswith("login"): login.append(line[-1])
        if old.startswith("password"): password.append(line[-1])
    yield connectFtp

def connectFtp(machine, login, password):
    for i in range(len(machine)):
        try:
            ftp = ftplib.FTP(machine[i])
            print 'conected to ' + machine[i]
            ftp.login(login[i], password[i])
            print 'login - ' + login[i] + ' pasword -' + password[i]
        except Exception, e:
            print e
        else:
            ftp.cwd("PublicFolder")
            print 'PublicFolder'
    return (ftp)

def upload(filename, file):
    for ftp in readList():
        ext = os.path.splitext(file)[1]
        if ext in (".txt", ".htm", ".html"):
            ftp.storlines("STOR " + filename, open(file))
        else:
            ftp.storbinary("STOR " + filename, open(file, "rb"), 1024)
        print 'success... yra'

upload('test4.txt', r'c:\example2\media\uploads\test4.txt')
Error at line 19, something with try:
unindent does not match any outer indentation level
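For reference, here is a minimal sketch of the pattern the earlier answer describes: connectFtp yields each live connection, readList returns that generator, and upload iterates over it. This is illustrative only (the rough netrc scanning is kept from the question, and the request argument is dropped), not a drop-in replacement:

import ftplib
import os

def readList():
    machine = []
    login = []
    password = []
    for line in open("netrc"):  # same rough netrc scan as the question
        old = line.strip()
        parts = old.split()
        if old.startswith("machine"): machine.append(parts[-1])
        if old.startswith("login"): login.append(parts[-1])
        if old.startswith("password"): password.append(parts[-1])
    return connectFtp(machine, login, password)

def connectFtp(machine, login, password):
    for i in range(len(machine)):
        try:
            ftp = ftplib.FTP(machine[i])
            ftp.login(login[i], password[i])
            ftp.cwd("PublicFolder")
        except Exception, e:
            print e
            continue
        yield ftp  # hand each live connection to the caller

def upload(filename, file):
    for ftp in readList():
        ext = os.path.splitext(file)[1]
        if ext in (".txt", ".htm", ".html"):
            ftp.storlines("STOR " + filename, open(file))
        else:
            ftp.storbinary("STOR " + filename, open(file, "rb"), 1024)
        print 'success... yra'

upload('test4.txt', r'c:\example2\media\uploads\test4.txt')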
