So I'm writing a script that will search a folder recursively for a .mkv file and upload it to my NAS. I have the script working but I can't see the progress. I imported this progressbar I found on github and was able to use the demo to see it work. It is what I want however the FTP example they included is to retrieve a file from the server. I need to upload.
How do I get the uploaded amount on an interval so I can run an update to the progress bar?
Below is the code I have that works for the upload.
import os
import ftplib
import ntpath

ntpath.basename("a/b/c")

def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

from glob import glob

FileTransferList = [y for x in os.walk('/tmp/rippedMovies') for y in glob(os.path.join(x[0], '*.mkv'))]

global ftp

def FTP_GLOB_transfer(URL, UserName, Password):
    ftp = ftplib.FTP(URL, UserName, Password)  # connect to host, default port
    print URL, UserName, Password

    for file in FileTransferList:
        FileName = path_leaf(file)
        print file
        TheFile = open(file, 'r')
        ftp.storbinary('STOR ' + FileName, TheFile, 1024)
        TheFile.close()

    ftp.quit()
    ftp = None

FTP_GLOB_transfer('<IP>', '<USER>', '<PASSWORD>')
I figured it out. I decided to use tqdm, as I found its documentation easier to read. I had assumed storbinary() needed to return something to report its progress; I just didn't know I was looking for a callback.
Anyway, I added a new import: from tqdm import tqdm
I added filesize = os.path.getsize(file) to get the size of the file being uploaded.
Then I replaced ftp.storbinary('STOR ' + FileName, TheFile, 1024) with this code:
with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc='Uploading......', total=filesize) as tqdm_instance:
    ftp.storbinary('STOR ' + FileName, TheFile, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
And overall the new working code looks like
import os
import ftplib
import ntpath
from tqdm import tqdm

ntpath.basename("a/b/c")

def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

from glob import glob

FileTransferList = [y for x in os.walk('/tmp/rippedMovies') for y in glob(os.path.join(x[0], '*.mkv'))]

global ftp

def FTP_GLOB_transfer(URL, UserName, Password):
    ftp = ftplib.FTP(URL, UserName, Password)  # connect to host, default port
    print URL, UserName, Password

    for file in FileTransferList:
        FileName = path_leaf(file)
        filesize = os.path.getsize(file)
        print file
        TheFile = open(file, 'r')

        with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc='Uploading......', total=filesize) as tqdm_instance:
            ftp.storbinary('STOR ' + FileName, TheFile, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))

        TheFile.close()

    ftp.quit()
    ftp = None
It now outputs as
/tmp/rippedMovies/TestMovie.mkv
Uploading......:  51%|████████████████████▉ | 547M/1.07G [00:05<00:14, 36.8Mblocks/s]
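As a side note, newer versions of tqdm can also wrap the file object itself, so storbinary's own read() calls drive the bar without an explicit callback. This is only a minimal sketch of that variant, assuming the same file, FileName and filesize variables as above and that your tqdm version provides tqdm.wrapattr:

from tqdm import tqdm

# tqdm.wrapattr intercepts every read() on the file, so the progress bar
# advances as storbinary pulls data from it; no callback is needed
with open(file, 'rb') as TheFile, tqdm.wrapattr(TheFile, 'read', total=filesize, desc='Uploading......', leave=False) as wrapped:
    ftp.storbinary('STOR ' + FileName, wrapped, 2048)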
My code was working just fine before I added the hash function: I was getting a list of all folders and files in my directory in the PrettyTable. Once I added the hash function, only about 5 of the files in that directory showed up in the table with hashes. I am not sure where I have gone wrong. Please forgive me, I am new to this; we are not learning to code from scratch, but have to modify existing code to work the way we need it to.
# Python Standard Libraries
import os        # file system methods
import hashlib   # hashing functions
import sys       # system methods
import time      # time conversions

# Python 3rd Party Libraries
from prettytable import PrettyTable  # pip install prettytable

# Local Functions
def GetFileMetaData(fileName):
    # obtain file system metadata
    try:
        metaData = os.stat(fileName)         # Use the stat method to obtain meta data
        fileSize = metaData.st_size          # Extract fileSize and MAC Times
        timeLastAccess = metaData.st_atime
        timeLastModified = metaData.st_mtime
        timeCreated = metaData.st_ctime
        macTimeList = [timeLastModified, timeCreated, timeLastAccess]  # Group the MAC Times in a List
        return True, None, fileSize, macTimeList
    except Exception as err:
        return False, str(err), None, None

# Pseudo Constants

# Start of the Script
tbl = PrettyTable(['FilePath', 'FileSize', 'UTC-Modified', 'UTC-Accessed', 'UTC-Created', 'SHA-256 HASH'])

# file check
while True:
    targetFolder = input("Enter Target Folder: ")
    if os.path.isdir(targetFolder):
        break
    else:
        print("\nInvalid Folder ... Please Try Again")

print("Walking: ", targetFolder, "\n")
print()

for currentRoot, dirList, fileList in os.walk(targetFolder):
    for nextFile in fileList:
        fullPath = os.path.join(currentRoot, nextFile)
        absPath = os.path.abspath(fullPath)
        fileSize = os.path.getsize(absPath)
        success, errInfo, fileSize, macList = GetFileMetaData(absPath)
        if success:
            # convert to readable Greenwich Time
            modTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[0]))
            accTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[1]))
            creTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[2]))
            # hashing function
            with open(absPath, 'rb') as target:
                fileContents = target.read()
                sha256Obj = hashlib.sha256()
                sha256Obj.update(fileContents)
                hexDigest = sha256Obj.hexdigest()
            tbl.add_row([absPath, fileSize, modTime, accTime, creTime, hexDigest])

tbl.align = "l"  # align the columns left justified

# display the table
print(tbl.get_string(sortby="FileSize", reversesort=True))
print("\nScript-End\n")
I wrote a Python script for my project that stores the names of the files on the SFTP server in a list, does the same for a local directory, compares the two lists, and then downloads the files that are not common to both. The script takes its server credentials from a separate text file with the following format.
CONTENTS OF TEXT FILE (config9.txt)
127.0.0.1,username,password,log22.txt,/new45,C:\Users\udit\Desktop\ftp2\bob16
My program executed the transfers one by one for the different IPs. I wanted it to run in parallel for all IPs using multithreading. I modified my code, but it still executes them one by one. What is the error in it?
import os
import pysftp
import csv
import socket
from stat import S_IMODE, S_ISDIR, S_ISREG
import time
import threading
from threading import Thread
from time import sleep
import os.path
import shutil
import fileinput
import lock

cnopts = pysftp.CnOpts()
cnopts.hostkeys = None

def mainprocess():
    with open("config9.txt", "r") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            print(row)
            IP = row[0]
            myUsername = row[1]
            myPassword = row[2]
            txtfile = row[3]
            remotepath = row[4]
            localpath = row[5]
            with pysftp.Connection(host=IP, username=myUsername, password=myPassword, cnopts=cnopts) as sftp:
                r = str(socket.gethostbyaddr(IP))
                print("connection successful with " + r)

                def get_file2(sftp, remotedir):
                    result = []
                    for entry in sftp.listdir_attr(remotedir):
                        remotepath = remotedir + "/" + entry.filename
                        mode = entry.st_mode
                        if S_ISDIR(mode):
                            result += get_file2(sftp, remotepath)
                        elif S_ISREG(mode):
                            if (time.time() - entry.st_mtime) // (24 * 3600) > 0:
                                result.append(entry.filename)
                    return result

                remote_path = remotepath
                d = get_file2(sftp, remote_path)

                def process():
                    myName = []
                    with open(filename, 'r+') as f:
                        for name in f.readlines():
                            while '\n' in name:
                                name = name.replace('\n', '')
                            myName.append(name)
                    print(myName)
                    print(len(myName))
                    import os
                    filtered_list = [string for string in d if string not in myName]
                    print("filtered list:", filtered_list)
                    print(len(filtered_list))
                    local_path = localpath

                    def compare_files(sftp, remotedir, remotefile, localdir, preserve_mtime=True):
                        remotepath = remotedir + "/" + remotefile
                        localpath = os.path.join(localdir, remotefile)
                        mode = sftp.stat(remotepath).st_mode
                        if S_ISDIR(mode):
                            try:
                                os.mkdir(localpath, mode=777)
                            except OSError:
                                pass
                            compare_files(sftp, remotepath, localpath, preserve_mtime)
                        elif S_ISREG(mode):
                            sftp.get(remotepath, localpath, preserve_mtime=True)

                    for file in filtered_list:
                        compare_files(sftp, remote_path, file, local_path, preserve_mtime=False)

                    with open(filename, 'a') as f:
                        for item in filtered_list:
                            f.write("%s\n" % item)

                filename = txtfile
                try:
                    file = open(filename, 'r')
                    process()
                except IOError:
                    file = open(filename, 'w')
                    process()

t = time.time()
t1 = threading.Thread(target=mainprocess(), args=())
t1.start()
t1.join()
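For reference, the usual pattern is to pass the function object itself (not the result of calling it) to Thread, and to start one thread per row. This is only a minimal sketch under that assumption, where worker(row) is a hypothetical function holding the per-IP body of the loop above:

import csv
import threading

def worker(row):
    # hypothetical: handle one IP, i.e. the body of mainprocess()'s for-loop
    pass

threads = []
with open("config9.txt", "r") as csv_file:
    for row in csv.reader(csv_file, delimiter=','):
        # note: no parentheses after worker -- target=worker(row) would run it
        # immediately in the main thread and pass its return value to Thread
        t = threading.Thread(target=worker, args=(row,))
        t.start()
        threads.append(t)

for t in threads:
    t.join()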
I am trying to download files from my FTP server using a Python script, but the files I get are 0 KB in size, and I can't figure out exactly where I am going wrong. I am searching for files by a particular string in the filename and then downloading every file in a given directory on my FTP server whose name contains that string.
Here is my code:
# Libraries
import re
import os
import ftplib
import ntpath

ftp = ftplib.FTP("192.168.1.786:22")
ftp.login("Marshmellow", "YourPasswordHere")
##ftp.dir("feed_1")
files = []
## F = open('Files.txt','a')
try:
    files = ftp.nlst("feed_1")
    for fname in files:
        res = re.findall("2018-07-25", fname)
        if res:
            # Open the file for writing in binary mode
            print 'Opening local file ' + ntpath.basename(fname)
            file = open(ntpath.basename(fname), 'wb')

            # Download the file a chunk at a time
            # Each chunk is sent to handleDownload
            # We append the chunk to the file and then print a '.' for progress
            # RETR is an FTP command
            print 'Getting ' + ntpath.basename(fname)
            try:
                ftp.retrbinary('RETR ' + ntpath.basename(fname), file.write)
            except:
                pass

            # Clean up time
            print 'Closing file ' + ntpath.basename(fname)
            file.close()
        print fname
        ## F.write(fname + '\n')
        if not res:
            continue
except ftplib.error_perm, resp:
    if str(resp) == "550 No files found":
        print "No files in this directory"
        pass
    else:
        raise

## F.close()
Can anyone help me figure out what's wrong with this?
try:
    ftp.cwd("feed_1")
    files = ftp.nlst()
    for fname in files:
        res = re.findall("2018-07-25", fname)
        if res:
            # Open the file for writing in binary mode
            print 'Opening local file ' + ntpath.basename(fname)
            file = open(ntpath.basename(fname), 'wb')
I just set the current working directory with ftp.cwd("feed_1"), instead of passing the directory to the listing the way I did earlier with files = ftp.nlst("feed_1").
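Put together, a minimal sketch of the fixed download loop could look like this (same host, login and date string as above; the ftp object is assumed to be already connected and logged in):

import ftplib
import ntpath
import re

# change into the remote directory first, then list and filter by the date string
ftp.cwd("feed_1")
for fname in ftp.nlst():
    if re.findall("2018-07-25", fname):
        local_name = ntpath.basename(fname)
        with open(local_name, 'wb') as local_file:
            # retrbinary calls local_file.write once per received chunk
            ftp.retrbinary('RETR ' + fname, local_file.write)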
I'm trying to import a zip file into Confluence with the RPC importSpace method, but it keeps giving errors. Atlassian has the following documentation for this:
public boolean importSpace(String token, byte[] importData)
I have created a small Python script that loops through a file in which the zip filenames are saved:
#!/usr/bin/python
import xmlrpclib
import time
import urllib

confluence2site = "https://confluence"
server = xmlrpclib.ServerProxy(confluence2site + '/rpc/xmlrpc')
username = ''
password = ''
token = server.confluence2.login(username, password)
loginString = "?os_username=" + username + "&os_password=" + password
filelist = ""

start = True

with open('exportedspaces.txt') as f:
    for file in f:
        try:
            print file
            f = open(os.curdir + "\\zips\\" + file, 'rb')
            fileHandle = f.read()
            f.close()
            server.confluence2.importSpace(token, xmlrpclib.Binary(fileHandle))
        except:
            print file + " failed to restore"
            failureList.append(file)
Where does it go wrong?
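A few things in the loop are easy to double-check: os and failureList are never defined, each line read from the file still ends in a newline, and the bare except hides the actual XML-RPC fault. This is only a minimal sketch of the same loop with those points addressed (it reuses the server and token from above; the paths and file names are placeholders):

import os
import xmlrpclib

failureList = []

with open('exportedspaces.txt') as f:
    for line in f:
        name = line.strip()  # drop the trailing newline from the filename
        if not name:
            continue
        zipPath = os.path.join(os.curdir, 'zips', name)
        try:
            with open(zipPath, 'rb') as zipFile:
                data = zipFile.read()
            server.confluence2.importSpace(token, xmlrpclib.Binary(data))
        except xmlrpclib.Fault as fault:
            # surface the real error instead of swallowing it
            print(name + " failed to restore: " + str(fault))
            failureList.append(name)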
I have this code, but I can't figure out how to change directory on the server before uploading files.
Can anyone help me out?
import ftplib
import os

server = 'enter your servername here'
username = 'root'
password = 'password'

myFTP = ftplib.FTP(server, username, password)
myPath = r'C:\path_of_the_folder_goes_here'

def uploadThis(path):
    files = os.listdir(path)
    os.chdir(path)
    for f in files:
        if os.path.isfile(path + r'\{}'.format(f)):
            fh = open(f, 'rb')
            myFTP.storbinary('STOR %s' % f, fh)
            fh.close()
        elif os.path.isdir(path + r'\{}'.format(f)):
            myFTP.mkd(f)
            myFTP.cwd(f)
            uploadThis(path + r'\{}'.format(f))
    myFTP.cwd('..')
    os.chdir('..')

uploadThis(myPath)
Use the FTP.cwd method:
myFTP.cwd('/remote/path')
before you call
uploadThis(myPath)
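So the end of the script would look roughly like this (the remote path is a placeholder):

# change to the target directory on the server first,
# then start the recursive upload from the local folder
myFTP.cwd('/remote/path')
uploadThis(myPath)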