I have a web parser built on the "socket" library (S03) and a module (before_json) that parses the received dict to extract the needed information.
Code for S03:
# imports used below (not shown in the original snippet)
import os
import re
import socket
import time
import json
from pathlib import Path
from dotenv import load_dotenv

# Init .env
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)

# init
secret_token = os.environ['secret_token']

# time
seconds = time.time()
local_time = time.ctime(seconds)

# HDRS
HDRS = 'HTTP/1.1 200 OK\r\nContent-Type: text/html; charset=utf-8\r\n\r\n'
HDRS_404 = 'HTTP/1.1 404 Not Found\r\nContent-Type: text/html; charset=utf-8\r\n\r\n'

# create webserver socket
socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_server.bind(('ip', 8888))
socket_server.listen(356)
socket_server.settimeout(5)


# Start processing the incoming json
def start_my_server():
    # compare the secret key from the request headers
    def load_secret(secret_token, data):
        try:
            # slack_message_pipe(step=f'I LOAD JSON')
            print('load_secret')
            key = str(data)
            key = re.findall('X-Gitlab-Token:...............', key)
            print(key)
            key = str(key).replace("['X-Gitlab-Token: ", '')
            key = str(key).replace("']", '')
            print(key)
            print(secret_token, ' !!! ', key)
            if secret_token == key:
                socket_server.settimeout(None)
                try_to_verification(key)
            else:
                fail_verification()
        except Exception as e:
            print(e)
            return
            # slack_message_pipe(step=f'start_my_server.load_secret {e}')

    def try_to_verification(key):
        try:
            print(key, 'key try_to_verification')
            client_socket.send(HDRS.encode('utf-8'))
            client_socket.shutdown(socket.SHUT_WR)
            # with open(f"path to file('{local_time}').json", 'w+') as output_file:
            #     json.dump(data, output_file)
            with open(f"path to file", 'w+') as file:
                json.dump(data, file)
            print('next step')
            json_dump_for_proj(data)
        except Exception as e:
            print(e)
            return
            # slack_message_pipe(step=f'start_my_server.try_to_verification {e}')

    def fail_verification():
        try:
            print('Not find')
            client_socket.send(HDRS_404.encode('utf-8'))
            client_socket.shutdown(socket.SHUT_WR)
            addresses = open('ipPOST', 'a')
            addresses.write(str(address) + f'{local_time}\n')
            addresses.close()
        except Exception as e:
            print(e)
            return
            # slack_message_pipe(step=f'start_my_server.fail_verification {e}')

    while True:
        print('start loop')
        try:
            print('try loop')
            while True:
                print('Working...')
                client_socket, address = socket_server.accept()
                print('loop', address)
                data = client_socket.recv(1048576).decode('utf-8')
                # slack_message_pipe(step=f'I GOT JSON')
                load_secret(secret_token, data)
        except Exception as e:
            # slack_message_pipe(step=f'start_my_server.socket.error {e}')
            print(f'pass try {e}')
            fail_verification()
Code for before_json:
# imports used below (not shown in the original snippet);
# slack_message_pipe, slack_message_pipe_good and git_checkout come from the project's own modules
import os
import re
import json
import time

home_path = os.environ['home']

# time
seconds = time.time()
local_time = time.ctime(seconds)


def json_dump_for_proj(data):
    os.chdir(home_path)
    try:
        data = str(data).replace('null', '0')
        # Strip the HTTP request line and headers that precede the JSON body
        data = re.sub('POST / HTTP/1.1\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        # data = re.sub(',total_commits_count.*', '}', data)
        data = re.sub('POST / HTTP/1.1\r\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        # data = re.sub('.total_commits_count.*', '}', data)
        data = re.sub('POST.*\r\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        data = re.sub('"POST / HTTP/1.1\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        data = re.sub('"POST / HTTP/1.1\r\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        data = re.sub('"POST.*\r\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n.*\n', '', data)
        data = json.loads(data)
        # parse the needed info
        # branch
        # print(data['ref'])
        branch = data['ref']
        # print(data['commits'])
        for keys in data['commits']:
            # commit id
            id_hash = keys['id']
            # author name
            name = keys['author']['name']
            # author email
            email = keys['author']['email']
            # added files
            added = keys['added']
            for _ in range(len(added) + 1):
                for j in added:
                    if 'path to file' not in j:
                        added.remove(j)
            # print('path to file' not in added[-1])
            # modified files
            modified = keys['modified']
            for _ in range(len(modified) + 1):
                for k in modified:
                    if '' not in k:
                        print(k)
                        modified.remove(k)
            print(id_hash, name, email, branch, modified, sep='\n' + '*' * 100 + '\n')
            list_of = [(name, email), added, modified, id_hash]
            # write_list(list_of)
            # print(not modified and not added)
            message_dict = {"name": name, "email": email, "modified": modified, "added": added}
            if not modified and not added:
                slack_message_pipe_good(
                    step=f' \nI got commit by {message_dict.get("name")}\nEmail: {message_dict.get("email")}\n\nBut it is empty, pass')
                return
            try:
                # slack_message_pipe_good(step=f' \nI got commit by {message_dict.get("name")}\nEmail: {message_dict.get("email")}\n\nInside this commit, the following changes\nadded:{message_dict.get("added")}\nmodified:{message_dict.get("modified")}\n\n I am going to the next step')
                git_checkout(id_hash, message_dict)
            except Exception as e:
                slack_message_pipe(step=f'ERRORS ON STEP before_deploy_Parse_Json.json_dump_for_proj: {e}')
                return
    except Exception as e:
        with open(f'{local_time}.json', 'w+') as data_failure:
            data_failure.write(data)
        slack_message_pipe(step=f' before_deploy_Parse_Json.json_dump_for_proj {e}')


def write_list(list_of):
    try:
        with open(f'path to file', 'w+') as output_file:
            output_file.write(str(list_of) + '\n')
        print('I all write')
    except Exception as e:
        slack_message_pipe(step=f' before_deploy_Parse_Json.write_list {e}')


# with open('data.json', 'r') as json_file:
#     data = json.load(json_file)
# json_dump_for_proj(data)


if __name__ == '__main__':
    print('Parse_Json')
Problem:
I get the JSON from S03 and then begin parsing it in before_json, but it is not always whole: some chunk of the JSON is gone, and the size of the missing chunk varies.
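One likely reason for the truncation is that a single client_socket.recv(1048576) in S03 returns only the bytes that have arrived so far, not necessarily the entire request. Below is a minimal sketch of reading the complete request, assuming the webhook sends a Content-Length header; recv_http_request is a hypothetical helper, not part of S03:

def recv_http_request(client_socket, chunk_size=65536):
    # Read until the headers are complete, then keep reading until
    # Content-Length bytes of the body have arrived.
    buffer = b''
    while b'\r\n\r\n' not in buffer:
        chunk = client_socket.recv(chunk_size)
        if not chunk:
            break
        buffer += chunk
    header_blob, _, body = buffer.partition(b'\r\n\r\n')
    headers = header_blob.decode('utf-8', errors='replace')
    content_length = 0
    for line in headers.split('\r\n'):
        if line.lower().startswith('content-length:'):
            content_length = int(line.split(':', 1)[1].strip())
    while len(body) < content_length:
        chunk = client_socket.recv(chunk_size)
        if not chunk:
            break
        body += chunk
    return headers, body.decode('utf-8', errors='replace')

Splitting the headers from the body this way would also make the chain of re.sub calls in before_json unnecessary.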
I need to add client-side caching functionality to my client. I don't need to implement any replacement or validation policies, just to be able to write responses to the disk (i.e., the cache) and fetch them from the disk when I get a cache hit. For this, I need to implement some internal data structure in my client to keep track of which objects are cached and where they are on the disk. I can keep this data structure in main memory; there is no need for it to persist across shutdowns.
Here's the code I wrote for the caching part, but it isn't working. I need your help, please:
import socket
import selectors
import os

commands = []
requests = []
request_methods = []
filenames = []
host_names = []
port_numbers = []
cached_objects = {}

sel = selectors.DefaultSelector()

with open('commands.txt', encoding='UTF-8', mode='r') as f:
    commands = f.readlines()


def parse_file():
    for count in range(len(commands)):
        request_method = commands[count].split(' ')[0]
        request_methods.append(request_method)
        filename = commands[count].split(' ')[1]
        filenames.append(filename)
        host_name = commands[count].split(' ')[2].strip('\n')
        host_names.append(host_name)
        try:
            port_number = commands[count].split(' ')[3].strip('\n')
            port_numbers.append(port_number)
        except Exception as e:
            port_number = 80
            port_numbers.append(port_number)
        requests.append(generate_request(request_method, filename, host_name))


def generate_request(request_method, filename, host_name):
    request = ''
    if request_method == "GET":
        request += request_method + ' /' + filename + ' HTTP/1.0\r\n'
        request += 'Host:' + host_name + '\r\n\r\n'
        print(request)
    elif request_method == "POST":
        request += request_method + ' /' + filename + ' HTTP/1.0\r\n'
        request += 'Host:' + host_name + '\r\n'
        request += '\r\n'
        # Reading POST file from ClientFiles
        print(filename)
        f = open(filename, "r")
        request += f.read()
        print(request)
    return request


def start_connections(host, port, request, filename, request_method):
    server_addr = (host, port)
    events = selectors.EVENT_READ | selectors.EVENT_WRITE
    # connid = count + 1
    print(f"Starting connection to {server_addr}")
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect_ex(server_addr)
    sock.sendall(bytes(request, 'UTF-8'))
    data = sock.recv(4092)
    response = data.decode()
    cached_objects[request] = response  # <<<<<<<<<<<<<<<<<<
    # TODO: Fix Name of the file
    fileReady = "Clientfiles/"
    head, tail = os.path.split(filename)
    fileReady += tail
    print("\n RESPONSE " + response + "\n response end")
    try:
        if request_method == "GET":
            payload = response.split('\r\n\r\n', 1)[1]
            print("MyPayload " + payload)
            f = open(fileReady, "w")
            f.write(payload)
            f.close()
    except Exception as e:
        print(e)
    print("From Server :", response)
    print("\n\n")
    sel.register(sock, events)


def check_cache(request):
    for i in range(len(commands)):
        request = requests[i]
        if request in cached_objects.keys():
            response = cached_objects[request]  # <<<<<<<<<<<<<<<<<<
            print("\n RESPONSE From cache " + response + "\n response end")  # <<<<<<<<<<<<<<<<<<
            # i = i + 1  # <<<<<<<<<<<<<<<<<<
        else:
            start_connections(host_names[i], int(port_numbers[i]), requests[i], filenames[i], request_methods[i])


parse_file()
check_cache(generate_request.request)
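For reference, a minimal sketch of the bookkeeping described above: an in-memory dict that maps each request to the file its response was written to, so a cache hit can be served from disk. The cache_dir path and the helper names are hypothetical, not part of the code above:

import hashlib
import os

cache_dir = "cache"                 # hypothetical directory for cached responses
os.makedirs(cache_dir, exist_ok=True)
cache_index = {}                    # request string -> path of the cached response on disk

def cache_store(request, response):
    # Write the response to disk and remember where it lives.
    path = os.path.join(cache_dir, hashlib.md5(request.encode('utf-8')).hexdigest())
    with open(path, 'w', encoding='utf-8') as f:
        f.write(response)
    cache_index[request] = path

def cache_fetch(request):
    # Return the cached response read back from disk, or None on a miss.
    path = cache_index.get(request)
    if path is None:
        return None
    with open(path, encoding='utf-8') as f:
        return f.read()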
Why am I getting the error "argument for 's' must be a bytes object" when trying to run the lambda function? I'm following the usage example, but I'm getting this error. Any explanation of this issue and how to resolve it?
{
    "errorMessage": "Failed sending data.\nERROR: argument for 's' must be a bytes object",
    "errorType": "Exception",
    "stackTrace": [
        " File \"/var/task/AlertMetricSender.py\", line 5, in lambda_handler\n sender.send()\n",
        " File \"/var/task/modules/ZabbixSender.py\", line 91, in send\n self.__active_checks()\n",
        " File \"/var/task/modules/ZabbixSender.py\", line 79, in __active_checks\n response = self.__request(request)\n",
        " File \"/var/task/modules/ZabbixSender.py\", line 59, in __request\n raise Exception(\"Failed sending data.\\nERROR: %s\" % e)\n"
    ]
}
ZabbixSender.py:
#
# For sending metric value to zabbix server.
#
# You must create item as "zabbix trapper" on server.
# Because the server must be connected to agent:10050, if it is selected "zabbix agent".
#
# Usage:
#   from modules.ZabbixSender import ZabbixSender
#   ZABBIX_HOST = "zabbix.example.com"
#   ZABBIX_PORT = 10051
#   sender = ZabbixSender(ZABBIX_HOST, ZABBIX_PORT)
#   sender.add("example-hostname-01", "healthcheck", 1)
#   sender.add("example-hostname-01", "item.keyname", 0.123)
#   sender.add("example-hostname-02", "item.keyname", 1234)
#   sender.send()
#
import socket
import struct
import time
import json


class ZabbixSender:
    log = True

    def __init__(self, host='127.0.0.1', port=10051):
        self.address = (host, port)
        self.data = []

    def __log(self, log):
        if self.log: print(log)

    def __connect(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            self.sock.connect(self.address)
        except:
            raise Exception("Can't connect server.")

    def __close(self):
        self.sock.close()

    def __pack(self, request):
        string = json.dumps(request)
        header = struct.pack('<4sBQ', 'ZBXD', 1, len(string))
        return header + string

    def __unpack(self, response):
        header, version, length = struct.unpack('<4sBQ', response[:13])
        (data, ) = struct.unpack('<%ds' % length, response[13:13+length])
        return json.loads(data)

    def __request(self, request):
        self.__connect()
        try:
            self.sock.sendall(self.__pack(request))
        except Exception as e:
            raise Exception("Failed sending data.\nERROR: %s" % e)
        response = ''
        while True:
            data = self.sock.recv(4096)
            if not data:
                break
            response += data
        self.__close()
        return self.__unpack(response)

    def __active_checks(self):
        hosts = set()
        for d in self.data:
            hosts.add(d['host'])
        for h in hosts:
            request = {"request": "active checks", "host": h}
            self.__log("[active check] %s" % h)
            response = self.__request(request)
            if not response['response'] == 'success': self.__log("[host not found] %s" % h)

    def add(self, host, key, value, clock=None):
        if clock is None: clock = int(time.time())
        self.data.append({"host": host, "key": key, "value": value, "clock": clock})

    def send(self):
        if not self.data:
            self.__log("Not found sender data, end without sending.")
            return False
        self.__active_checks()
        request = {"request": "sender data", "data": self.data}
        response = self.__request(request)
        result = True if response['response'] == 'success' else False
        if result:
            for d in self.data:
                self.__log("[send data] %s" % d)
            self.__log("[send result] %s" % response['info'])
        else:
            raise Exception("Failed send data.")
        return result


if __name__ == '__main__':
    sender = ZabbixSender()
    sender.add("gedowfather-example-01", "healthcheck", 1)
    sender.add("gedowfather-example-01", "gedow.item", 1111)
    sender.send()
AlertMetricSender.py:
from modules.ZabbixSender import ZabbixSender

def lambda_handler(event, context):
    sender = ZabbixSender("10.10.10.10", 10051)
    sender.add("Zabbix server", "lambda.test", 5)
    sender.send()
The error is coming from struct.pack. You're not seeing that, because of your blanket try/except.
All socket activity is done in byte strings, not Unicode strings. You need this:
def __pack(self, request):
    string = json.dumps(request).encode('utf-8')
    header = b'ZBXD' + struct.pack('<BQ', 1, len(string))
    return header + string
One subtle thing about this: you must convert to a bytes string BEFORE you take len(string), because encoding to UTF-8 can increase the length of the string: a single character may become several bytes.
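A quick illustration of that point:

s = "héllo"               # 5 characters
b = s.encode('utf-8')     # 6 bytes, because 'é' takes two bytes in UTF-8
print(len(s), len(b))     # 5 6 -- the ZBXD header needs the byte length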
AND I absolutely need to comment on this:
result = True if response['response'] == 'success' else False
What led you to write that? This is exactly the same as the much more natural reading:
result = response['response'] == 'success'
I am trying to save attachments from my Outlook account emails to a local directory. The code was working perfectly until now; it has started showing the error below and I am not able to get past it. Can anybody please help with this?
TimeoutError: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond
Here is the code.
from classes.logfile import logger
from classes.config_ini import Config
import email
import imaplib
import email.mime.multipart
import os
import pandas as pd
import datetime


# This class is useful for fetching data from Outlook emails by providing a subject line with files
class Outlook(Config):
    def __init__(self):
        super(Outlook, self).__init__()
        self.username = None
        self.password = None
        self.imap = None
        self.subject = None
        self.file_name = None
        self.s = None
        self.att_path = "No attachment found"

    def subject_line(self):
        subject_read = pd.read_csv(self.section_value[0] + 'outlookEmails.csv')
        subject = subject_read.iloc[:, :]
        self.s = subject
        self.subject = subject.iloc[:, 1]
        self.file_name = subject.iloc[:, 0]

    def close_connection(self):
        return self.imap.close()

    def login(self, username, password):
        # IMAP settings
        self.username = username
        self.password = password
        print("signing in")
        while True:
            # Connect to the server
            try:
                self.imap = imaplib.IMAP4_SSL("outlook.office365.com", port=993)
                r, d = self.imap.login(username, password)
                assert r == 'OK', 'login failed'
                print(" > Sign as ", d)
            except imaplib.IMAP4.error:
                print(" > Sign In ...")
                continue
            break

    def inbox(self):
        # selecting the inbox
        typ, data = self.imap.select("Inbox")
        print(typ, data)
        num_msgs = int(data[0])
        print('There are {} messages in INBOX'.format(num_msgs))
        return self.imap.select("Inbox")

    def email_check(self, download_folder):
        # fetch the email body (RFC822) for the given ID
        try:
            for i, j in zip(self.subject, self.file_name):
                print('Subject {}'.format(i))
                # typ, msg_ids = self.imap.uid('search', None, 'SUBJECT {}'.format(i))
                typ, msg_ids = self.imap.uid('search', None, '(SUBJECT "{}")'.format(i))
                inbox_item_list = msg_ids[0].split()
                most_recent = inbox_item_list[-1]
                print(most_recent)
                if typ == "OK":
                    ret, data = self.imap.uid('fetch', most_recent, '(RFC822)')
                    raw_data = data[0][1]
                    # converts byte literal to string, removing b''
                    raw_data_string = raw_data.decode('utf-8')
                    msg = email.message_from_string(raw_data_string)
                    # downloading attachments
                    # print(msg)
                    print('Subject:' + msg['Subject'])
                    for part in msg.walk():
                        if part.get_content_maintype() == 'multipart':
                            continue
                        if part.get('Content-Disposition') is None:
                            continue
                        filename = part.get_filename()
                        print("filename:" + filename)
                        filename = j
                        # if there is no filename, we create one with a counter to avoid duplicates
                        self.att_path = os.path.join(download_folder, filename)
                        # Check if it's already there
                        # if not os.path.isfile(self.att_path):
                        fp = open(self.att_path, 'wb')
                        fp.write(part.get_payload(decode=True))
                        fp.close()
        except (imaplib.IMAP4.error, TypeError) as e:
            logger.error(str(e))
            pass
        # moving files to a particular folder
        # folders = [d for d in os.listdir(download_folder) if os.path.isdir(d)]
        # files = [f for f in os.listdir(download_folder) if os.path.isfile(f)]
        # for d in folders:
        #     for f in files:
        #         if d in f:
        #             new_loc =

    def main(self):
        self.subject_line()
        self.login('XXX', 'XXX')
        self.inbox()
        logger.info('start downloading emails at ' + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))
        self.email_check(self.section_value[1])
        self.close_connection()
        logger.info('Emails Downloaded ' + str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M")))


if __name__ == "__main__":
    obj = Outlook()
    obj.main()
Can somebody help me with this?
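WinError 10060 is a network-level timeout: the host at outlook.office365.com:993 never answered, which usually points at a connectivity, firewall, or proxy change rather than at this code. As a hedged sketch only, here is one way to fail fast and retry a few times instead of looping forever inside login(); connect_with_retry is a hypothetical helper, not part of the class above:

import imaplib
import socket
import time

socket.setdefaulttimeout(30)   # give up on a dead connection after 30 s instead of hanging

def connect_with_retry(host, username, password, attempts=3, wait=10):
    # Try a few times, then raise, rather than retrying endlessly.
    for attempt in range(1, attempts + 1):
        try:
            imap = imaplib.IMAP4_SSL(host, 993)
            imap.login(username, password)
            return imap
        except (OSError, imaplib.IMAP4.error) as e:
            print("attempt {} failed: {}".format(attempt, e))
            time.sleep(wait)
    raise ConnectionError("could not reach {} after {} attempts".format(host, attempts))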
I'm using the multiprocessing.dummy module to do some concurrent processing. I'm making HTTP requests, and there is a possibility that the object will not have any data returned. In this case I need to capture the AttributeError and move on.
I tried capturing it in the object itself and still received the error; the only thing that worked was a try/except around the pool.map call itself. I'm wondering why this is, and whether this is the best way to do error handling for multiprocessing and map functions?
Here is some of my code for reference:
all_commits = []
projects = [Project(value['id']) for value in project_data.values()]


def process_projects(project):
    if project.name in bad_names.keys():
        project.name = bad_names[project.name]
    project.return_results(rest, all_commits)


pool = ThreadPool(8)
pool.map(process_projects, projects)
pool.close()
pool.join()

print 'All data gathered.'
print 'Number of commits: {}'.format(len(all_commits))

fieldnames = get_fieldnames(
    'ods_gerrit.staging_gerrit_commits',
    settings.REDSHIFT_POSTGRES_INFO)

s3_file = ('staging_gerrit_commits_{}.csv.gz'.format(
    date.today())
)

with gzip.open(s3_file, 'wb') as outf:
    writer = DictWriter(
        outf,
        fieldnames=fieldnames,
        extrasaction='ignore',
        delimiter='|'
    )
    cnt = 0
    pool = ThreadPool(8)
    try:
        pool.map(process_commits, all_commits)
    except AttributeError:
        pass
    pool.close()
    pool.join()
Then here is my Commit object code and the function that is being called by the map function:
class Commit(object):
    def __init__(self, rev_id, change_id, full_id):
        self.rev_id = rev_id
        self.change_id = change_id
        self.full_id = full_id

    def clean_data(self, _dict):
        for key, value in _dict.items():
            if isinstance(value, dict):
                self.clean_data(_dict[key])
            else:
                try:
                    _dict[key] = _dict[key].encode(
                        'utf_8',
                        'replace'
                    ).encode('string_escape').replace('|', '[pipe]')
                except AttributeError:
                    continue

    def get_data(self, ger_obj):
        print 'Getting data for a commit for {f_id}'.format(
            f_id=self.full_id
        )
        endpoint = (r'/changes/{c_id}/revisions/{r_id}/commit'.format(
            c_id=self.change_id,
            r_id=self.rev_id
        ))
        try:
            self.data = ger_obj.get(endpoint)
        except HTTPError:
            try:
                endpoint = (r'/changes/{f_id}/revisions/{r_id}/commit'.format(
                    f_id=self.full_id,
                    r_id=self.rev_id
                ))
                self.data = ger_obj.get(endpoint)
            except HTTPError:
                logging.warning('Neither endpoint returned data: {ep}'.format(
                    ep=endpoint
                ))
                raise HTTPError()
        except ReadTimeout:
            logging.warning('Read Timeout occurred for a commit. Endpoint: '
                            '{ep/}'.format(ep=endpoint))
            return
        self.data['change_id'] = self.change_id
        self.data['proj_branch_id'] = self.full_id
        self.data['revision_id'] = self.rev_id
        self.data['commitid'] = self.data.get('commit')
        self.data['name'] = self.data.get('committer')['name']
        self.data['email'] = self.data.get('committer')['email']
        self.data['date'] = self.data.get('committer')['date']
        hash = md5()
        hash.update(json.dumps(self.data).encode('utf-8'))
        self.data['etl_checksum_md5'] = hash.hexdigest()
        self.data['etl_process_status'] = settings.ETL_PROCESS_STATUS
        self.data['etl_datetime_local'] = settings.ETL_DATETIME_LOCAL
        self.data['etl_pdi_version'] = settings.ETL_PDI_VERSION
        self.data['etl_pdi_build_version'] = settings.ETL_PDI_BUILD_VERSION
        self.data['etl_pdi_hostname'] = settings.ETL_PDI_HOSTNAME
        self.data['etl_pdi_ipaddress'] = settings.ETL_PDI_IPADDRESS
        self.clean_data(self.data)

    def write_data(self, writer):
        print 'Writing a commit for {f_id}'.format(f_id=self.full_id)
        writer.writerow(self.data)
And the controller function:
def process_commits(commit):
    print 'On commit #{}'.format(cnt)
    unique_id = commit.change_id + commit.rev_id
    if not id_search(unique_ids, unique_id):
        try:
            commit.get_data(rest)
        except HTTPError:
            pass
        try:
            commit.write_data(writer=writer)
        except UnicodeEncodeError:
            logging.warning(
                '{data} caused a Unicode Encode Error.'.format(
                    data=commit.data
                ))
            pass
    global cnt
    cnt += 1
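pool.map collects each worker's return value, and an exception raised inside a worker is re-raised in the parent when those results are gathered; that is why the try/except only catches it when it wraps the pool.map call itself. A common alternative is to catch the error per item inside the worker, so one bad commit is logged and skipped instead of aborting the whole map. A minimal sketch, reusing the names from the code above:

import logging
from multiprocessing.dummy import Pool as ThreadPool

def process_commits_safe(commit):
    # Log and skip a commit whose response had no data instead of killing the pool.
    try:
        process_commits(commit)
    except AttributeError as e:
        logging.warning('skipping commit %s: %s', commit.full_id, e)

pool = ThreadPool(8)
pool.map(process_commits_safe, all_commits)
pool.close()
pool.join()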
So the curl command I'm using is as follows:
cmd = "curl --write-out %{http_code} -X PUT -T " + self.basedir + putfile + " -# -o /dev/null " + self.uri + "/" + self.dist + "/" + putfile
I'd like to change this from invoking a system command to using pycurl. That way I can have more granular control over it and ultimately implement a progress bar. However, when I try to convert it to Python, my resulting script fails. Here are my efforts towards a Python script:
f = open(filepath, "rb")
fs = os.path.getsize(filepath)

c = pycurl.Curl()
c.setopt(c.URL, target_url)
c.setopt(c.HTTPHEADER, ["User-Agent: Load Tool (PyCURL Load Tool)"])
c.setopt(c.PUT, 1)
c.setopt(c.READDATA, f)
c.setopt(c.INFILESIZE, int(fs))
c.setopt(c.NOSIGNAL, 1)
c.setopt(c.VERBOSE, 1)
c.body = StringIO()
c.setopt(c.WRITEFUNCTION, c.body.write)

try:
    c.perform()
except:
    import traceback
    traceback.print_exc(file=sys.stderr)
    sys.stderr.flush()

f.close()
c.close()
sys.stdout.write(".")
sys.stdout.flush()
Here's what that outputs:
* About to connect() to ************ port 8090 (#0)
* Trying 16.94.124.53... * connected
> PUT /incoming/ HTTP/1.1
Host: ***********
Accept: */*
User-Agent: Load Tool (PyCURL Load Tool)
Content-Length: 21
Expect: 100-continue
< HTTP/1.1 100 Continue
* We are completely uploaded and fine
< HTTP/1.1 500 Internal Server Error
< Content-type: text/html
* no chunk, no close, no size. Assume close to signal end
<
Thanks in advance for your help!
I've written a working upload module; you can find your answers by looking through the code.
You can also find almost all answers regarding pycurl by digging through the libcurl examples and docs.
'''
Created on Oct 22, 2013
#author: me
'''
import pycurl
import os
import wx
import sys
import hashlib
from cStringIO import StringIO


def get_file_hash(full_filename):
    BLOCKSIZE = 65536
    hasher = hashlib.md5()
    with open(full_filename, 'rb') as afile:
        buf = afile.read(BLOCKSIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(BLOCKSIZE)
    return hasher.hexdigest()


class FtpUpload(object):
    def __init__(self, server, username, password, **items):
        self.server = server
        self.username = username
        self.password = password
        self.gauge = items.get("gauge")
        self.sb_speed = items.get("sb_speed")
        self.upload_file_size = items.get("upload_file_size")
        self.upload_file_speed = items.get("upload_file_speed")
        self.filesize = 0
        self.ftp_filehash = '0'

    def sizeToNiceString(self, byteCount):
        for (cutoff, label) in [(1024*1024*1024, "GB"), (1024*1024, "MB"), (1024, "KB")]:
            if byteCount >= cutoff:
                return "%.2f %s" % (byteCount * 1.0 / cutoff, label)
        if byteCount == 1:
            return "1 byte"
        else:
            return "%d bytes" % byteCount

    def initRange(self, filesize):
        self.filesize = filesize
        self.gauge.SetRange(filesize)

    def updateValue(self, upload_d):
        upload_d_int = int(upload_d)
        self.gauge.SetValue(upload_d_int)
        upload_d_str = self.sizeToNiceString(upload_d)
        upload_percent = int((upload_d*100)/self.filesize)
        upload_d_status = "{0}/{1} ({2}%)".format(upload_d_str, self.sizeToNiceString(self.filesize), upload_percent)
        self.sb_speed.SetStatusText(upload_d_status, 1)
        self.upload_file_size.SetLabel(upload_d_status)
        self.upload_file_speed.SetLabel(upload_d_str)

    def progress(self, download_t, download_d, upload_t, upload_d):
        self.updateValue(upload_d)

    def test(self, debug_type, debug_msg):
        if len(debug_msg) < 300:
            print "debug(%d): %s" % (debug_type, debug_msg.strip())

    def ftp_file_hash(self, buf):
        sys.stderr.write("{0:.<20} : {1}\n".format('FTP RAW ', buf.strip()))
        ftp_filehash = dict()
        item = buf.strip().split('\n')[0]
        ext = item.split('.')[1]
        if len(ext) > 3:
            ftp_filename = item[:-33]
            ftp_filehash = item[-32:]
            self.ftp_filehash = ftp_filehash

    def get_ftp_file_hash(self, filename):
        c = pycurl.Curl()
        list_file_hash = 'LIST -1 ' + filename + "_*"
        sys.stderr.write("{0:.<20} : {1} \n".format('FTP command ', list_file_hash))
        c.setopt(pycurl.URL, self.server)
        c.setopt(pycurl.USERNAME, self.username)
        c.setopt(pycurl.PASSWORD, self.password)
        c.setopt(pycurl.VERBOSE, False)
        c.setopt(pycurl.DEBUGFUNCTION, self.test)
        c.setopt(pycurl.CUSTOMREQUEST, list_file_hash)
        c.setopt(pycurl.WRITEFUNCTION, self.ftp_file_hash)
        c.perform()
        c.close()

    def delete_ftp_hash_file(self, ftp_hash_file_old):
        c = pycurl.Curl()
        delete_hash_file = 'DELE ' + ftp_hash_file_old
        sys.stderr.write("{0:.<20} : {1} \n".format('FTP command ', delete_hash_file))
        c.setopt(pycurl.URL, self.server)
        c.setopt(pycurl.USERNAME, self.username)
        c.setopt(pycurl.PASSWORD, self.password)
        c.setopt(pycurl.VERBOSE, False)
        c.setopt(pycurl.DEBUGFUNCTION, self.test)
        c.setopt(pycurl.CUSTOMREQUEST, delete_hash_file)
        try:
            c.perform()
        except Exception as e:
            print e
        c.close()

    def upload(self, full_filename, filesize):
        self.initRange(filesize)
        filename = os.path.basename(full_filename)
        sys.stderr.write("filename: %s\n" % full_filename)
        c = pycurl.Curl()
        c.setopt(pycurl.USERNAME, self.username)
        c.setopt(pycurl.PASSWORD, self.password)
        c.setopt(pycurl.VERBOSE, False)
        c.setopt(pycurl.DEBUGFUNCTION, self.test)
        c.setopt(pycurl.NOBODY, True)
        c.setopt(pycurl.HEADER, False)
        ftp_file_path = os.path.join(self.server, os.path.basename(full_filename))
        file_hash = get_file_hash(full_filename)
        ftp_hash_file = ftp_file_path + "_%s" % file_hash
        # Getting filesize if exist on server.
        try:
            c.setopt(pycurl.URL, ftp_file_path)
            c.perform()
            filesize_offset = int(c.getinfo(pycurl.CONTENT_LENGTH_DOWNLOAD))
        except Exception as error_msg:
            print error_msg
            wx.MessageBox(str(error_msg), 'Connection error!',
                          wx.OK | wx.ICON_ERROR)
            # Exit upload function.
            return True
        ftp_file_append = True
        # Get ftp file hash.
        self.get_ftp_file_hash(filename)
        offset = filesize_offset == -1 and '0' or filesize_offset
        sys.stderr.write("L_file hash : {0:.<60}: {1:<40}\n".format(filename, file_hash))
        sys.stderr.write("F_file hash : {0:.<60}: {1:<40}\n".format(filename, self.ftp_filehash))
        sys.stderr.write("{0:15} : {1:.>15}\n".format('filesize:', filesize))
        sys.stderr.write("{0:15} : {1:.>15}\n".format('ftp_filesize', offset))
        sys.stderr.write("{0:15} : {1:.>15}\n".format('to upload:', filesize - int(offset)))
        # File not exist on FTP server.
        if filesize_offset == -1:
            # file not exist: uploading from offset zero.
            ftp_file_append = False
            filesize_offset = 0
        # Local and FTP file size and files MD5 are the same.
        elif filesize_offset == self.filesize and file_hash == self.ftp_filehash:
            sys.stderr.write("--- File exist on server! ---\n\n")
            self.upload_file_speed.SetLabel("File exist on server!")
            self.sb_speed.SetStatusText("File exist on server!", 1)
            # Check next filename.
            return False
        # Ftp file and local file different data.
        elif file_hash != self.ftp_filehash:
            ftp_file_append = False
            filesize_offset = 0
            ftp_hash_file_old = filename + "_" + self.ftp_filehash
            # delete old hash file.
            self.delete_ftp_hash_file(ftp_hash_file_old)
        c.setopt(pycurl.FTPAPPEND, ftp_file_append)
        c.setopt(pycurl.UPLOAD, True)
        c.setopt(pycurl.PROGRESSFUNCTION, self.progress)
        with open('filehash.txt', 'w') as f:
            f.write(file_hash)
        for item in ("filehash.txt", full_filename):
            # don't show progress by default.
            noprogress = True
            # upload ftp_hash_file first.
            ftp_url = ftp_hash_file
            with open(item, "rb") as f:
                # changes ftp_url and shows progress values, adds filesize_offset.
                if item != "filehash.txt":
                    f.seek(filesize_offset)
                    noprogress = False
                    ftp_url = ftp_file_path
                c.setopt(pycurl.URL, ftp_url)
                c.setopt(pycurl.NOPROGRESS, noprogress)
                c.setopt(pycurl.READFUNCTION, f.read)
                try:
                    c.perform()
                    if item != "filehash.txt":
                        sys.stderr.write("{0:15} : {1:.>15}\n\n".format("size uploaded", int(c.getinfo(pycurl.SIZE_UPLOAD))))
                except Exception as error_msg:
                    print error_msg
                    wx.MessageBox(str(error_msg), 'Connection error!',
                                  wx.OK | wx.ICON_ERROR)
                    # Exit upload function.
                    return True
        self.ftp_filehash = '0'
        c.close()


if __name__ == '__main__':
    pass
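For the HTTP PUT with a progress bar that the original question asks about, here is a hedged sketch of the options pycurl exposes for transfer progress; the URL and file name are illustrative, and older libcurl builds use PROGRESSFUNCTION instead of XFERINFOFUNCTION:

import os
import sys
import pycurl

def on_progress(download_total, downloaded, upload_total, uploaded):
    # Called repeatedly by libcurl while the transfer runs.
    if upload_total:
        sys.stdout.write("\rUploaded %d/%d bytes" % (uploaded, upload_total))
        sys.stdout.flush()

filepath = "somefile"                                         # illustrative local file
c = pycurl.Curl()
c.setopt(pycurl.URL, "http://example.com/incoming/somefile")  # illustrative target URL
c.setopt(pycurl.UPLOAD, 1)                                    # upload the read data with PUT
with open(filepath, "rb") as f:
    c.setopt(pycurl.READDATA, f)
    c.setopt(pycurl.INFILESIZE, os.path.getsize(filepath))
    c.setopt(pycurl.NOPROGRESS, 0)                            # progress callbacks are off by default
    c.setopt(pycurl.XFERINFOFUNCTION, on_progress)
    c.perform()
c.close()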