Create byte array for zip file - python

I'm trying to import a zip file into Confluence with the RPC importSpace method, but it keeps giving errors. Atlassian provides the following signature for it:
public boolean importSpace(String token, byte[] importData)
I have created a small Python script that loops through a file where the zip filenames are saved:
#!/usr/bin/python
import xmlrpclib
import time
import urllib

confluence2site = "https://confluence"
server = xmlrpclib.ServerProxy(confluence2site + '/rpc/xmlrpc')
username = ''
password = ''
token = server.confluence2.login(username, password)
loginString = "?os_username=" + username + "&os_password=" + password
filelist = ""
start = True

with open('exportedspaces.txt') as f:
    for file in f:
        try:
            print file
            f = open(os.curdir + "\\zips\\" + file, 'rb')
            fileHandle = f.read()
            f.close()
            server.confluence2.importSpace(token, xmlrpclib.Binary(fileHandle))
        except:
            print file + " failed to restore"
            failureList.append(file)
Where does it go wrong?
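For what it's worth, a corrected sketch of the loop (my reading of the likely failure points, not a confirmed fix): the script never imports os, never defines failureList, reuses the name f for both the list file and each zip file, and passes filenames that still carry their trailing newline.

#!/usr/bin/python
import os
import xmlrpclib

confluence2site = "https://confluence"
server = xmlrpclib.ServerProxy(confluence2site + '/rpc/xmlrpc')
username = ''
password = ''
token = server.confluence2.login(username, password)
failureList = []

with open('exportedspaces.txt') as f:
    for file in f:
        file = file.strip()  # drop the trailing newline from each listed name
        try:
            print file
            # use a separate handle so the list file f is not clobbered
            zipFile = open(os.path.join(os.curdir, 'zips', file), 'rb')
            fileHandle = zipFile.read()
            zipFile.close()
            server.confluence2.importSpace(token, xmlrpclib.Binary(fileHandle))
        except xmlrpclib.Fault as fault:
            print file + " failed to restore: " + str(fault)
            failureList.append(file)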

Related

How to add strings in the file at the end of all lines

I am trying to download files using Python and then add lines at the end of the downloaded files, but it returns an error:
f.write(data + """<auth-user-pass>
TypeError: can't concat str to bytes
Edit: Thanks, it works now when I do b"""<auth-user-pass>""", but I only want to add the string at the end of the file. When I run the code, it adds the string for every line.
I also tried something like this, but it did not work either: f.write(str(data) + "<auth-user-pass>")
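For context, a minimal illustration of the Python 3 rule behind that error (my own example):

data = b"abc"                   # bytes, as yielded when iterating a streamed response
# data + "tail"                 # TypeError: can't concat str to bytes
print(data + b"tail")           # OK: bytes + bytes -> b'abctail'
print(data + "tail".encode())   # OK: encode the str side first
print(str(data) + "tail")       # no error, but gives "b'abc'tail" -- not what you want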
Here is my full code:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")
    return url

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 threads. Each call will take the next element in the urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
Try this:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data)
        # append the block once, after the whole file has been downloaded
        with open(complete_path, 'ab') as f:
            f.write(b"""<auth-user-pass>
username
password
</auth-user-pass>""")
    return url

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 threads. Each call will take the next element in the urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
You are opening the file in binary mode, so encode your string before concatenating. That is, replace

for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")

with

for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""".encode())
You opened the file for writing in binary mode.
Because of that, you can't write normal strings, as the comment from #user56700 said.
You either need to encode the string or open the file another way (e.g. 'a' for appending).
I'm not completely sure, but it is also possible that opening in write-binary mode deletes the file's existing data. Normally, opening for writing truncates existing data, so it's quite possible you need to change the mode to 'r+b' (read and write in binary).
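A minimal illustration of the append-mode suggestion (my example, not the answerer's code): 'wb' truncates the file on open, while 'ab' keeps the existing content and writes at the end.

with open('config.ovpn', 'wb') as f:   # 'wb' starts the file from scratch
    f.write(b'remote example.com 1194\n')
with open('config.ovpn', 'ab') as f:   # 'ab' appends after the existing content
    f.write(b'<auth-user-pass>\n')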

How to create a loop with FOR in a temporary file?

I am working with an encrypted file, but I can't manage to create a loop with for in order to read it before it gets closed and removed.
My intention is to read the data given in the encrypted file and loop it to assign each line to a variable.
Whenever I execute my code, Python just runs straight to the end without working with the decrypted info; I believe it is because the with statement closes it before the loop starts.
This is what I want; it doesn't work, but gives no errors either:
with open(input_file, 'rb') as fp:
    data = fp.read()

fernet = Fernet(key)
encrypted = fernet.decrypt(data)

with tempfile.TemporaryFile() as fp:
    fp.write(encrypted)
    for url in fp:  # Python ignores the tempfile. I believe it is closed in the previous line.
        segment = url.strip()
        url = 'https://docs.python.org/3.3/tutorial/' + segment
        filename = segment + '.html'
        filePath = pjoin('Data/' + filename)
        response = urlopen(url)
        webContent = response.read()
        html_content = urlopen(url).read()
        matches = re.findall(b'string', html_content)
        if len(matches) == 0:
            print(segment + ' unchanged.')
        else:
            with open(filePath, 'wb') as w:
                w.write(webContent)
This is the working code (sorry, I tried to make it shorter but couldn't):
with open(input_file, 'rb') as fp:
    data = fp.read()

fernet = Fernet(key)
encrypted = fernet.decrypt(data)

with open(output_file, 'wb') as fp:
    fp.write(encrypted)

with open(output_file) as fp:
    for url in fp:
        segment = url.strip()
        url = 'https://docs.python.org/3.3/tutorial/' + segment
        filename = segment + '.html'
        filePath = pjoin('Data/' + filename)
        response = urlopen(url)
        webContent = response.read()
        html_content = urlopen(url).read()
        matches = re.findall(b'string', html_content)
        if len(matches) == 0:
            print(segment + ' unchanged.')
        else:
            with open(filePath, 'wb') as w:
                w.write(webContent)
Header for both examples (kept apart to make them shorter):
#python 3.6.6
from urllib.request import urlopen
import urllib.request
from os.path import join as pjoin
import re, os, sys, tempfile, six, ctypes, time, fileinput
from cryptography.fernet import Fernet
print("[*] Checking list.dat for consistency . . .")
key = b'wTmVBRLytAmlfkctCuEf59K0LDCXa3sGas3kPg3r4fs=' #Decrypt list.dat
input_file = 'List.dat'
output_file = 'List.txt'
List.txt content:
errors
classes
stdlib
Any hints?
The problem is that once you have written to the file, the "file pointer" is at the end of the file. There's nothing to read.
You can use the seek method to reposition the file pointer at the beginning. Alternatively, closing and re-opening the file (as in your working code) will position the pointer at the beginning of the file.
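A minimal sketch of the seek fix on a temp file (my example, using sample lines from List.txt):

import tempfile

with tempfile.TemporaryFile() as fp:
    fp.write(b'errors\nclasses\nstdlib\n')
    fp.seek(0)           # reposition the file pointer at the beginning
    for line in fp:      # iteration now yields the lines just written
        print(line.strip())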
#LarryLustig pretty much answered why your code wasn't working, but IMO, if you eliminate the temp file altogether (which shouldn't be necessary), you don't even need to worry about the cursor. See the commented changes below to your desired code.
# We'll use os.linesep to get the line terminator string for your os.
import os

...

with open(input_file, 'rb') as fp:
    data = fp.read()

fernet = Fernet(key)
# Decode your decrypted bytes into strings. Change 'utf-8' to whichever file encoding you're using, if necessary.
decrypted = fernet.decrypt(data).decode('utf-8')

# Don't write to a temp file.
# Iterate directly on each line of the extracted data.
for url in decrypted.split(os.linesep):
    segment = url.strip()
    url = 'https://docs.python.org/3.3/tutorial/' + segment
    filename = segment + '.html'
    filePath = pjoin('Data/' + filename)
    response = urlopen(url)
    webContent = response.read()
    html_content = urlopen(url).read()
    matches = re.findall(b'string', html_content)
    if len(matches) == 0:
        print(segment + ' unchanged.')
    else:
        with open(filePath, 'wb') as w:
            w.write(webContent)
Alternatively, if you know for sure which line terminator the file uses (e.g. \r\n or \n), you can eliminate os.linesep altogether.
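If the terminator is uncertain, str.splitlines() sidesteps the question entirely; a tiny illustration (my addition):

decrypted = 'errors\r\nclasses\nstdlib'
print(decrypted.splitlines())  # ['errors', 'classes', 'stdlib'], whatever the terminator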

Get python code to persist after IndexError

I am querying an API from a website. The API will be down for maintenance from time to time, and there may not always be data available for querying. I have written the code to keep forcing the program to query the API even after an error, but it doesn't seem to be working.
The following is the code:
import threading
import json
import urllib
from urllib.parse import urlparse
import httplib2 as http  # External library
import datetime
import pyodbc as db
import os
import gzip
import csv
import shutil

def task():
    # Authentication parameters
    headers = {'AccountKey': 'secret',
               'accept': 'application/json'}  # this is by default

    # API parameters
    uri = 'http://somewebsite.com/'  # Resource URL
    path = '/something/TrafficIncidents?'

    # Build query string & specify type of API call
    target = urlparse(uri + path)
    print(target.geturl())
    method = 'GET'
    body = ''

    # Get handle to http
    h = http.Http()

    # Obtain results
    response, content = h.request(target.geturl(), method, body, headers)
    api_call_time = datetime.datetime.now()

    filename = "traffic_incidents_" + str(datetime.datetime.today().strftime('%Y-%m-%d'))
    createHeader = 1
    if os.path.exists(filename + '.csv'):
        csvFile = open(filename + '.csv', 'a')
        createHeader = 0
    else:
        # compress previous day's file
        prev_filename = "traffic_incidents_" + (datetime.datetime.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        if os.path.exists(prev_filename + '.csv'):
            with open(prev_filename + '.csv', 'rb') as f_in, gzip.open(prev_filename + '.csv.gz', 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            os.remove(prev_filename + '.csv')
        # create new csv file for writing
        csvFile = open(filename + '.csv', 'w')

    # Parse JSON to print
    jsonObj = json.loads(content)
    print(json.dumps(jsonObj, sort_keys=True, indent=4))
    with open("traffic_incidents.json", "w") as outfile:
        # Saving jsonObj["d"]
        json.dump(jsonObj, outfile, sort_keys=True, indent=4, ensure_ascii=False)

    for i in range(len(jsonObj["value"])):
        jsonObj["value"][i]["IncidentTime"] = jsonObj["value"][i]["Message"].split(' ', 1)[0]
        jsonObj["value"][i]["Message"] = jsonObj["value"][i]["Message"].split(' ', 1)[1]
        jsonObj["value"][i]["ApiCallTime"] = api_call_time

    # Save to csv file
    header = jsonObj["value"][0].keys()
    csvwriter = csv.writer(csvFile, lineterminator='\n')
    if createHeader == 1:
        csvwriter.writerow(header)
    for i in range(len(jsonObj["value"])):
        csvwriter.writerow(jsonObj["value"][i].values())
    csvFile.close()

    t = threading.Timer(120, task)
    t.start()

while True:
    try:
        task()
    except IndexError:
        pass
    else:
        break
I get the following error and the program stops:
"header = jsonObj["value"][0].keys()
IndexError: list index out of range"
I would like the program to keep running even after the IndexError has occurred.
How can I edit the code to achieve that?
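One observation, with a sketch that is my suggestion rather than an accepted answer: task() reschedules itself on a threading.Timer thread, and the try/except around the first call never sees exceptions raised on that thread. Moving the guard into the scheduled function itself (and dropping the Timer lines from the end of task()) keeps the polling alive:

def safe_task():
    try:
        task()                      # assumes task() no longer reschedules itself
    except IndexError:
        # jsonObj["value"] was empty -- no incidents this round; try again later
        print("no data available, retrying in 120 s")
    finally:
        threading.Timer(120, safe_task).start()  # reschedule regardless of outcome

safe_task()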

Show progressbar for ftp upload in python

So I'm writing a script that searches a folder recursively for .mkv files and uploads them to my NAS. I have the script working, but I can't see the progress. I imported this progress bar I found on GitHub and was able to use the demo to see it work. It is what I want; however, the FTP example they included retrieves a file from the server, and I need to upload.
How do I get the uploaded amount at an interval so I can update the progress bar?
Below is the code I have that works for the upload:
import os
import ftplib
import ntpath

ntpath.basename("a/b/c")

def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

from glob import glob

FileTransferList = [y for x in os.walk('/tmp/rippedMovies') for y in glob(os.path.join(x[0], '*.mkv'))]

global ftp

def FTP_GLOB_transfer(URL, UserName, Password):
    ftp = ftplib.FTP(URL, UserName, Password)  # connect to host, default port
    print URL, UserName, Password
    for file in FileTransferList:
        FileName = path_leaf(file)
        print file
        TheFile = open(file, 'r')
        ftp.storbinary('STOR ' + FileName, TheFile, 1024)
        TheFile.close()
    ftp.quit()
    ftp = None

FTP_GLOB_transfer('<IP>', '<USER>', '<PASSWORD>')
I figured it out. I decided to use tqdm, as I found easier-to-read documentation for it. I was assuming that storbinary() had to return something to report its progress; I just didn't know I was looking for a callback.
Anyway, I added a new import: from tqdm import tqdm
I added filesize = os.path.getsize(file) to get the size of the file.
Then I replaced ftp.storbinary('STOR ' + FileName, TheFile, 1024) with this code:
with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc='Uploading......', total=filesize) as tqdm_instance:
    # ftplib passes each transmitted block to the callback, so len(sent) advances the bar by the bytes just sent
    ftp.storbinary('STOR ' + FileName, TheFile, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
And overall, the new working code looks like this:
import os
import ftplib
import ntpath
from tqdm import tqdm

ntpath.basename("a/b/c")

def path_leaf(path):
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)

from glob import glob

FileTransferList = [y for x in os.walk('/tmp/rippedMovies') for y in glob(os.path.join(x[0], '*.mkv'))]

global ftp

def FTP_GLOB_transfer(URL, UserName, Password):
    ftp = ftplib.FTP(URL, UserName, Password)  # connect to host, default port
    print URL, UserName, Password
    for file in FileTransferList:
        FileName = path_leaf(file)
        filesize = os.path.getsize(file)
        print file
        TheFile = open(file, 'r')
        with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc='Uploading......', total=filesize) as tqdm_instance:
            ftp.storbinary('STOR ' + FileName, TheFile, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
        TheFile.close()
    ftp.quit()
    ftp = None
It now outputs:
/tmp/rippedMovies/TestMovie.mkv
Uploading......: 51%|████████████████████▉ | 547M/1.07G [00:05<00:14, 36.8Mblocks/s]

Simple encryption and decryption of strings by using bz2 in Python

I am trying to create a file with an encrypted username, password, and computer name when the user logs in. I want to use the same data again for authentication, decrypting it first. I am trying to use something simple that is built into Python.
import os
import bz2
os.chdir("D:/test")
encrypted_username = bz2.compress('username')
encrypted_password = bz2.compress('password')
computer_name = os.environ['COMPUTERNAME']
encrypted_computer_name = bz2.compress(computer_name)
f = open("Session.dat", "w")
f.write(encrypted_username + '\n')
f.write(encrypted_password + '\n')
f.write(encrypted_computer_name)
f.close()
f = open("Session.dat", "r")
data = f.read()
d_data = bz2.decompress(data)
f.close()
print(d_data)
But when I decrypt the data in the file and print it, I only get the output below. Why am I not getting the password and computer name? Thank you.
username
The code compresses the strings separately. You could read all the lines and decompress them line by line, as alecxe commented, but that is not practical because the compressed data could itself contain newline(s).
Instead, combine the strings (in the following code I used the NUL byte \0 as a separator), then compress the combined string.
To decompress: after decompressing, split the combined string on the same separator.
import os
import bz2

#os.chdir("D:/test")
username = 'username'
password = 'password'
computer_name = os.environ['COMPUTERNAME']
compressed = bz2.compress(username + '\0' + password + '\0' + computer_name)

with open("Session.dat", "wb") as f:
    f.write(compressed)

with open("Session.dat", "rb") as f:
    d_data = bz2.decompress(f.read())

print(d_data.split('\0'))
BTW, you should use binary mode to read/write compressed data.
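As a side note, a sketch of the same idea in Python 3, where bz2.compress() requires bytes rather than str (my adaptation, not part of the original answer):

import bz2
import os

username = b'username'
password = b'password'
computer_name = os.environ['COMPUTERNAME'].encode('utf-8')  # Windows-only variable

compressed = bz2.compress(b'\0'.join([username, password, computer_name]))

with open('Session.dat', 'wb') as f:
    f.write(compressed)

with open('Session.dat', 'rb') as f:
    fields = bz2.decompress(f.read()).split(b'\0')

print(fields)  # [b'username', b'password', b'<computer name>']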
