I want to decrypt many files, and encrypting and decrypting themselves work perfectly.
My problem is that if any file exists which is not encrypted, I get an InvalidToken error, and the program is unable to decrypt the remaining encrypted files.
Here is my code:
import os
from cryptography.fernet import Fernet
import sys
import warnings

InvalidToken = False
key = b'SpecialKindKey'
fernet = Fernet(key)
encr_file_list = []
path = "C:/users/NineTail/desktop/pokemon/go/"

# we shall store all the file names in this list
filelist = []
for root, dirs, files in os.walk(path):
    for file in files:
        # append the file name to the list
        filelist.append(os.path.join(root, file))

# print all the file names
for name in filelist:
    print(name)

for file in filelist:
    with open(file, 'rb') as f:
        file_content = f.read()
    decrypted_content = fernet.decrypt(file_content)
    with open(file, 'wb') as f:
        f.write(decrypted_content)
Error message:
Traceback (most recent call last):
File "r3.py", line 36, in <module>
decrypted_content = fernet.decrypt(file_content)
File "C:\Program Files\Python38\lib\site-packages\cryptography\fernet.py", line 85, in decrypt
timestamp, data = Fernet._get_unverified_token_data(token)
File "C:\Program Files\Python38\lib\site-packages\cryptography\fernet.py", line 121, in _get_unverified_token_data
raise InvalidToken
cryptography.fernet.InvalidToken
Use try/except to try to decrypt. If decrypting fails, continue without writing.
...
from cryptography.fernet import Fernet, InvalidToken
...
for file in filelist:
    with open(file, 'rb') as f:
        file_content = f.read()
    try:
        decrypted_content = fernet.decrypt(file_content)
    except InvalidToken:
        continue
    with open(file, 'wb') as f:
        f.write(decrypted_content)
...
Perhaps try a loop that runs once for each file you have and handles each file separately from the rest.
Define a list of the file names, store it in a variable, and open them one at a time, say:
files = ['file1.txt', 'file2.txt']   # list of file names
num = len(files)                     # number of files
contents = []
for i in range(num):
    with open(files[i], 'r') as f:
        contents.append(f.read())
Related
I am trying to read all .txt files in a folder and print them into a .csv file. If I have (in theory) an unlimited number of .txt files, each with (in theory) a lot of content, I want them in a table like Excel.
import os, csv

destinationfile = "meine.csv"
dictionary = {}
directory = os.path.normpath("C:/Users/Viktor/PycharmProjects/pythonProject1")

for subdir, dirs, files in os.walk(directory):
    for file in files:
        split = file.split(".", 1)
        header = split[0]
        readfiles = open(file, "r")
        tlist = []
        for line in readfiles:
            split_line = line.split()
            for split in split_line:
                tlist.append(split)
        dictionary.update({header: tlist})
        if file.endswith(".txt"):
            f = open(os.path.join(subdir, file), 'r')
            a = f.read()
            split = file.split(".", 1)
            header = split[0]
            readfile = open(file, "r")
            tlist = []
            for line in readfile:
                split_line = line.split()
                for split in split_line:
                    tlist.append(split)
            dictionary.update({header: tlist})

with open(destinationfile, "w", newline='') as csv_file:
    writer = csv.writer(csv_file, dialect='excel', delimiter=";")
    headers = list(dictionary)
    writer.writerow(headers)
    for entry in zip(*dictionary.values()):
        writer.writerow(entry)
It should look something like this in the end:
001;002;003;
a;d;g
b;e;h
c;f;i
001/002/003 are the names of my .txt files, and a, b, c, d, e, ... are the contents of those files.
But when I run the code, I get this error:
Traceback (most recent call last):
File "C:\Users\Viktor\PycharmProjects\pythonProject1\main.py", line 11, in <module>
readfiles = open(file, "r")
FileNotFoundError: [Errno 2] No such file or directory: '.gitignore'
Process finished with exit code 1
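The traceback shows that open(file, "r") is being given only the bare file name from os.walk, and for every file in the tree (including .gitignore), not just the .txt files. A sketch of the fix, reusing the os.path.join(subdir, file) call that already appears in the if block and keeping the rest of the approach unchanged, could be:

for subdir, dirs, files in os.walk(directory):
    for file in files:
        if not file.endswith(".txt"):
            continue                          # skip non-text files such as .gitignore
        header = file.split(".", 1)[0]
        with open(os.path.join(subdir, file), "r") as readfile:
            tlist = [word for line in readfile for word in line.split()]
        dictionary.update({header: tlist})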
I have a folder which contains multiple subfolders. I want to browse all Excel files ending with .xlsx and merge them into one single .xlsx file with the following code:
import os
import glob
import pandas as pd

for root, dirs, files in os.walk("D:/Test"):
    for file in files:
        if file.endswith(".xlsx"):
            #print(os.path.join(root, file))
            s = os.path.join(root, file)
            print(s)

all_data = pd.DataFrame()
for f in s:
    df = pd.read_excel(f)
    all_data = all_data.append(df, ignore_index=True)

# now save the data frame
writer = pd.ExcelWriter('result.xlsx')
all_data.to_excel(writer, 'sheet1')
writer.save()
But an error happens when it runs:
Traceback (most recent call last):
File "<ipython-input-169-41c6d76207e7>", line 12, in <module>
df = pd.read_excel(f)
File "C:\Users\User\Anaconda3\lib\site-packages\pandas\util\_decorators.py", line 118, in wrapper
return func(*args, **kwargs)
File "C:\Users\User\Anaconda3\lib\site-packages\pandas\io\excel.py", line 230, in read_excel
io = ExcelFile(io, engine=engine)
File "C:\Users\User\Anaconda3\lib\site-packages\pandas\io\excel.py", line 294, in __init__
self.book = xlrd.open_workbook(self._io)
File "C:\Users\User\Anaconda3\lib\site-packages\xlrd\__init__.py", line 116, in open_workbook
with open(filename, "rb") as f:
FileNotFoundError: [Errno 2] No such file or directory: 'D'
Does someone know how to deal with this problem? Thanks.
Your problem is with df = pd.read_excel(f). What are the contents of f? It looks like Python thinks it's 'D'.
This is because for f in s: just iterates over the characters of the string you created with s = os.path.join(root, file). I think you want to be saving those paths in a container, like so:
paths = []
for root, dirs, files in os.walk("D:/Test"):
    for file in files:
        if file.endswith(".xlsx"):
            #print(os.path.join(root, file))
            s = os.path.join(root, file)
            print(s)
            paths.append(s)

all_data = pd.DataFrame()
for f in paths:
    df = pd.read_excel(f)
    all_data = all_data.append(df, ignore_index=True)
You can also reduce that initial for loop into a list comprehension with
paths = [os.path.join(root, file) for root, _, files in os.walk('D:/Test') for file in files if file.endswith('.xlsx')]
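As a side note, a sketch of the same flow using pd.concat, which builds the combined frame in one call instead of appending inside the loop (assuming the same D:/Test tree and pandas imported as pd), could look like this:

paths = [os.path.join(root, file)
         for root, _, files in os.walk('D:/Test')
         for file in files if file.endswith('.xlsx')]
# read each workbook once and stack all rows into a single DataFrame
all_data = pd.concat((pd.read_excel(p) for p in paths), ignore_index=True)
all_data.to_excel('result.xlsx', sheet_name='sheet1', index=False)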
import os
import pandas as pd

listof_files = os.listdir()
current_file_name = os.path.basename(__file__)

# flag to make sure append is happening properly
count = 0
mainFrame = 0
for file in listof_files:
    # ignore the python script file itself for pd.read_excel
    if (file != current_file_name) and (file.endswith(".xlsx")):
        tempdf = pd.read_excel(str(file))
        if count == 0:
            mainFrame = tempdf.copy()
        else:
            mainFrame = pd.concat([mainFrame, tempdf])
        count += 1

mainFrame.to_excel('final.xlsx', index=False)
You can also do it this way: put the script in the folder where you have all the .xlsx files, then run it. It will fetch all the .xlsx files and concatenate them with each other, so that a single Excel file is formed in the end.
I have a text file containing multiple lines beginning with a byte order mark. Passing encoding='utf-8-sig' to open removes the BOM at the start of the file but all subsequent BOMs remain. Is there a more correct way to remove these than this:
import codecs

filepath = 'foo.txt'
bom_len = len(codecs.BOM_UTF8)

def remove_bom(s):
    s = str.encode(s)
    if codecs.BOM_UTF8 in s:
        s = s[bom_len:]
    return s.decode()

try:
    with open(filepath, encoding='utf-8-sig') as file_object:
        for line in file_object:
            line = line.rstrip()
            line = remove_bom(line)
            if line != '':
                print([line[0]])
except FileNotFoundError:
    print('No file found at ' + filepath)
I'm having similar problems.
This kinda helped me:
import codecs

with open(path, "rb") as infile:
    bytecontent = infile.read()
bytecontent = bytecontent.replace(codecs.BOM_UTF8, b"")
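If the cleaned content is then needed as text, or has to be written back to disk, a minimal continuation (assuming the data is otherwise valid UTF-8; path is the same variable as above) could be:

text = bytecontent.decode("utf-8")    # bytes -> str once the stray BOMs are gone
with open(path, "wb") as outfile:     # overwrite the original file with the cleaned bytes
    outfile.write(bytecontent)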
I am trying to search for all files in a directory, read each one, and store its contents in a list to be used later.
My problem is that when I use print to debug whether the file exists, it prints the current (first) file name in the list. However, Python complains that the file is not found when I try to read from it.
import re
import os

# Program to extract emails from text files

def path_file():
    #path = raw_input("Please enter path to file:\n> ")
    path = '/home/holy/thinker/leads/'
    return os.listdir('/home/holy/thinker/leads')  # returns a list like ["file1.txt", 'image.gif']  # need to remove trailing slashes

# read a file as 1 big string
def in_file():
    print path_file()
    content = []
    for a_file in path_file():  # ['add.txt', 'email.txt']
        print a_file
        fin = open(a_file, 'r')
        content.append(fin.read())  # store content of each file
        print content
        fin.close()
    return content

print in_file()
# this is the error i get
""" ['add.txt', 'email.txt']
add.txt
Traceback (most recent call last):
File "Extractor.py", line 24, in <module>
print in_file()
File "Extractor.py", line 17, in in_file
fin = open(a_file, 'r')
IOError: [Errno 2] No such file or directory: 'add.txt'
"""
The error I get is shown above.
os.listdir returns only the file names. You have to put the directory name in front of each file name.
Your code is trying to open add.txt in the same directory where you ran your program, so add the directory name before the file name:
def path_file():
    #path = raw_input("Please enter path to file:\n> ")
    path = '/home/holy/thinker/leads/'
    return [os.path.join(path, x) for x in os.listdir(path)]
You should use the full path of the file you want to read,
so please do fin = open(os.path.join(r'/home/holy/thinker/leads/', a_file), 'r')
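For completeness, a sketch of in_file() with that change applied (Python 2, matching the question) might look like:

def in_file():
    path = '/home/holy/thinker/leads/'
    content = []
    for a_file in os.listdir(path):
        fin = open(os.path.join(path, a_file), 'r')   # open via the full path
        content.append(fin.read())                    # store the content of each file
        fin.close()
    return content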
Here's a rewrite using glob to limit which files are considered:
import glob
import os
import re
import sys

if sys.hexversion < 0x3000000:
    # Python 2.x
    inp = raw_input
else:
    # Python 3.x
    inp = input

def get_dir(prompt):
    while True:
        dir_name = inp(prompt)
        dir_name = os.path.join(os.getcwd(), dir_name)
        if os.path.isdir(dir_name):
            return dir_name
        else:
            print("{} does not exist or is not a directory".format(dir_name))

def files_in_dir(dir_name, file_spec="*.txt"):
    return glob.glob(os.path.join(dir_name, file_spec))

def file_iter(files):
    for fname in files:
        with open(fname) as inf:
            yield fname, inf.read()

def main():
    email_dir = get_dir("Please enter email directory: ")
    email_files = files_in_dir(email_dir, "*.eml")
    print(email_files)

    content = [txt for fname, txt in file_iter(email_files)]
    print(content)

if __name__ == "__main__":
    main()
and a trial run looks like
Please enter email directory: c:\temp
['c:\\temp\\file1.eml', 'c:\\temp\\file2.eml']
['file1 line one\nfile1 line two\nfile1 line three',
'file2 line one\nfile2 line two']
OK, I have another question. I implemented the error checking, but for some reason it still isn't working: I still get a Python traceback instead of the error message I wrote into the program.
Traceback (most recent call last):
File "E:/python/copyfile.py", line 31, in <module>
copyFile()
File "E:/python/copyfile.py", line 8, in copyFile
file1 = open(source,"r")
IOError: [Errno 2] No such file or directory: 'C:/Users/Public/asdf.txt'
Check out the shutil module in the standard library:
shutil.copyfile(src, dst)
http://docs.python.org/2/library/shutil.html#shutil.copyfile
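A minimal sketch combining shutil.copyfile with the kind of error checking the question asks about (the paths below are placeholders; the source path is taken from the traceback):

import shutil

source = 'C:/Users/Public/asdf.txt'       # hypothetical source path, from the traceback
destination = 'E:/python/asdf_copy.txt'   # hypothetical destination path
try:
    shutil.copyfile(source, destination)
except IOError as e:                      # raised in Python 2 when the source is missing
    print "Could not copy the file: %s" % e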
I would rather ask you to write your own:
import os
import hashlib

def md5ChkSum(_file):  # calculates the MD5 checksum of a file
    with open(_file, 'rb') as fp:
        hash_obj = hashlib.md5()
        line = fp.readline()
        while line:
            hash_obj.update(line)
            line = fp.readline()
        return hash_obj.hexdigest()

def copier(_src, _dst):
    if not os.path.exists(_src):
        return False
    _src_fp = open(_src, "rb")   # binary mode so non-text files (e.g. images) copy correctly
    _dst_fp = open(_dst, "wb")
    line = _src_fp.readline()
    while line:
        _dst_fp.write(line)
        line = _src_fp.readline()
    _src_fp.close()
    _dst_fp.close()
    if md5ChkSum(_src) == md5ChkSum(_dst):
        return "Copy: SUCCESSFUL"
    return "Copy: FAILED"

res = copier(r"/home/cnsiva/6.jpg", r"/home/cnsiva/6_copied.jpg")
if not res:
    print "File does not exist!"
else:
    print res
OUTPUT:
Copy: SUCCESSFUL