Python3 - Cannot import local module due to FileNotFoundError

Within my code I am attempting to import modules written by myself. Said modules interact with files on the system, either encrypting, decrypting, or removing them altogether. When trying to import these modules I receive the error below:
FileNotFoundError: [Errno 2] No such file or directory: <Directory>
Oddly, this error appears before anything local within the file runs (the calls are at the end), making me believe Python is executing the files during the import phase and not finding the files (apologies, but I am a novice when it comes to importing self-created modules).
Below is one of the modules I am importing (also the one flagging the error):
import urllib.request, re, os

def getKey(url, path):
    urllib.request.urlretrieve(url, 'C:\\code\\'+path)

def readFiles(keyFile, targetFile, outfile):
    infile = keyFile # read in the input file name
    infd = open('C:\\code\\'+infile,"r") # open the file and create the file descriptor infd
    key = infd.read()
    key = key.strip('\n')
    #print('key= '+key)
    infile = targetFile
    infd = open('C:\\code\\'+infile, "r")
    ptext = infd.read().strip('\n')
    infd.close
    xor(outfile, ptext, key)

def xor(outfile, ptext, key):
    outfd = open('C:\\code\\'+outfile, "w")
    pLength = len(ptext)
    #get the length of the plaintext and cut the key into the same size
    keyChunks = [key[i:i+pLength] for i in range(0, len(key), pLength)]
    i = 0
    while i < len(keyChunks):
        a = int(ptext)
        b = int(keyChunks[i])
        out = a ^ b
        i = i+1
    outfd = open('C:\\code\\'+outfile, "w")
    outfd.write(str(out))
    outfd.close

def cleanup(targetFile):
    os.remove(targetFile)

def enc():
    outfile = 'affk.xor'
    getKey('http://192.168.56.10/sym.key', 'sym.key')
    readFiles('sym.key', 'affk.txt', outfile)
    cleanup('C:/code/affk.txt')

def dec():
    outfile = 'affk.txt'
    getKey('http://192.168.56.10/sym.key', 'sym.key')
    readFiles('sym.key', 'affk.xor', outfile)
    cleanup('C:/code/affk.xor')
    cleanup('C:/code/sym.key')

enc()
#dec()
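If my suspicion about the import phase is right, then the bare enc() call at the bottom of this module is what fires as soon as it is imported. I'm assuming a guard like the following (untested sketch) would keep the calls from running until I invoke them explicitly:

if __name__ == '__main__':
    # only runs when this file is executed directly, not when it is imported
    enc()
    #dec()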
Below is a snippet of my main file with the imports and the calling function:
import os, math, affine, re, urllib.request, ctypes
from xor import enc
from rsa1 import run

---Additional Code here---

def main():
    # Call function to search for the files we want to encrypt
    search()
    # Call function to read the target files. Main ciphers are daisy chained off reader function
    reader(targets)
    # Call function to XOR Affine cipher key
    xor.enc()
    # Call function to encrypt key used for above XOR using RSA
    rsa1.run()
Any assistance with this problem would be greatly appreciated! Apologies if this is an extremely dumb question!

Related

Python file delete PermissionError: [WinError 32]

I am trying to delete a duplicate image by comparing MD5 file hashes. My code is:
from PIL import Image
import hashlib
import os
import sys
import io

img_file = urllib.request.urlopen(img_url, timeout=30)
f = open('C:\\Users\\user\\Documents\\' + img_name, 'wb')
f.write(img_file.read())
f.close # subject image, status = ok
im = Image.open('C:\\Users\\user\\Documents\\' + img_name)
m = hashlib.md5() # get hash
with io.BytesIO() as memf:
    im.save(memf, 'PNG')
    data = memf.getvalue()
m.update(data)
md5hash = m.hexdigest() # hash done, status = ok
im.close()
if md5hash in hash_list[name]: # comparing hash
    os.remove('C:\\Users\\user\\Documents\\' + img_name) # delete file, ERROR
else:
    hash_list[name].append(m.hexdigest())
and I get this error:
PermissionError: [WinError 32] The process cannot access the file because it is being used by another process:
'C:\\Users\\user\\Documents\\myimage.jpg'
I tried an admin command prompt, but I'm still getting this error. Can you find what is accessing the file?
Just noticed you're using f.close instead of f.close().
Add () and check if the problem still occurs.
Cheers ;)
Your issue is indeed what Adrian Daniszewski said; however, there are quite a few more programming problems in your code.
First of all, you should familiarize yourself with with. You use with for BytesIO(), but it can also be used for opening files.
The benefit of with open(...) as f: is that you don't have to check whether you closed the file or remember to close it. It will close the file at the end of its indented block.
Second, there is a bit of duplication in your code. Your code should be DRY to avoid being forced to change multiple locations containing the same thing.
Imagine having to change where you save the byte files. Right now you would be forced to change it in three different locations.
Now imagine not noticing one of those locations.
My suggestion would be, first of all, to save the path to a variable and use that:
bytesimgfile = 'C:\\Users\\user\\Documents\\' + img_name
An example of using with in your code would look like this:
with open(bytesimgfile, 'wb') as f:
    f.write(img_file.read())
A full example with your given code:
from PIL import Image
import hashlib
import os
import sys
import io
import urllib.request  # needed for urlopen below

img_file = urllib.request.urlopen(img_url, timeout=30)
bytesimgfile = 'C:\\Users\\user\\Documents\\' + img_name
with open(bytesimgfile, 'wb') as f:
    f.write(img_file.read())
with Image.open(bytesimgfile) as im:
    m = hashlib.md5() # get hash
    with io.BytesIO() as memf:
        im.save(memf, 'PNG')
        data = memf.getvalue()
    m.update(data)
    md5hash = m.hexdigest() # hash done, status = ok
if md5hash in hash_list[name]: # comparing hash
    os.remove(bytesimgfile) # delete file
else:
    hash_list[name].append(m.hexdigest())

Python: Creating and writing to a file error

I am having trouble creating and writing to a text file in Python. I am running Python 3.5.1 and have the following code to try and create and write to a file:
from os import *

custom_path = "MyDirectory/"
if not path.exists(custom_path):
    mkdir(custom_path)

text_path = custom_path + "MyTextFile.txt"
text_file = open(text_path, "w")
text_file.write("my text")
But I get a TypeError saying an integer is required (got type str) at the line text_file = open(text_path, "w").
I don't know what I'm doing wrong as my code is just about identical to that of several tutorial sites showing how to create and write to files.
Also, does the above code create the text file if it doesn't exist, and if not how do I create it?
Please don't import everything from the os module:
from os import path, mkdir

custom_path = "MyDirectory/"
if not path.exists(custom_path):
    mkdir(custom_path)

text_path = custom_path + "MyTextFile.txt"
text_file = open(text_path, 'w')
text_file.write("my text")
This is because there is also an open function in the os module, which shadows the built-in open when you do from os import *. os.open expects an integer flags argument as its second parameter, which is why you get "an integer is required (got type str)". And to answer your other question: yes, open(text_path, "w") creates the file if it doesn't already exist.
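For comparison, here is a minimal sketch of what the shadowed os.open actually expects - integer flag constants and low-level file descriptors (file name reused from your example):

import os

# os.open takes integer flag constants, not a mode string like "w"
fd = os.open("MyTextFile.txt", os.O_WRONLY | os.O_CREAT)
os.write(fd, b"my text")
os.close(fd)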

Maya Python - Embed zip file into maya file?

This was a suggestion from another stack thread that I'm finally getting back to. It was part of a discussion about how to embed tools into a maya file.
You can write the whole thing as a python package, zip it, then stuff the binary contents of the zip file into a fileInfo. When you need the code, look for it in the user's $MAYA_APP_DIR; if there's no zip, write the contents of the fileInfo to disk as a zip and then insert the zip into sys.path.
Source discussions were:
python copy scripts into script
and Maya Python Create and Use Zipped Package?
So far the programming is going okay, but I think I hit a snag. When I attempt this:
with open("..directory/myZip.zip","rb") as file:
cmds.fileInfo("myZip", file.read())
..and then I..
print cmds.fileInfo("myZip",q=1)
I get
[u'PK\003\004\024']
which is a bad translation of the first line of gibberish when reading the zip file as a text document.
How can I embed my zip file into my maya file as binary?
====================
Update:
Maya doesn't like having the file written to it as a direct read of the utf-8 encoded zip file. I found various methods of turning it into an acceptable string that could be written, but decoding back to a file didn't appear to work. I see now that Theodox's suggestion was to convert it to binary and put that in the fileInfo node.
How can one encode, store and then decode to write to file later?
If I were to convert to binary using for instance:
' '.join(format(ord(x), 'b') for x in line)
What code would I need to turn that back into the original utf-8 zip information?
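(For reference, I assume the reverse of that expression would be something like this untested sketch, though I'm not sure it's the right approach at all:)

# untested sketch: invert the ord/format conversion above
encoded = ' '.join(format(ord(x), 'b') for x in line)
decoded = ''.join(chr(int(bits, 2)) for bits in encoded.split(' '))
assert decoded == line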
You can find related code here:
http://tech-artists.org/forum/showthread.php?4161-Maya-API-Singleton-Nodes&highlight=mayapersist
the relevant bit is
import base64
encoded = base64.b64encode(value)
decoded = base64.b64decode(encoded)
Basically it's the same idea, except using the base64 module instead of binascii. Any method of converting an arbitrary character stream to an ascii-safe representation will work fine, as long as you use a reversible method: the potential problem you need to watch out for is a character in the data block that looks to Maya like a close quote - an open quote in the fileInfo will be messy in an MA file.
This example uses YAML to do arbitrary key-value pairs, but that part is irrelevant to storing the binary stuff. I've used this technique for fairly large data (up to 640k if I recall), but I don't know if it has an upper limit in terms of what you can stash in Maya.
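A minimal round trip along those lines - file names are hypothetical, and this sketch is untested in Maya:

import base64
import maya.cmds as cmds

# store: stash an ascii-safe copy of the zip's bytes in the scene
with open("myZip.zip", "rb") as f:
    cmds.fileInfo("myZip", base64.b64encode(f.read()))

# restore: decode the stored string back into a zip on disk
stored = cmds.fileInfo("myZip", q=1)[0]
with open("restored.zip", "wb") as f:
    f.write(base64.b64decode(stored))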
Found the answer, thanks to a great script on Stack Overflow (linked below). I had to encode to 'string_escape', which is something I found while trying to figure out the whole character-encoding situation. But anyway: you open the zip, encode to 'string_escape', write it to fileInfo, and then, before fetching it to write it back to a zip, you decode it back.
Convert binary to ASCII and vice versa
import maya.cmds as cmds
import binascii

def text_to_bits(text, encoding='utf-8', errors='surrogatepass'):
    bits = bin(int(binascii.hexlify(text.encode(encoding, errors)), 16))[2:]
    return bits.zfill(8 * ((len(bits) + 7) // 8))

def text_from_bits(bits, encoding='utf-8', errors='surrogatepass'):
    n = int(bits, 2)
    return int2bytes(n).decode(encoding, errors)

def int2bytes(i):
    hex_string = '%x' % i
    n = len(hex_string)
    return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
And then you can:
with open("..\maya\scripts/test.zip","rb") as thing:
    texty = text_to_bits(thing.read().encode('string_escape'))
    cmds.fileInfo("binaryZip",texty)
...later:
with open("..\maya\scripts/test_2.zip","wb") as thing:
    texty = cmds.fileInfo("binaryZip",q=1)
    thing.write( text_from_bits( texty ).decode('string_escape') )
and this appears to work... so far.
Figured I'd post the final product for anyone wanting to undertake this approach. I tried to do some corruption checking, so that a bad zip doesn't get passed between machines. That's what all the hash checking is for.
def writeTimeFull(tl):
    import TimeFull
    #reload(TimeFull)
    with open(TimeFull.__file__.replace(".pyc",".py"),"r") as file:
        cmds.scriptNode( tl.scriptConnection[1][0], e=1, bs=file.read() )
    cmds.expression("spark_timeliner_activator",
                    e=1, s='if (Spark_Timeliner.ShowTimeliner == 1)\n'
                           '{\n'
                           '\tsetAttr Spark_Timeliner.ShowTimeliner 0;\n'
                           '\tpython \"Timeliner.InitTimeliner()\";\n'
                           '}',
                    o="Spark_Timeliner", ae=1, uc=all)

def checkHash(zipPath,hash1,hash2,hash3):
    check = False
    hashes = [hash1,hash2,hash3]
    for ii, hash in enumerate(hashes):
        if hash == hashes[(ii+1)%3]:
            hashes[(ii+2)%3] = hashes[ii]
            check = True
    if check:
        if md5(zipPath) == hashes[0]:
            return [zipPath,hashes[0],hashes[1],hashes[2]]
    else:
        cmds.warning("Hash checks and/or zip are corrupted. Attain toolbox_fix.zip, put it in scripts folder and restart.")
    return []
#this writes the zip file to the local users space
def saveOutZip(filename):
    if os.path.isfile(filename):
        if not os.path.isfile(filename.replace('_pkg','_'+__version__)):
            os.rename(filename,filename.replace('_pkg','_'+__version__))
    with open(filename,"w") as zipFile:
        zipInfo = cmds.fileInfo("zipInfo1",q=1)[0]
        zipHash_1 = cmds.fileInfo("zipHash1",q=1)[0]
        zipHash_2 = cmds.fileInfo("zipHash2",q=1)[0]
        zipHash_3 = cmds.fileInfo("zipHash3",q=1)[0]
        zipFile.write( base64.b64decode(zipInfo) )
        if checkHash(filename,zipHash_1,zipHash_2,zipHash_3):
            cmds.fileInfo("zipInfo2",zipInfo)
            return filename
    with open(filename,"w") as zipFile:
        zipInfo = cmds.fileInfo("zipInfo2",q=1)[0]
        zipHash_1 = cmds.fileInfo("zipHash1",q=1)[0]
        zipHash_2 = cmds.fileInfo("zipHash2",q=1)[0]
        zipHash_3 = cmds.fileInfo("zipHash3",q=1)[0]
        zipFile.write( base64.b64decode(zipInfo) )
        if checkHash(filename,zipHash_1,zipHash_2,zipHash_3):
            cmds.fileInfo("zipInfo1",zipInfo)
            return filename
    return False
#this writes the local zip to this file
def loadInZip(filename):
    zipResults = []
    for ii in range(0,10):
        with open(filename,"r") as theRead:
            zipResults.append([base64.b64encode(theRead.read())]+checkHash(filename,md5(filename),md5(filename),md5(filename)))
        if ii>0 and zipResults[ii]==zipResults[ii-1]:
            cmds.fileInfo("zipInfo1",zipResults[ii][0])
            cmds.fileInfo("zipInfo2",zipResults[ii-1][0])
            cmds.fileInfo("zipHash1",zipResults[ii][2])
            cmds.fileInfo("zipHash2",zipResults[ii][3])
            cmds.fileInfo("zipHash3",zipResults[ii][4])
            return True

#file check
#http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
def md5(fname):
    import hashlib
    hash = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash.update(chunk)
    return hash.hexdigest()
filename = path+'/toolbox_pkg.zip'
zipPaths = [path+'/toolbox_update.zip',
            path+'/toolbox_fix.zip',
            path+'/toolbox_'+__version__+'.zip',
            filename]
zipPaths_exist = [os.path.isfile(zipPath) for zipPath in zipPaths]

if any(zipPaths_exist[:2]):
    if zipPaths_exist[0]:
        cmds.warning('Timeliner update present. Forcing file to update version')
        if zipPaths_exist[2]:
            os.remove(zipPaths[3])
        elif os.path.isfile(zipPaths[3]):
            os.rename(zipPaths[3], zipPaths[2])
        os.rename(zipPaths[0],zipPaths[3])
        if zipPaths_exist[1]:
            os.remove(zipPaths[1])
    else:
        cmds.warning('Timeliner fix present. Replacing file to the fix version.')
        if os.path.isfile(zipPaths[3]):
            os.remove(zipPaths[3])
        os.rename(zipPaths[1],zipPaths[3])
    loadInZip(filename)

if not cmds.fileInfo("zipInfo1",q=1) and not cmds.fileInfo("zipInfo2",q=1):
    loadInZip(filename)
if not os.path.isfile(filename):
    saveOutZip(filename)

sys.path.append(filename)
import Timeliner
Timeliner.InitTimeliner(theVers=__version__)
if not any(zipPaths_exist[:2]): # was any(zipPaths[:2]), which is always true for non-empty path strings
    if __version__ > Timeliner.__version__:
        cmds.warning('Saving out newer version of timeliner to local machine. Restart Maya to access latest version.')
        saveOutZip(filename)
    elif __version__ < Timeliner.__version__:
        cmds.warning('Timeliner on machine is newer than file version. Saving machine version over timeliner in file.')
        loadInZip(filename)
        __version__ = Timeliner.__version__

if __name__ != "__main__":
    tl = getTimeliner()
    writeTimeFull(tl)

tempfile is not accessible when using subprocess.Popen

When I run the following script, the error "Command line argument error: Argument \"query\". File is not accessible" occurs. I'm using Python 3.4.2.
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import subprocess
import tempfile
import sys

def main():
    # read name file and put all identifications into a list
    infile_I = open('OTU_name.txt','r')
    name = infile_I.read().split('>')
    infile_I.close()
    # extract sequence segments to a temporary file one at a time
    for i in name:
        i = i.replace('\n','')
        for j in SeqIO.parse("GemSIM_OTU_ids.fa","fasta"):
            if str(i) == str(j.id):
                f = tempfile.NamedTemporaryFile()
                record = j.seq
                f.write(bytes(str(record),'UTF-8'))
                f.seek(0)
                f = f.read().decode()
                Result = subprocess.Popen(['blastn','-remote','-db','chromosome','-query',f,'-out',str(i)],stdout=subprocess.PIPE)
                output = Result.communicate()[0]

if __name__ == '__main__': main()
f = tempfile.NamedTemporaryFile() returns a file-like object, which you're trying to provide as a command line argument. Instead, you want the actual filename, which is available via its .name attribute - although I'm somewhat confused why you're creating a tempfile, writing to it, seeking back to position 0, and then replacing your tempfile f object with the contents of the file. I suspect you don't want that replacement; keep the tempfile object and use f.name for your query:
Result = subprocess.Popen(['blastn','-remote','-db','chromosome','-query',f.name,'-out',str(i)],stdout=subprocess.PIPE)
Also, there are some convenient wrapper functions around subprocess.Popen, such as subprocess.check_output, which are somewhat more explicit about your intent and could be used here instead.
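For instance, a sketch of the same call using subprocess.check_output - this keeps the tempfile object intact and passes its .name, and assumes blastn is on PATH and reuses record and i from your loop:

import subprocess
import tempfile

with tempfile.NamedTemporaryFile() as f:
    f.write(bytes(str(record), 'UTF-8'))
    f.flush()  # make sure the data is on disk before blastn opens it
    output = subprocess.check_output(
        ['blastn', '-remote', '-db', 'chromosome',
         '-query', f.name, '-out', str(i)])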

Extract the SHA1 hash from a torrent file

I've had a look around for the answer to this, but I only seem to be able to find software that does it for you. Does anybody know how to go about doing this in Python?
I wrote a piece of python code that verifies the hashes of downloaded files against what's in a .torrent file. Assuming you want to check a download for corruption you may find this useful.
You need the bencode package to use this. Bencode is the serialization format used in .torrent files. It can marshal lists, dictionaries, strings and numbers somewhat like JSON.
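To get a feel for the format, here is a short round trip with that package (example values are arbitrary):

import bencode

data = {'spam': ['a', 'b'], 'num': 42}
encoded = bencode.bencode(data)   # "d3:numi42e4:spaml1:a1:bee"
assert bencode.bdecode(encoded) == data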
The code takes the hashes contained in the info['pieces'] string:
torrent_file = open(sys.argv[1], "rb")
metainfo = bencode.bdecode(torrent_file.read())
info = metainfo['info']
pieces = StringIO.StringIO(info['pieces'])
That string contains a succession of 20 byte hashes (one for each piece). These hashes are then compared with the hash of the pieces of on-disk file(s).
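(Equivalently, the expected digests could be sliced out of that string up front; a minimal sketch:)

raw = info['pieces']
piece_hashes = [raw[i:i+20] for i in range(0, len(raw), 20)]  # one 20-byte SHA1 per piece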
The only complicated part of this code is handling multi-file torrents because a single torrent piece can span more than one file (internally BitTorrent treats multi-file downloads as a single contiguous file). I'm using the generator function pieces_generator() to abstract that away.
You may want to read the BitTorrent spec to understand this in more detail.
Full code below:
import sys, os, hashlib, StringIO, bencode

def pieces_generator(info):
    """Yield pieces from download file(s)."""
    piece_length = info['piece length']
    if 'files' in info: # yield pieces from a multi-file torrent
        piece = ""
        for file_info in info['files']:
            path = os.sep.join([info['name']] + file_info['path'])
            print path
            sfile = open(path.decode('UTF-8'), "rb")
            while True:
                piece += sfile.read(piece_length-len(piece))
                if len(piece) != piece_length:
                    sfile.close()
                    break
                yield piece
                piece = ""
        if piece != "":
            yield piece
    else: # yield pieces from a single file torrent
        path = info['name']
        print path
        sfile = open(path.decode('UTF-8'), "rb")
        while True:
            piece = sfile.read(piece_length)
            if not piece:
                sfile.close()
                return
            yield piece

def corruption_failure():
    """Display error message and exit"""
    print("download corrupted")
    exit(1)

def main():
    # Open torrent file
    torrent_file = open(sys.argv[1], "rb")
    metainfo = bencode.bdecode(torrent_file.read())
    info = metainfo['info']
    pieces = StringIO.StringIO(info['pieces'])
    # Iterate through pieces
    for piece in pieces_generator(info):
        # Compare piece hash with expected hash
        piece_hash = hashlib.sha1(piece).digest()
        if (piece_hash != pieces.read(20)):
            corruption_failure()
    # ensure we've read all pieces
    if pieces.read():
        corruption_failure()

if __name__ == "__main__":
    main()
Here's how I've extracted the hash value from a torrent file:
#!/usr/bin/python
import sys, os, hashlib, StringIO
import bencode

def main():
    # Open torrent file
    torrent_file = open(sys.argv[1], "rb")
    metainfo = bencode.bdecode(torrent_file.read())
    info = metainfo['info']
    print hashlib.sha1(bencode.bencode(info)).hexdigest()

if __name__ == "__main__":
    main()
It is the same as running the command:
transmissioncli -i test.torrent 2>/dev/null | grep "^hash:" | awk '{print $2}'
Hope it helps :)
According to this, you should be able to find the md5sums of files by searching for the part of the data that looks like:
d[...]6:md5sum32:[hash is here][...]e
(SHA is not part of the spec)
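A minimal sketch of pulling that optional field with the same bencode package (note the md5sum key is rarely present in practice):

import sys
import bencode

with open(sys.argv[1], "rb") as f:
    info = bencode.bdecode(f.read())['info']

# single-file torrents keep the optional md5sum key in info itself;
# multi-file torrents keep one per entry in info['files']
for entry in info.get('files', [info]):
    print entry.get('md5sum')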
