python read from fd directly into bytearray

Is there a means to read from a file descriptor (not an IO-like object) directly into a bytearray?
Right now I use a temporary FileIO object to mediate, something like:
def fd_readinto(fd, ba):
    fio = io.FileIO(fd, closefd=False)
    return fio.readinto(ba)

There is no function that does this, and your method is already the fastest approach.
I was going to suggest bytearray(mmap), array.fromfile, and even a homebrew os.read() using bytearray and memoryview, but FileIO.readinto is screaming fast. (It makes sense that it would be because it performs only one system call.)
import os
import mmap, io, array
import timeit
fn = 'path-to-largeish-file'
def fd_readinto_mmap(fd, ba):
    m = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    ba.extend(m)
    m.close()

def fd_readinto_fio(fd, ba):
    sz = os.fstat(fd).st_size
    ba2 = bytearray(sz)
    with io.FileIO(fd, closefd=False) as fio:
        fio.readinto(ba2)
    ba.extend(ba2)

def fd_readinto_array(fd, ba):
    ar = array.array('c')
    sz = os.fstat(fd).st_size
    fp = os.fdopen(fd, 'rb')
    ar.fromfile(fp, sz)
    ba.extend(ar)

def fd_readinto_mv(fd, ba):
    stat = os.fstat(fd)
    blksize = getattr(stat, 'st_blksize', 4096)
    bufsize = stat.st_size
    buf = bytearray(bufsize)
    m = memoryview(buf)
    while True:
        b = os.read(fd, blksize)
        s = len(b)
        if not s:
            break
        m[:s], m = b, m[s:]
    writtenbytes = buffer(buf, 0, bufsize - len(m))
    ba.extend(writtenbytes)

setup = """
from __main__ import fn, fd_readinto_mmap, fd_readinto_fio, fd_readinto_array, fd_readinto_mv
import os
openfd = lambda: os.open(fn, os.O_RDONLY)
closefd = lambda fd: os.close(fd)
"""
reps = 2
tests = {
    'fio':   "fd=openfd(); fd_readinto_fio(fd, bytearray()); closefd(fd)",
    'mmap':  "fd=openfd(); fd_readinto_mmap(fd, bytearray()); closefd(fd)",
    'array': "fd=openfd(); fd_readinto_array(fd, bytearray());",
    'mv':    "fd=openfd(); fd_readinto_mv(fd, bytearray()); closefd(fd)",
}
width = max(map(len, tests))
for n, t in tests.iteritems():
    time = timeit.timeit(t, setup, number=reps)
    print ("{:%s} {}" % width).format(n, time)
On my system (OS X 10.14.6, Python 2.7.10), FileIO is the fastest option:
mmap 7.19839119911
array 5.72453403473
mv 0.49933886528
fio 0.299485206604
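For completeness, a minimal Python 3 sketch of the same FileIO.readinto approach (sizing the buffer from fstat is my addition here, not part of the timings above):
import io
import os

def fd_readinto(fd, ba):
    # Size a temporary buffer from the fd, fill it with a single readinto call,
    # then extend the caller's bytearray with the bytes actually read.
    size = os.fstat(fd).st_size
    buf = bytearray(size)
    with io.FileIO(fd, closefd=False) as fio:
        n = fio.readinto(buf)
    ba.extend(buf[:n])
    return n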

Related

National Instruments USB X-SERIES with Python-nidaqmx

I am trying to drive 4 independent channels using the python nidaqmx module and the NI X-series 6341 (PN: 781438-01). I have two analogue outputs and two digital outputs, and I would like all of these streams to be independent of each other. For some reason, when I execute the code only my two analogue outs and the digital out on line 0 fire. I do not see the digital stream on line 1. Does anyone know what may be going on here? I've tried it with another box and get the same behaviour, so I don't think it's hardware related. Here is the code:
import nidaqmx
from numpy import array
from nidaqmx import stream_writers
import numpy as np
from tkinter import filedialog
from numpy import genfromtxt
import pandas as pd
from nidaqmx.constants import LineGrouping
Devs = []
system = nidaqmx.system.System.local()
print(system.driver_version)
for device in system.devices:
    dev = str(device).split('=')[1].split(')')[0]
    Devs.append(dev)
    print(device)
    print(dev)
def detectDelimiter(csvFile):
    with open(csvFile, 'r') as myCsvfile:
        header = myCsvfile.readline()
        if header.find(";") != -1:
            return ";"
        if header.find(",") != -1:
            return ","
        if header.find("\t") != -1:
            return "\t"
My_Data = []
My_Data_unscaled = []
def load_data():
    file_path = filedialog.askopenfilename()
    delim = detectDelimiter(file_path)
    my_data = genfromtxt(file_path, delimiter=delim)
    if len(My_Data) > 0:
        print('Deleting Data in the buffer...')
        My_Data.clear()
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)
    else:
        #original_data = my_data
        #My_Data = []
        My_Data.append(my_data)
        My_Data_unscaled.append(my_data)
load_data()
look = My_Data[0]
e_dataframe = pd.DataFrame(look)
v_step = 20/2**16
e_dataframe[0] = e_dataframe[0]*v_step
e_dataframe[1] = e_dataframe[1]*v_step
samples_new = [e_dataframe[1].T,e_dataframe[0].T]
samples_new = array(samples_new)
dig_samples_new = [e_dataframe[2].T,e_dataframe[2].T]
dig_samples_new = array(dig_samples_new)
dig_samples_new[0,0] = 1
dig_samples_new[0,0] = 1
dig_samples_new[1,0] = 1
def fire_galvos(dev, rate, dig_las):
    #define channels
    channel1 = dev + '/' + 'ao0'  # laser trigger
    channel2 = dev + '/' + 'ao1'  # this is the auxiliary trigger signal
    channel3 = dev + '/line0'
    channel4 = dev + '/line1'
    #define clock
    sample_clock = '/' + dev + '/ao/SampleClock'
    with nidaqmx.Task() as analog_output, nidaqmx.Task() as digital_output:
        dig_las = np.uint32(dig_las)
        #add channels
        analog_output.ao_channels.add_ao_voltage_chan(channel1, 'mychannel1', -10, 10)
        analog_output.ao_channels.add_ao_voltage_chan(channel2, 'mychannel2', -10, 10)
        digital_output.do_channels.add_do_chan(channel3, 'mychannel3')
        digital_output.do_channels.add_do_chan(channel4, 'mychannel4')
        #digital_output.do_channels.add_do_chan(channel4, 'mychannel4', line_grouping=LineGrouping.CHAN_PER_LINE)
        #digital_output.ao_load_impedance = 50
        #define clock timings
        analog_output.timing.cfg_samp_clk_timing(rate=rate, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(samples_new[0]))
        digital_output.timing.cfg_samp_clk_timing(rate=rate, source=sample_clock, sample_mode=nidaqmx.constants.AcquisitionType.FINITE, samps_per_chan=len(dig_samples_new[0]))  #source=sample_clock,
        #writing commands
        writer_ana = stream_writers.AnalogMultiChannelWriter(analog_output.out_stream, auto_start=False)
        writer_dig = stream_writers.DigitalMultiChannelWriter(digital_output.out_stream, auto_start=False)
        #writer_dig = stream_writers.DigitalSingleChannelWriter(digital_output.out_stream, auto_start=False)
        writer_ana.write_many_sample(samples_new)
        writer_dig.write_many_sample_port_uint32(dig_las)
        digital_output.start()
        analog_output.start()
        digital_output.wait_until_done(timeout=60)
        analog_output.wait_until_done(timeout=60)

fire_galvos(dev, 3000, dig_samples_new)

Saving 3D+Time (XYCZT) stacks with Bio-Formats on Python

I'm having some trouble saving 3D+Time TIFF files from NumPy arrays in Python, using the Bio-Formats standard.
For a start, I have a numpy array with 5 dimensions, ordered XYCZT, in my case (267, 518, 1, 331, 3).
Here is the function I have so far:
# Write file to disk
def write(img_XYCZT, path, type='uint16', verbose=True):
    import bioformats.omexml as ome
    import javabridge as jutil
    import bioformats
    import numpy as np
    import os
    import sys

    if verbose:
        print('Dimensions (XYCZT): ' + str(np.shape(img_XYCZT)))
        sys.stdout.flush()

    # Get the new dimensions
    SizeX = np.shape(img_XYCZT)[0]
    SizeY = np.shape(img_XYCZT)[1]
    SizeC = np.shape(img_XYCZT)[2]
    SizeZ = np.shape(img_XYCZT)[3]
    SizeT = np.shape(img_XYCZT)[4]

    # Start JVM for bioformats
    jutil.start_vm(class_path=bioformats.JARS)

    # Getting metadata info
    omexml = ome.OMEXML()
    omexml.image(0).Name = os.path.split(path)[1]
    p = omexml.image(0).Pixels
    assert isinstance(p, ome.OMEXML.Pixels)
    p.SizeX = SizeX
    p.SizeY = SizeY
    p.SizeC = SizeC
    p.SizeT = SizeT
    p.SizeZ = SizeZ
    p.DimensionOrder = ome.DO_XYCZT
    p.PixelType = type
    p.channel_count = SizeC
    p.plane_count = SizeZ
    p.Channel(0).SamplesPerPixel = SizeC
    omexml.structured_annotations.add_original_metadata(ome.OM_SAMPLES_PER_PIXEL, str(SizeC))

    # Converting to omexml
    xml = omexml.to_xml()

    # Write file using Bioformats
    if verbose:
        print ('Writing frames:'),
        sys.stdout.flush()
    for frame in range(SizeT):
        if verbose:
            print('[' + str(frame + 1) + ']'),
            sys.stdout.flush()
        index = frame
        pixel_buffer = bioformats.formatwriter.convert_pixels_to_buffer(img_XYCZT[:, :, :, :, frame], type)
        script = """
        importClass(Packages.loci.formats.services.OMEXMLService,
                    Packages.loci.common.services.ServiceFactory,
                    Packages.loci.formats.out.TiffWriter);
        var service = new ServiceFactory().getInstance(OMEXMLService);
        var metadata = service.createOMEXMLMetadata(xml);
        var writer = new TiffWriter();
        writer.setBigTiff(true);
        writer.setMetadataRetrieve(metadata);
        writer.setId(path);
        writer.setInterleaved(true);
        writer.saveBytes(index, buffer);
        writer.close();
        """
        jutil.run_script(script, dict(path=path, xml=xml, index=index, buffer=pixel_buffer))
    if verbose:
        print ('[Done]')
        sys.stdout.flush()
    if verbose:
        print('File saved on ' + str(path))
        sys.stdout.flush()
Here img_XYCZT is the numpy array and path is where to save the file. The function probably has a lot of redundant metadata handling, but that's me fighting to get it to work somehow...
Checking the saved file in Fiji, the Z information shows up as C channels:
The file simply doesn't have the Z dimension... I've been struggling with this for some time; any help is highly appreciated!
Thanks

Python 3 - Can pickle handle byte objects larger than 4GB?

Based on this comment and the referenced documentation, Pickle 4.0+ from Python 3.4+ should be able to pickle byte objects larger than 4 GB.
However, using python 3.4.3 or python 3.5.0b2 on Mac OS X 10.10.4, I get an error when I try to pickle a large byte array:
>>> import pickle
>>> x = bytearray(8 * 1000 * 1000 * 1000)
>>> fp = open("x.dat", "wb")
>>> pickle.dump(x, fp, protocol = 4)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
OSError: [Errno 22] Invalid argument
Is there a bug in my code or am I misunderstanding the documentation?
Here is a simple workaround for issue 24658. Use pickle.loads or pickle.dumps and break the bytes object into chunks of size 2**31 - 1 to get it in or out of the file.
import pickle
import os.path
file_path = "pkl.pkl"
n_bytes = 2**31
max_bytes = 2**31 - 1
data = bytearray(n_bytes)
## write
bytes_out = pickle.dumps(data)
with open(file_path, 'wb') as f_out:
    for idx in range(0, len(bytes_out), max_bytes):
        f_out.write(bytes_out[idx:idx+max_bytes])
## read
bytes_in = bytearray(0)
input_size = os.path.getsize(file_path)
with open(file_path, 'rb') as f_in:
    for _ in range(0, input_size, max_bytes):
        bytes_in += f_in.read(max_bytes)
data2 = pickle.loads(bytes_in)
assert(data == data2)
To sum up what was answered in the comments:
Yes, Python can pickle byte objects bigger than 4GB. The observed error is caused by a bug in the implementation (see Issue24658).
Here is the full workaround, though it seems pickle.load no longer chokes on huge files (I am on Python 3.5.2), so strictly speaking only pickle.dump needs this to work properly.
import pickle
class MacOSFile(object):

    def __init__(self, f):
        self.f = f

    def __getattr__(self, item):
        return getattr(self.f, item)

    def read(self, n):
        # print("reading total_bytes=%s" % n, flush=True)
        if n >= (1 << 31):
            buffer = bytearray(n)
            idx = 0
            while idx < n:
                batch_size = min(n - idx, (1 << 31) - 1)
                # print("reading bytes [%s,%s)..." % (idx, idx + batch_size), end="", flush=True)
                buffer[idx:idx + batch_size] = self.f.read(batch_size)
                # print("done.", flush=True)
                idx += batch_size
            return buffer
        return self.f.read(n)

    def write(self, buffer):
        n = len(buffer)
        print("writing total_bytes=%s..." % n, flush=True)
        idx = 0
        while idx < n:
            batch_size = min(n - idx, (1 << 31) - 1)
            print("writing bytes [%s, %s)... " % (idx, idx + batch_size), end="", flush=True)
            self.f.write(buffer[idx:idx + batch_size])
            print("done.", flush=True)
            idx += batch_size

def pickle_dump(obj, file_path):
    with open(file_path, "wb") as f:
        return pickle.dump(obj, MacOSFile(f), protocol=pickle.HIGHEST_PROTOCOL)

def pickle_load(file_path):
    with open(file_path, "rb") as f:
        return pickle.load(MacOSFile(f))
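Usage would then look something like this (just a sketch; the object and the file name "x.dat" are examples, not part of the answer above):
data = bytearray(8 * 1000 * 1000 * 1000)   # the ~8 GB payload from the question
pickle_dump(data, "x.dat")
restored = pickle_load("x.dat")
assert data == restored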
You can specify the protocol for the dump.
If you do pickle.dump(obj,file,protocol=4) it should work.
Reading a file in 2 GB chunks takes twice as much memory as needed if bytes concatenation is performed; my approach to loading pickles is based on a bytearray:
class MacOSFile(object):

    def __init__(self, f):
        self.f = f

    def __getattr__(self, item):
        return getattr(self.f, item)

    def read(self, n):
        if n >= (1 << 31):
            buffer = bytearray(n)
            pos = 0
            while pos < n:
                size = min(n - pos, (1 << 31) - 1)
                chunk = self.f.read(size)
                buffer[pos:pos + size] = chunk
                pos += size
            return buffer
        return self.f.read(n)
Usage:
with open("/path", "rb") as fin:
obj = pickle.load(MacOSFile(fin))
Had the same issue and fixed it by upgrading to Python 3.6.8.
This seems to be the PR that did it: https://github.com/python/cpython/pull/9937
I also found this issue. To solve it, I chunk the work into several iterations. Say I have 50,000 documents for which I have to compute tf-idf and do kNN classification. When I run it and iterate over all 50,000 directly, it gives me "that error". So, to solve the problem, I process the data in chunks.
tokenized_documents = self.load_tokenized_preprocessing_documents()
idf = self.load_idf_41227()
doc_length = len(documents)
for iteration in range(0, 9):
    tfidf_documents = []
    for index in range(iteration, 4000):
        doc_tfidf = []
        for term in idf.keys():
            tf = self.term_frequency(term, tokenized_documents[index])
            doc_tfidf.append(tf * idf[term])
        doc = documents[index]
        tfidf = [doc_tfidf, doc[0], doc[1]]
        tfidf_documents.append(tfidf)
        print("{} from {} document {}".format(index, doc_length, doc[0]))
    self.save_tfidf_41227(tfidf_documents, iteration)

Getting the memory layout out of an (avr) ELF file by using python + pyElftools

I am creating my own bootloader for an ATXmega128A4U. To use the bootloader I want to transform the ELF file of the firmware into a memory map used in the ATXmega.
For that I use Python and the module "pyelftools". Its documentation is poor, so I have run into a problem: I do not know what information I can use to get the address, offset etc. from the data in the sections.
My goal is to create a bytearray, copy the data/code into it and transfer it to the bootloader. Below is my code:
import sys
# If pyelftools is not installed, the example can also run from the root or
# examples/ dir of the source distribution.
sys.path[0:0] = ['.', '..']
from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile
# 128k flash for the ATXmega128a4u
flashsize = 128 * 1024
def process_file(filename):
    with open(filename, 'rb') as f:
        # get the data
        elffile = ELFFile(f)
        dataSec = elffile.get_section_by_name(b'.data')
        textSec = elffile.get_section_by_name(b'.text')

        # prepare the memory
        flashMemory = bytearray(flashsize)

        # the data section
        startAddr = dataSec.header.sh_offset
        am = dataSec.header.sh_size
        i = 0
        while i < am:
            val = dataSec.stream.read(1)
            flashMemory[startAddr] = val[0]
            startAddr += 1
            i += 1

        # the text section
        startAddr = textSec.header.sh_offset
        am = textSec.header.sh_size
        i = 0
        while i < am:
            print(str(startAddr) + ' : ' + str(i))
            val = textSec.stream.read(1)
            flashMemory[startAddr] = val[0]
            startAddr += 1
            i += 1
    print('finished')

if __name__ == '__main__':
    process_file('firmware.elf')
Hope someone can tell me how to solve this problem.
I managed to solve the problem.
Don't read the data manually from the stream via "textSec.stream.read"; use "textSec.data()" instead. Internally (see "sections.py") a seek operation on the file is done and afterwards the data is read. The result is the valid data chunk.
The following code reads the code (text) section of an ATXmega firmware and copies it into a bytearray which has the layout of the flash of an ATXmega128A4U device.
@vlas_tepesch: the hex conversion is not needed and the 64k pitfall is avoided.
sys.path[0:0] = ['.', '..']
from elftools.common.py3compat import bytes2str
from elftools.elf.elffile import ELFFile
# 128k flash for the ATXmega128a4u
flashsize = 128 * 1024
def __printSectionInfo(s):
    print('[{nr}] {name} {type} {addr} {offs} {size}'.format(
        nr=s.header['sh_name'],
        name=s.name,
        type=s.header['sh_type'],
        addr=s.header['sh_addr'],
        offs=s.header['sh_offset'],
        size=s.header['sh_size']
        )
    )

def process_file(filename):
    print('In file: ' + filename)
    with open(filename, 'rb') as f:
        # get the data
        elffile = ELFFile(f)
        print('sections:')
        for s in elffile.iter_sections():
            __printSectionInfo(s)
        print('get the code from the .text section')
        textSec = elffile.get_section_by_name(b'.text')
        # prepare the memory
        flashMemory = bytearray(flashsize)
        # the text section
        startAddr = textSec.header['sh_addr']
        val = textSec.data()
        flashMemory[startAddr:startAddr + len(val)] = val
        # print memory
    print('finished')

if __name__ == '__main__':
    process_file('firmware.elf')
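If it helps, a small follow-on sketch (my own assumption about the next step, not part of the answer above): dump the prepared flash image to a raw binary file that a host-side bootloader tool could then transfer to the device.
def save_flash_image(flashMemory, out_path='firmware.bin'):
    # Write the 128k flash image as a raw binary blob; out_path is just an example name.
    with open(out_path, 'wb') as out:
        out.write(flashMemory)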
Thanks for the comments!

compute crc of file in python

I want to calculate the CRC of a file and get output like E45A12AC. Here's my code:
#!/usr/bin/env python
import os, sys
import zlib
def crc(fileName):
    fd = open(fileName, "rb")
    content = fd.readlines()
    fd.close()
    for eachLine in content:
        zlib.crc32(eachLine)

for eachFile in sys.argv[1:]:
    crc(eachFile)
This calculates the CRC for each line, but its output (e.g. -1767935985) is not what I want.
hashlib works the way I want, but it computes the MD5:
import hashlib
m = hashlib.md5()
for line in open('data.txt', 'rb'):
    m.update(line)
print m.hexdigest()
Is it possible to get something similar using zlib.crc32?
A little more compact and optimized code
def crc(fileName):
    prev = 0
    for eachLine in open(fileName, "rb"):
        prev = zlib.crc32(eachLine, prev)
    return "%X" % (prev & 0xFFFFFFFF)
PS2: The old PS is deprecated (and therefore deleted) because of the suggestion in the comment. Thank you. I don't get how I missed it, but it was really good.
A modified version of kobor42's answer, with performance improved by a factor 2-3 by reading fixed size chunks instead of "lines":
import zlib
def crc32(fileName):
    with open(fileName, 'rb') as fh:
        hash = 0
        while True:
            s = fh.read(65536)
            if not s:
                break
            hash = zlib.crc32(s, hash)
        return "%08X" % (hash & 0xFFFFFFFF)
Also includes leading zeroes in the returned string.
hashlib-compatible interface for CRC-32 support:
import zlib
class crc32(object):
    name = 'crc32'
    digest_size = 4
    block_size = 1

    def __init__(self, arg=''):
        self.__digest = 0
        self.update(arg)

    def copy(self):
        copy = super(self.__class__, self).__new__(self.__class__)
        copy.__digest = self.__digest
        return copy

    def digest(self):
        return self.__digest

    def hexdigest(self):
        return '{:08x}'.format(self.__digest)

    def update(self, arg):
        self.__digest = zlib.crc32(arg, self.__digest) & 0xffffffff

# Now you can define hashlib.crc32 = crc32
import hashlib
hashlib.crc32 = crc32

# Python > 2.7: hashlib.algorithms += ('crc32',)
# Python > 3.2: hashlib.algorithms_available.add('crc32')
To show any integer's lowest 32 bits as 8 hexadecimal digits, without sign, you can "mask" the value by bit-and'ing it with a mask made of 32 bits all at value 1, then apply formatting. I.e.:
>>> x = -1767935985
>>> format(x & 0xFFFFFFFF, '08x')
'969f700f'
It's quite irrelevant whether the integer you are thus formatting comes from zlib.crc32 or any other computation whatsoever.
Python 3.8+ (using the walrus operator):
import zlib
def crc32(filename, chunksize=65536):
    """Compute the CRC-32 checksum of the contents of the given filename"""
    with open(filename, "rb") as f:
        checksum = 0
        while chunk := f.read(chunksize):
            checksum = zlib.crc32(chunk, checksum)
        return checksum
chunksize is how many bytes to read from the file at a time. You will get the same CRC for the same file no matter what you set chunksize to (it has to be > 0), but setting it too low might make your code slow, too high might use too much memory.
The result is a 32 bit integer. The CRC-32 checksum of an empty file is 0.
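For instance, to get the same kind of 8-hex-digit output as in the question ("data.txt" is just a placeholder file name):
print(format(crc32("data.txt"), "08X"))   # e.g. E45A12AC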
Edited to include Altren's solution below.
A modified and more compact version of CrouZ's answer, with a slightly improved performance, using a for loop and file buffering:
def forLoopCrc(fpath):
    """With for loop and buffer."""
    crc = 0
    with open(fpath, 'rb', 65536) as ins:
        for x in range(int((os.stat(fpath).st_size / 65536)) + 1):
            crc = zlib.crc32(ins.read(65536), crc)
    return '%08X' % (crc & 0xFFFFFFFF)
Results, on a 6700K with an HDD:
(Note: Retested multiple times and it was consistently faster.)
Warming up the machine...
Finished.
Beginning tests...
File size: 90288KB
Test cycles: 500
With for loop and buffer.
Result 45.24728019630359
CrouZ solution
Result 45.433838356097894
kobor42 solution
Result 104.16215688703986
Altren solution
Result 101.7247863946586
Tested in Python 3.6.4 x64 using the script below:
import os, timeit, zlib, random, binascii
def forLoopCrc(fpath):
    """With for loop and buffer."""
    crc = 0
    with open(fpath, 'rb', 65536) as ins:
        for x in range(int((os.stat(fpath).st_size / 65536)) + 1):
            crc = zlib.crc32(ins.read(65536), crc)
    return '%08X' % (crc & 0xFFFFFFFF)

def crc32(fileName):
    """CrouZ solution"""
    with open(fileName, 'rb') as fh:
        hash = 0
        while True:
            s = fh.read(65536)
            if not s:
                break
            hash = zlib.crc32(s, hash)
        return "%08X" % (hash & 0xFFFFFFFF)

def crc(fileName):
    """kobor42 solution"""
    prev = 0
    for eachLine in open(fileName, "rb"):
        prev = zlib.crc32(eachLine, prev)
    return "%X" % (prev & 0xFFFFFFFF)

def crc32altren(filename):
    """Altren solution"""
    buf = open(filename, 'rb').read()
    hash = binascii.crc32(buf) & 0xFFFFFFFF
    return "%08X" % hash

fpath = r'D:\test\test.dat'
tests = {forLoopCrc: 'With for loop and buffer.',
         crc32: 'CrouZ solution', crc: 'kobor42 solution',
         crc32altren: 'Altren solution'}
count = 500

# CPU, HDD warmup
randomItm = [x for x in tests.keys()]
random.shuffle(randomItm)
print('\nWarming up the machine...')
for c in range(count):
    randomItm[0](fpath)
print('Finished.\n')

# Begin test
print('Beginning tests...\nFile size: %dKB\nTest cycles: %d\n' % (
    os.stat(fpath).st_size / 1024, count))
for x in tests:
    print(tests[x])
    start_time = timeit.default_timer()
    for c in range(count):
        x(fpath)
    print('Result', timeit.default_timer() - start_time, '\n')
It is faster because for loops are faster than while loops (sources: here and here).
Merging the above two snippets gives:
try:
    fd = open(decompressedFile, "rb")
except IOError:
    logging.error("Unable to open the file in readmode:" + decompressedFile)
    return 4
eachLine = fd.readline()
prev = 0
while eachLine:
    prev = zlib.crc32(eachLine, prev)
    eachLine = fd.readline()
fd.close()
There is a faster and more compact way to compute the CRC using binascii:
import binascii
def crc32(filename):
    buf = open(filename, 'rb').read()
    hash = binascii.crc32(buf) & 0xFFFFFFFF
    return "%08X" % hash
You can use base64 to get output like [ERD45FTR]. zlib.crc32 also provides an update option.
import os, sys
import zlib
import base64

def crc(fileName):
    fd = open(fileName, "rb")
    content = fd.readlines()
    fd.close()
    prev = None
    for eachLine in content:
        if not prev:
            prev = zlib.crc32(eachLine)
        else:
            prev = zlib.crc32(eachLine, prev)
    return prev

for eachFile in sys.argv[1:]:
    print base64.b64encode(str(crc(eachFile)))
Solution:
import os, sys
import zlib

def crc(fileName, excludeLine="", includeLine=""):
    try:
        fd = open(fileName, "rb")
    except IOError:
        print "Unable to open the file in readmode:", fileName
        return
    eachLine = fd.readline()
    prev = None
    while eachLine:
        if excludeLine and eachLine.startswith(excludeLine):
            eachLine = fd.readline()
            continue
        if not prev:
            prev = zlib.crc32(eachLine)
        else:
            prev = zlib.crc32(eachLine, prev)
        eachLine = fd.readline()
    fd.close()
    return format(prev & 0xFFFFFFFF, '08x')  # returns 8-digit crc

for eachFile in sys.argv[1:]:
    print crc(eachFile)
I don't really know what (excludeLine="", includeLine="") is for...
