I am using savez to save the weights. Following is my code:
class vgg16:
    def __init__(self, imgs1, imgs2, weights=None, sess=None):
        .........
        self.weight_list = []
        self.keys = []
        ........
        self.SaveWeights()
        ....neural network............

    def SaveWeights(self):
        tmp = file("vgg16_predict.npz", 'wb')
        np.savez(self, **dict(zip(self.keys, self.weight_list)))
        tmp.close
I keep getting a pickling error. There are various solutions out there, but is there an easy way to make this work?
Here is the traceback:
Traceback (most recent call last):
File "f.py", line 350, in <module>
vgg = vgg16(imgs1,imgs2, 'vgg16_weights.npz', sess)
File "f.py", line 43, in __init__
self.SaveWeights()
File "f.py", line 339, in SaveWeights
np.savez(self,**dict(zip(self.keys, self.weight_list)))
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/npyio.py", line 574, in savez
_savez(file, args, kwds, False)
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/npyio.py", line 639, in _savez
pickle_kwargs=pickle_kwargs)
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/format.py", line 573, in write_array
pickle.dump(array, fp, protocol=2, **pickle_kwargs)
cPickle.PicklingError: Can't pickle <type 'module'>: attribute lookup __builtin__.module failed
Exception AttributeError: "vgg16 instance has no attribute 'tell'" in <bound method ZipFile.__del__ of <zipfile.ZipFile object at 0x7f812dec99d0>> ignored
np.savez needs a filename (or file object) to write to as its first argument, but you are passing self. Just use the path directly:
np.savez("vgg16_predict.npz", **dict(zip(self.keys, self.weight_list)))
So, this should be your full method:
def SaveWeights(self):
    np.savez("vgg16_predict.npz", **dict(zip(self.keys, self.weight_list)))
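To sanity-check the saved file you can open it again with np.load; a minimal sketch (the available keys are whatever self.keys contained):

import numpy as np

# Reopen the archive written by np.savez and inspect the stored arrays.
data = np.load("vgg16_predict.npz")
print(data.files)                            # the keys passed to np.savez
weights = {k: data[k] for k in data.files}   # key -> weight array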
I am trying to parse a GFF file with Biopython, and am using the sample code from their website. This is the code:
from BCBio import GFF

in_file = "infile.gff"
in_handle = open(in_file)
for rec in GFF.parse(in_handle):
    print(rec)
in_handle.close()
When I run the code I get the following error:
Traceback (most recent call last):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/Bio/SeqIO/Interfaces.py", line 47, in __init__
self.stream = open(source, "r" + mode)
TypeError: expected str, bytes or os.PathLike object, not FakeHandle
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "get_genes_dpt.py", line 37, in <module>
for rec in GFF.parse(in_handle):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 746, in parse
target_lines):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 322, in parse_in_parts
for results in self.parse_simple(gff_files, limit_info, target_lines):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 343, in parse_simple
for results in self._gff_process(gff_files, limit_info, target_lines):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 637, in _gff_process
for out in self._lines_to_out_info(line_gen, limit_info, target_lines):
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 699, in _lines_to_out_info
fasta_recs = self._parse_fasta(FakeHandle(line_iter))
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/BCBio/GFF/GFFParser.py", line 560, in _parse_fasta
return list(SeqIO.parse(in_handle, "fasta"))
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/Bio/SeqIO/__init__.py", line 607, in parse
return iterator_generator(handle)
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/Bio/SeqIO/FastaIO.py", line 183, in __init__
super().__init__(source, mode="t", fmt="Fasta")
File "/Users/juliofdiaz/anaconda2/envs/python37/lib/python3.7/site-packages/Bio/SeqIO/Interfaces.py", line 51, in __init__
if source.read(0) != "":
TypeError: read() takes 1 positional argument but 2 were given
I am not sure how to fix the error; it seems a FakeHandle is being passed where a str is expected. I am running Biopython 1.78 with conda.
I use the numba.jitclass decorator to mark my class for optimization.
I don't know how to specify the signature of the run method that I want to optimize. The method takes an array of ConvertedDocument objects as an argument. It seems that numba is not able to figure out the array type on its own, as the following error occurs when I try to invoke the run method in nopython mode:
Traceback (most recent call last):
File "numba_test.py", line 53, in <module>
print run(a)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/dispatcher.py", line 310, in _compile_for_args
raise e
numba.errors.TypingError: Caused By:
Traceback (most recent call last):
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/compiler.py", line 230, in run
stage()
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/compiler.py", line 444, in stage_nopython_frontend
self.locals)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/compiler.py", line 800, in type_inference_stage
infer.propagate()
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 767, in propagate
raise errors[0]
TypingError: Internal error at <numba.typeinfer.ExhaustIterConstraint object at 0x788cc9572d50>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 128, in propagate
constraint(typeinfer)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 264, in __call__
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/errors.py", line 249, in new_error_context
six.reraise(type(newerr), newerr, sys.exc_info()[2])
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/errors.py", line 243, in new_error_context
yield
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 264, in __call__
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
InternalError: local variable 'tp' referenced before assignment
[1] During: typing of exhaust iter at numba_test.py (40)
--%<-----------------------------------------------------------------
File "numba_test.py", line 40
Failed at nopython (nopython frontend)
Internal error at <numba.typeinfer.ExhaustIterConstraint object at 0x788cc9572d50>:
--%<-----------------------------------------------------------------
Traceback (most recent call last):
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 128, in propagate
constraint(typeinfer)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 264, in __call__
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/contextlib.py", line 35, in __exit__
self.gen.throw(type, value, traceback)
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/errors.py", line 249, in new_error_context
six.reraise(type(newerr), newerr, sys.exc_info()[2])
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/errors.py", line 243, in new_error_context
yield
File "/home/clasocki/anaconda2/envs/my_numba_env/lib/python2.7/site-packages/numba/typeinfer.py", line 264, in __call__
raise TypingError("failed to unpack {}".format(tp), loc=self.loc)
InternalError: local variable 'tp' referenced before assignment
[1] During: typing of exhaust iter at numba_test.py (40)
--%<-----------------------------------------------------------------
File "numba_test.py", line 40
This error may have been caused by the following argument(s):
- argument 0: Unsupported array dtype: object
Here's how I specify the numba decorators:
spec = [
    ('profile', numba.typeof(numpy.asarray([1.0, 2.0]))),
    ('word_weights', numba.typeof(numpy.asarray([(1.0, 2.0)])))
]

@numba.jitclass(spec)
class ConvertedDocument(object):
    def __init__(self, profile, word_weights):
        self.profile = profile
        self.word_weights = word_weights

@numba.jit(nopython=True, cache=True)
def run(docs):
    s = 0
    for doc in docs:  # array of documents
        for w_id, weight in doc.word_weights:  # accessing a document's property
            s += weight
    return s
And this is the way the run method is called:
x = numpy.asarray([1.0, 2.0])
y = numpy.asarray([(1.0,2.0), (3.0,4.0)])
a = numpy.asarray([ConvertedDocument(x,y)])
print run(a)
If the numpy array a is replaced with a plain Python list, the exception is as follows:
Failed at nopython (nopython mode backend)
reflected list(instance.jitclass.ConvertedDocument#3bffb70<profile:array(float64, 1d, C),word_weights:array(float64, 2d, C)>): unsupported nested memory-managed object
Does anyone know how to specify a method signature when custom types are used, or whether iteration over an array of objects is supported at all?
The problem appears to be that you can't call np.nditer on a jitclass object, which makes sense since a jitclass isn't iterable. It stores its data as a struct of arrays (and other data types) rather than an array of structs, and you are attempting to use it as the latter. It would be ambiguous how you'd iterate over the jitclass object if, in addition to your two array attributes, you also had a bunch of scalar data attributes or arrays of differing sizes.
The error message is admittedly unclear. My advice would be to iterate over the word_weights array you need directly.
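For example, rather than handing the jitted function an object array of ConvertedDocument instances, you can keep the loop over documents in ordinary Python and pass each document's word_weights (a plain 2-D float64 array) to a jitted helper. A minimal sketch of that idea, reusing the sum from the question:

import numba
import numpy

@numba.jit(nopython=True)
def sum_weights(word_weights):
    # word_weights is one document's 2-D float64 array of (word_id, weight)
    # rows, a type numba can infer without any custom signature.
    s = 0.0
    for i in range(word_weights.shape[0]):
        s += word_weights[i, 1]
    return s

y = numpy.asarray([(1.0, 2.0), (3.0, 4.0)])
print sum_weights(y)  # 6.0; the loop over documents stays outside the jitted code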
I have the following code, which decorates the class:
import dill
from collections import namedtuple
from multiprocessing import Process

def proxified(to_be_proxied):
    b = namedtuple('d', [])
    class Proxy(to_be_proxied, b):
        pass
    Proxy.__name__ = to_be_proxied.__name__
    return Proxy

@proxified
class MyClass:
    def f(self):
        print('hello!')

pickled_cls = dill.dumps(MyClass)

def new_process(clazz):
    dill.loads(clazz)().f()

p = Process(target=new_process, args=(pickled_cls,))
p.start()
p.join()
When I try to pickle the decorated class, I get the following error:
Traceback (most recent call last):
File "/usr/lib/python3.5/pickle.py", line 907, in save_global
obj2, parent = _getattribute(module, name)
File "/usr/lib/python3.5/pickle.py", line 265, in _getattribute
.format(name, obj))
AttributeError: Can't get local attribute 'proxified.<locals>.Proxy' on <function proxified at 0x7fbf7de4b8c8>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/carbolymer/example.py", line 108, in <module>
pickled_cls = dill.dumps(MyClass)
File "/usr/lib/python3.5/site-packages/dill/dill.py", line 243, in dumps
dump(obj, file, protocol, byref, fmode, recurse)#, strictio)
File "/usr/lib/python3.5/site-packages/dill/dill.py", line 236, in dump
pik.dump(obj)
File "/usr/lib/python3.5/pickle.py", line 408, in dump
self.save(obj)
File "/usr/lib/python3.5/pickle.py", line 475, in save
f(self, obj) # Call unbound method with explicit self
File "/usr/lib/python3.5/site-packages/dill/dill.py", line 1189, in save_type
StockPickler.save_global(pickler, obj)
File "/usr/lib/python3.5/pickle.py", line 911, in save_global
(obj, module_name, name))
_pickle.PicklingError: Can't pickle <class '__main__.proxified.<locals>.Proxy'>: it's not found as __main__.proxified.<locals>.Proxy
How can I pickle the decorated class using dill? I would like to pass this class to a separate process as an argument. Or maybe there is a simpler way to do it?
A good explanation of "Why pickling decorated functions is painful", provided by Gaël Varoquaux, can be found here.
Basically, rewriting the decorator to copy the class's metadata, the way functools.wraps does for functions, can avoid these issues :)
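A minimal sketch of that idea applied to this decorator; copying __qualname__ and __module__ (my addition, mirroring what functools.wraps does for functions) lets pickle find the class under its module-level name instead of proxified.<locals>.Proxy:

import dill
from collections import namedtuple
from multiprocessing import Process

def proxified(to_be_proxied):
    b = namedtuple('d', [])

    class Proxy(to_be_proxied, b):
        pass

    # Copy the naming metadata, as functools.wraps would for a function.
    Proxy.__name__ = to_be_proxied.__name__
    Proxy.__qualname__ = to_be_proxied.__qualname__
    Proxy.__module__ = to_be_proxied.__module__
    return Proxy

@proxified
class MyClass:
    def f(self):
        print('hello!')

def new_process(clazz):
    dill.loads(clazz)().f()

if __name__ == '__main__':
    pickled_cls = dill.dumps(MyClass)  # now resolvable as __main__.MyClass
    p = Process(target=new_process, args=(pickled_cls,))
    p.start()
    p.join()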
Traceback (most recent call last):
File "E:\blahblahblah\emailsend.py", line 26, in <module>
msg.attach(MIMEText(file))
File "E:\blahblahblah\Python 2.7.11\lib\email\mime\text.py", line 30, in __init__
self.set_payload(_text, _charset)
File "E:\blahblahblah\Python 2.7.11\lib\email\message.py", line 226, in set_payload
self.set_charset(charset)
File "E:\blahblahblah\Python 2.7.11\lib\email\message.py", line 268, in set_charset
cte(self)
File "E:\blahblahblah\Python 2.7.11\lib\email\encoders.py", line 73, in encode_7or8bit
orig.encode('ascii')
AttributeError: 'file' object has no attribute 'encode'
I've been looking this up a lot but I haven't found an answer.
The only important part of the code is this:
file = open('newfile.txt')
msg.attach(MIMEText(file))
There are other parts, but I've debugged it and the error comes from the 'msg.attach(MIMEText(file))' line.
Any help?
MIMEText takes the content of the file, not the file object.
msg.attach(MIMEText(open("newfile.txt").read()))
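For context, a slightly fuller sketch (msg and the file name come from the question; the multipart container is an assumption about the surrounding code):

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

msg = MIMEMultipart()

# MIMEText wants the text itself, so read the file before attaching it.
with open('newfile.txt') as f:
    msg.attach(MIMEText(f.read()))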
I'm not able to pickle a pyzabbix.ZabbixAPI class instance with the following code:
from pyzabbix import ZabbixAPI
from pickle import dumps
api = ZabbixAPI('http://platform.autuitive.com/monitoring/')
print dumps(api)
It results in the following error:
Traceback (most recent call last):
File "zabbix_test.py", line 8, in <module>
print dumps(api)
File "/usr/lib/python2.7/pickle.py", line 1374, in dumps
Pickler(file, protocol).dump(obj)
File "/usr/lib/python2.7/pickle.py", line 224, in dump
self.save(obj)
File "/usr/lib/python2.7/pickle.py", line 306, in save
rv = reduce(self.proto)
File "/usr/lib/python2.7/copy_reg.py", line 84, in _reduce_ex
dict = getstate()
TypeError: 'ZabbixAPIObjectClass' object is not callable
Well, in the documentation it says:
instances of such classes whose __dict__ or the result of calling
__getstate__() is picklable (see section The pickle protocol for details).
And it would appear this class isn't one. If you really need to do this, then consider writing your own pickling routines.
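If you do go down that road, one minimal sketch, assuming the only state worth restoring is the server URL (the PicklableZabbixAPI name is mine, and any login/session state would have to be re-established after unpickling):

from pickle import dumps
from pyzabbix import ZabbixAPI

class PicklableZabbixAPI(ZabbixAPI):
    def __init__(self, url):
        super(PicklableZabbixAPI, self).__init__(url)
        self._pickle_url = url

    def __reduce__(self):
        # Rebuild the instance from its URL instead of pickling __dict__,
        # which holds unpicklable helper objects.
        return (self.__class__, (self._pickle_url,))

api = PicklableZabbixAPI('http://platform.autuitive.com/monitoring/')
print dumps(api)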