I'm trying to implement an encryption/decryption function in python2.
This is the encryption scheme:
However, I'm getting a 'method-wrapper' object is not iterable error in the AES CTR function of the pycrypto library
This is the stacktrace:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-1c44f21fcf83> in <module>()
45 l = random.randint(16,48)
46 m = random_bytes(l)
---> 47 assert specialDecryption(k, specialEncryption(k, m)) == m
<ipython-input-5-1c44f21fcf83> in specialEncryption(k, m)
7 # compute PRF
8 r = random_bytes(KEYLENGTH/8)
----> 9 prf = lengthQuadruplingPRF(k, r)
10
11 # xor
<ipython-input-4-59fb6141461b> in lengthQuadruplingPRF(k, r)
34 assert len(k) == KEYLENGTH/8
35 assert len(r) <= KEYLENGTH/8
---> 36 obj = AES.new(k, AES.MODE_CTR, counter=make_counter())
37 output = obj.encrypt(r*4)
38 return output
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/AES.pyc in new(key, mode, *args, **kwargs)
204
205 kwargs["add_aes_modes"] = True
--> 206 return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
207
208
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/__init__.pyc in _create_cipher(factory, key, mode, *args, **kwargs)
77 raise TypeError("IV is not meaningful for the ECB mode")
78
---> 79 return modes[mode](factory, **kwargs)
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/_mode_ctr.pyc in _create_ctr_cipher(factory, **kwargs)
323 # 'counter' used to be a callable object, but now it is
324 # just a dictionary for backward compatibility.
--> 325 _counter = dict(counter)
326 try:
327 counter_len = _counter.pop("counter_len")
TypeError: 'method-wrapper' object is not iterable
Here's the code:
if __name__ == '__main__':
k = os.urandom(KEYLENGTH/8) # generate key
l = random.randint(16,48)
m = os.urandom(l)
c = specialEncryption(k, m) ## FIRST IN THE FAILURE STACK
def specialEncryption(k, m):
... other code
# compute PRF
r = os.urandom(KEYLENGTH/8)
prf = lengthQuadruplingPRF(k, r) ## SECOND IN THE FAIL STACK
... other code
def make_counter():
import struct
def gen():
i = 0;
while True:
yield struct.pack('>QQ', 0, i)
i += 1
return gen().next
def lengthQuadruplingPRF(k, r):
# Input: 16 byte key, 16 byte value
# Output: 64 byte pseudorandom bytes
obj = AES.new(k, AES.MODE_CTR, counter=make_counter()) ## FAILS HERE
output = obj.encrypt(r*4)
return output
Your counter should be an iterable capable of initializing a dict, not a bound method.
I suspect changing your make_counter function to:
return gen()
from:
return gen().next
is enough to fix it.
Related
I'm running jupyter lab on windows and fastai.vision.utils.verify_images(fns) is giving me problems because it calls fastcore.parallel.parallel with default n_workers=8. There are many ways around it, but I was trying to figure out a code block that I could slap in any notebook and have it so all underlying calls to parallel will run with n_workers=1.
I tried the following cell:
import fastcore
import sys
_fastcore = fastcore
_parallel = lambda *args, **kwargs: fastcore.parallel.parallel(*args, **kwargs, n_workers=1)
_fastcore.parallel.parallel = _parallel
sys.modules['fastcore'] = _fastcore
fastcore.parallel.parallel
printing
<function __main__.<lambda>(*args, **kwargs)>
but when I try running verify_images it still fails as if the patch never happened
---------------------------------------------------------------------------
BrokenProcessPool Traceback (most recent call last)
<ipython-input-37-f1773f2c9e62> in <module>
3 # from mock import patch
4 # with patch('fastcore.parallel.parallel') as _parallel:
----> 5 failed = verify_images(fns)
6 # failed = L(fns[i] for i,o in enumerate(_parallel(verify_image, fns)) if not o)
7 failed
~\anaconda3\lib\site-packages\fastai\vision\utils.py in verify_images(fns)
59 def verify_images(fns):
60 "Find images in `fns` that can't be opened"
---> 61 return L(fns[i] for i,o in enumerate(parallel(verify_image, fns)) if not o)
62
63 # Cell
~\anaconda3\lib\site-packages\fastcore\parallel.py in parallel(f, items, n_workers, total, progress, pause, threadpool, timeout, chunksize, *args, **kwargs)
121 if total is None: total = len(items)
122 r = progress_bar(r, total=total, leave=False)
--> 123 return L(r)
124
125 # Cell
~\anaconda3\lib\site-packages\fastcore\foundation.py in __call__(cls, x, *args, **kwargs)
95 def __call__(cls, x=None, *args, **kwargs):
96 if not args and not kwargs and x is not None and isinstance(x,cls): return x
---> 97 return super().__call__(x, *args, **kwargs)
98
99 # Cell
~\anaconda3\lib\site-packages\fastcore\foundation.py in __init__(self, items, use_list, match, *rest)
103 def __init__(self, items=None, *rest, use_list=False, match=None):
104 if (use_list is not None) or not is_array(items):
--> 105 items = listify(items, *rest, use_list=use_list, match=match)
106 super().__init__(items)
107
~\anaconda3\lib\site-packages\fastcore\basics.py in listify(o, use_list, match, *rest)
54 elif isinstance(o, list): res = o
55 elif isinstance(o, str) or is_array(o): res = [o]
---> 56 elif is_iter(o): res = list(o)
57 else: res = [o]
58 if match is not None:
~\anaconda3\lib\concurrent\futures\process.py in _chain_from_iterable_of_lists(iterable)
482 careful not to keep references to yielded objects.
483 """
--> 484 for element in iterable:
485 element.reverse()
486 while element:
~\anaconda3\lib\concurrent\futures\_base.py in result_iterator()
609 # Careful not to keep a reference to the popped future
610 if timeout is None:
--> 611 yield fs.pop().result()
612 else:
613 yield fs.pop().result(end_time - time.monotonic())
~\anaconda3\lib\concurrent\futures\_base.py in result(self, timeout)
437 raise CancelledError()
438 elif self._state == FINISHED:
--> 439 return self.__get_result()
440 else:
441 raise TimeoutError()
~\anaconda3\lib\concurrent\futures\_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
I suspect it has to do with fastai.vision.utils using * imports for fastcore. Is there a way to achieve what I want?
Since the parallel function has already been imported into the fastai.vision.utils module, the correct way is to monkeypatch that module rather than fastcore.parallel:
... # your code for custom `parallel` function goes here
import fastai.vision.utils
fastai.vision.utils.parallel = _parallel # assign your custom function here
I am trying to edit my video using moviepy. And when I want to cut a part of it I get error:
AttributeError: 'str' object has no attribute 'duration'
Why ?
from moviepy.editor import *
clip0 = VideoFileClip('08.mkv')
clip0 = clip0.set_audio(f'../Rus_sound/08.mkv'[:-3] + 'mp3')
end = 0
start = 0
lista = [0.4,0.6]
movie1 = '08.mkv'
movie2 = '../Bubble_Background_Video3.mp4'
clip0 = VideoFileClip(movie1)
audio = f'../Rus_sound/{movie1}'[:-3] + 'mp3'
clip1 = clip0.set_audio(audio)
w = clip1.w
h = clip1.h
fps = clip1.fps
clip2 = VideoFileClip(movie2).resize(height=h, width=w).set_fps(fps)
durata = clip1.duration - end
lista = [start] + [i*durata for i in lista ] + [durata]
stocked = []
for i in range(1, len(lista)):
o = i-1
clip = clip1.subclip(lista[o], lista[i])
stocked.append(clip)
if i != len(lista)-1:
stocked.append(clip2)
clip = concatenate_videoclips(stocked, method='compose')
This is my Error traceback:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-11-42faa818ba3e> in <module>
----> 1 clip = clip1.subclip(0, 449.241)
<decorator-gen-152> in subclip(self, t_start, t_end)
~/Anaconda3/lib/python3.8/site-packages/moviepy/decorators.py in wrapper(f, *a, **kw)
87 new_kw = {k: fun(v) if k in varnames else v
88 for (k,v) in kw.items()}
---> 89 return f(*new_a, **new_kw)
90 return decorator.decorator(wrapper)
91
<decorator-gen-151> in subclip(self, t_start, t_end)
~/Anaconda3/lib/python3.8/site-packages/moviepy/decorators.py in apply_to_mask(f, clip, *a, **k)
27 the clip created with f """
28
---> 29 newclip = f(clip, *a, **k)
30 if getattr(newclip, 'mask', None):
31 newclip.mask = f(newclip.mask, *a, **k)
<decorator-gen-150> in subclip(self, t_start, t_end)
~/Anaconda3/lib/python3.8/site-packages/moviepy/decorators.py in apply_to_audio(f, clip, *a, **k)
41 newclip = f(clip, *a, **k)
42 if getattr(newclip, 'audio', None):
---> 43 newclip.audio = f(newclip.audio, *a, **k)
44 return newclip
45
~/Anaconda3/lib/python3.8/site-packages/moviepy/Clip.py in subclip(self, t_start, t_end)
382 t_start = self.duration + t_start # Remember t_start is negative
383
--> 384 if (self.duration is not None) and (t_start > self.duration):
385 raise ValueError("t_start (%.02f) " % t_start +
386 "should be smaller than the clip's " +
AttributeError: 'str' object has no attribute 'duration'
The PROBLEM here, which would have been clear if you read the documentation, is that the set_audio function does not take a string. It takes an AudioFileClip object. moviepy is smart enough not to do the actual work at that point; it just remembers what you wanted for audio. Later, when you try to use that clip, it tries to look up the audio file's duration, and it finds a string where it expected an object.
clip1 = clip0.set_audio(AudioFileClip(audio))
I tried to run multiprocessing with a large dataset.
when i run below script with for loop, the total run time is 1.5 sec.
def get_vars(accessCode, user_profile, wt, meals, instance_method='get_wt_adherence'):
'''
Examples
--------
>> n_cpus = multiprocessing.cpu_count()
>> get_wt_adherence = partial(get_vars, user_profile, wt, meals,
instance_method='get_wt_adherence')
>> pool = multiprocessing.Pool(n_cpus-5)
>> result = pool.map(get_wt_adherence, accessCodes)
>> concated_result = pd.concat(result)
Version
-------
# 2020.03.26 Updated
: Class name edited. 'NOOM' -> 'DATA_GEN'
'''
#
COL_WEEK = ['{}week'.format(i) for i in range(1, 17)]
data_gen = DATA_GEN(accessCode, user_profile, wt, meals)
if instance_method == 'get_wt_adherence':
func = data_gen.get_wt_adherence
elif instance_method == 'get_meal_adherence':
func = data_gen.get_meal_adherence
elif instance_method == 'get_color_food':
func = data_gen.get_color_food
elif instance_method == 'get_daily_cal':
func = data_gen.get_daily_cal
row = pd.DataFrame([func(weeks) for weeks in range(1, 17)]).T
row.columns = COL_WEEK
row['accessCode'] = accessCode
return row
from noom.handler import DATA_GEN
from functools import partial
import multiprocessing
# start_time = time.time()
get_wt = partial(get_vars, user_profile=user_profile, wt=wt_logs, meals=meals, instance_method='get_wt_adherence')
for i in range(10):
get_wt(accessCodes[i])
however, when i tried to run this script usign multiprocessing, the script was not responded
Even, 'accessCodes' is list which has 100 elements.
I suspect the 'get_wt' function using partial module.
n_cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(n_cpus-15)
result_wt = pool.map(get_wt, accessCodes) ; print('wt adherence finished')
pool.close()
How to solve this problem?
the error is below
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-22-73ddf2e21bbd> in <module>
2 n_cpus = multiprocessing.cpu_count()
3 pool = multiprocessing.Pool(n_cpus-15)
----> 4 result_wt = pool.map(get_wt_adherence, accessCodes[1:10]) ; print('wt adherence finished')
5 pool.close()
6 time.time() - start_time
/usr/lib/python3.6/multiprocessing/pool.py in map(self, func, iterable, chunksize)
264 in a list that is returned.
265 '''
--> 266 return self._map_async(func, iterable, mapstar, chunksize).get()
267
268 def starmap(self, func, iterable, chunksize=None):
/usr/lib/python3.6/multiprocessing/pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
/usr/lib/python3.6/multiprocessing/pool.py in _handle_tasks(taskqueue, put, outqueue, pool, cache)
422 break
423 try:
--> 424 put(task)
425 except Exception as e:
426 job, idx = task[:2]
/usr/lib/python3.6/multiprocessing/connection.py in send(self, obj)
204 self._check_closed()
205 self._check_writable()
--> 206 self._send_bytes(_ForkingPickler.dumps(obj))
207
208 def recv_bytes(self, maxlength=None):
/usr/lib/python3.6/multiprocessing/connection.py in _send_bytes(self, buf)
391 n = len(buf)
392 # For wire compatibility with 3.2 and lower
--> 393 header = struct.pack("!i", n)
394 if n > 16384:
395 # The payload is large so Nagle's algorithm won't be triggered
error: 'i' format requires -2147483648 <= number <= 2147483647
My name is Boyu. I am a college student and newbie in python and Gurobi. Currently, one step of my model is solving 5 independent LPs. These LPs are independent and each has the same number of variables and constraints. The only difference between these LPs is the values of the coefficient and they are all known before running the model.
First, I start building 5 LPs sequentially:
from gurobipy import *
from gurobipy import GRB
a={1:2,2:2,3:8,4:7,5:3}
b={1:3,2:5,3:6,4:8,5:5}
c={1:4,2:2,3:3,4:5,5:7}
d={1:1,2:7,3:3,4:2,5:9}
object_val={}
x={}
y={}
z={}
m={}
for i in [1,2,3,4,5]:
# Create a new model
m[i]=Model()
# Create variables
x[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
y[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
z[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
# Set objective
m[i].setObjective(x[i] + y[i] + 2 * z[i] , GRB.MAXIMIZE)
# Add constraint: x + a y + b z <= c
m[i].addConstr(x[i] + a[i] * y[i] + b[i] * z[i] <= c[i])
# Add constraint: x + y >= 1
m[i].addConstr(x[i] + y[i] >= d[i])
Second, I defined the function to solve a single LP model and save it as "test.py":
def test(i):
# Optimize model
m=i[1]
m.optimize()
return m.objVal
Third, I create the input data for the function will solved by parallel:
inputs=[]
for i in [1,2,3,4,5]:
inputs.append([i,m[i]])
Finally, I tried to use "multiprocessing" package to solve these 5 LPs in parallel:
import test
import multiprocessing
if __name__ == '__main__':
pool = multiprocessing.Pool(processes=4)
pool.map(test.test, inputs)
pool.close()
pool.join()
print('done')
However, an error occurs, it said "KeyError: 'getstate'"
KeyError Traceback (most recent call last)
<ipython-input-17-0b3639c06eb3> in <module>()
1 if __name__ == '__main__':
2 pool = multiprocessing.Pool(processes=4)
----> 3 pool.map(test.test, inputs)
4 pool.close()
5 pool.join()
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in map(self, func, iterable, chunksize)
264 in a list that is returned.
265 '''
--> 266 return self._map_async(func, iterable, mapstar, chunksize).get()
267
268 def starmap(self, func, iterable, chunksize=None):
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in _handle_tasks(taskqueue, put, outqueue, pool, cache)
422 break
423 try:
--> 424 put(task)
425 except Exception as e:
426 job, idx = task[:2]
C:\ProgramData\Anaconda3\lib\multiprocessing\connection.py in send(self, obj)
204 self._check_closed()
205 self._check_writable()
--> 206 self._send_bytes(_ForkingPickler.dumps(obj))
207
208 def recv_bytes(self, maxlength=None):
C:\ProgramData\Anaconda3\lib\multiprocessing\reduction.py in dumps(cls, obj, protocol)
49 def dumps(cls, obj, protocol=None):
50 buf = io.BytesIO()
---> 51 cls(buf, protocol).dump(obj)
52 return buf.getbuffer()
53
model.pxi in gurobipy.Model.__getattr__()
KeyError: '__getstate__'
Could anybody give me some help for that? I am a newbie for gurobi and python and it will be really really appreciated if someone can give me some help.
Thanks.
Boyu
You need to create a separate environment for each model instance.
# Assuming: import gurobipy as gp
m[i] = gp.Model(env=gp.Env(""))
For further reference:
https://groups.google.com/forum/#!topic/gurobi/_LztwSqj-14
https://www.gurobi.com/documentation/9.0/refman/py_env2.html
I have a matlab function file called 'calculate_K_matrix.m
' which contains the following code:
function K = calculate_K_matrix(A, B, n)
K = place(A, B, eigs(A)*n)
end
I can call this from matlab like so:
addpath('/home/ash/Dropbox/SimulationNotebooks/Control')
A = [0 1 ;-100 -5]
B = [0 ; 7]
n = 1.1 % how aggressive feedback is
K = calculate_K_matrix(A, B, n)
but when I try to call this from python using the matlab engine API like so:
import matlab
import matlab.engine
eng = matlab.engine.start_matlab()
A = matlab.double([[0, 1],[-100, -5]])
B = matlab.double([[0],[7]])
n = 1.1 double(param initializer=None, param size=None, param is_complex=False)
n_matlab = matlab.double([n])
eng.addpath(r'/home/ash/Dropbox/SimulationNotebooks/Control')
K = eng.calculate_K_matrix(A, B, n_matlab)
Then I get the following error:
In [17]: run test.py
Attempt to execute SCRIPT calculate_K_matrix as a function:
/home/ash/Dropbox/SimulationNotebooks/Control/calculate_K_matrix.m
---------------------------------------------------------------------------
MatlabExecutionError Traceback (most recent call last)
/home/ash/Dropbox/SimulationNotebooks/Control/test.py in <module>()
10
11 eng.addpath(r'/home/ash/Dropbox/SimulationNotebooks/Control')
---> 12 K = eng.calculate_K_matrix(A, B, n_matlab)
/home/ash/anaconda2/envs/python3/lib/python3.5/site-packages/matlab/engine/matlabengine.py in __call__(self, *args, **kwargs)
76 else:
77 return FutureResult(self._engine(), future, nargs, _stdout,
---> 78 _stderr, feval=True).result()
79
80 def __validate_engine(self):
/home/ash/anaconda2/envs/python3/lib/python3.5/site-packages/matlab/engine/futureresult.py in result(self, timeout)
66 raise TypeError(pythonengine.getMessage('TimeoutCannotBeNegative'))
67
---> 68 return self.__future.result(timeout)
69
70 def cancel(self):
/home/ash/anaconda2/envs/python3/lib/python3.5/site-packages/matlab/engine/fevalfuture.py in result(self, timeout)
80 raise TimeoutError(pythonengine.getMessage('MatlabFunctionTimeout'))
81
---> 82 self._result = pythonengine.getFEvalResult(self._future,self._nargout, None, out=self._out, err=self._err)
83 self._retrieved = True
84 return self._result
MatlabExecutionError: Attempt to execute SCRIPT calculate_K_matrix as a function:
/home/ash/Dropbox/SimulationNotebooks/Control/calculate_K_matrix.m
How can I solve this issue?
Use getattr, like:
import matlab.engine
engine = matlab.engine.start_matlab()
engine.cd('<your path>')
getattr(engine, 'calculate_K_matrix')(A, B, n, nargout=0)
Thats how I do it:
import matlab.engine, sys, cmd, logging
class MatlabShell(cmd.Cmd):
prompt = '>>> '
file = None
def __init__(self, engine = None, completekey='tab', stdin=None, stdout=None):
if stdin is not None:
self.stdin = stdin
else:
self.stdin = sys.stdin
if stdout is not None:
self.stdout = stdout
else:
self.stdout = sys.stdout
self.cmdqueue = []
self.completekey = completekey
if engine == None:
try:
print('Matlab Shell v.1.0')
print('Starting matlab...')
self.engine = matlab.engine.start_matlab()
print('\n')
except:
logging.exception('>>> STARTUP FAILED')
input()
else:
self.engine = engine
self.cmdloop()
def do_run(self, line):
try:
getattr(self.engine, line)(nargout=0)
except matlab.engine.MatlabExecutionError:
pass
def default(self, line):
try:
getattr(self.engine, 'eval')(line, nargout=0)
except matlab.engine.MatlabExecutionError:
pass
if __name__ == "__main__":
MatlabShell()
pictures:
Result
function