Solving multiple independent LPs in parallel in Python, and a KeyError occurs - python

My name is Boyu. I am a college student and a newbie in Python and Gurobi. Currently, one step of my model involves solving 5 independent LPs. Each LP has the same number of variables and constraints; the only difference between them is the values of the coefficients, all of which are known before running the model.
First, I build the 5 LPs sequentially:
from gurobipy import *
from gurobipy import GRB

a={1:2,2:2,3:8,4:7,5:3}
b={1:3,2:5,3:6,4:8,5:5}
c={1:4,2:2,3:3,4:5,5:7}
d={1:1,2:7,3:3,4:2,5:9}

object_val={}
x={}
y={}
z={}
m={}

for i in [1,2,3,4,5]:
    # Create a new model
    m[i] = Model()
    # Create variables
    x[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
    y[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
    z[i] = m[i].addVar(vtype=GRB.CONTINUOUS)
    # Set objective
    m[i].setObjective(x[i] + y[i] + 2 * z[i], GRB.MAXIMIZE)
    # Add constraint: x + a y + b z <= c
    m[i].addConstr(x[i] + a[i] * y[i] + b[i] * z[i] <= c[i])
    # Add constraint: x + y >= d
    m[i].addConstr(x[i] + y[i] >= d[i])
Second, I define the function that solves a single LP model and save it as "test.py":
def test(i):
    # i is a pair [index, model]; optimize the model
    m = i[1]
    m.optimize()
    return m.objVal
Third, I create the input data for the function that will be solved in parallel:
inputs = []
for i in [1,2,3,4,5]:
    inputs.append([i, m[i]])
Finally, I tried to use the "multiprocessing" package to solve these 5 LPs in parallel:
import test
import multiprocessing

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    pool.map(test.test, inputs)
    pool.close()
    pool.join()
    print('done')
However, an error occurs: "KeyError: '__getstate__'"
KeyError Traceback (most recent call last)
<ipython-input-17-0b3639c06eb3> in <module>()
1 if __name__ == '__main__':
2 pool = multiprocessing.Pool(processes=4)
----> 3 pool.map(test.test, inputs)
4 pool.close()
5 pool.join()
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in map(self, func, iterable, chunksize)
264 in a list that is returned.
265 '''
--> 266 return self._map_async(func, iterable, mapstar, chunksize).get()
267
268 def starmap(self, func, iterable, chunksize=None):
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
C:\ProgramData\Anaconda3\lib\multiprocessing\pool.py in _handle_tasks(taskqueue, put, outqueue, pool, cache)
422 break
423 try:
--> 424 put(task)
425 except Exception as e:
426 job, idx = task[:2]
C:\ProgramData\Anaconda3\lib\multiprocessing\connection.py in send(self, obj)
204 self._check_closed()
205 self._check_writable()
--> 206 self._send_bytes(_ForkingPickler.dumps(obj))
207
208 def recv_bytes(self, maxlength=None):
C:\ProgramData\Anaconda3\lib\multiprocessing\reduction.py in dumps(cls, obj, protocol)
49 def dumps(cls, obj, protocol=None):
50 buf = io.BytesIO()
---> 51 cls(buf, protocol).dump(obj)
52 return buf.getbuffer()
53
model.pxi in gurobipy.Model.__getattr__()
KeyError: '__getstate__'
Could anybody give me some help with that? I am a newbie to Gurobi and Python, and it would be really appreciated if someone could help.
Thanks.
Boyu

Gurobi Model objects cannot be pickled, which is why multiprocessing fails when it tries to send them to the workers (that is the failing __getstate__ lookup in the traceback). You need to create a separate environment for each model instance.
# Assuming: import gurobipy as gp
m[i] = gp.Model(env=gp.Env(""))
For further reference:
https://groups.google.com/forum/#!topic/gurobi/_LztwSqj-14
https://www.gurobi.com/documentation/9.0/refman/py_env2.html
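Since nothing unpicklable may cross the process boundary, another option is to send each worker only the coefficient data and build and solve the model inside the worker, each with its own environment. A minimal sketch, assuming the data dictionaries a, b, c, d from the question:

import multiprocessing
import gurobipy as gp
from gurobipy import GRB

def solve_one(coeffs):
    # Build and solve one LP entirely inside the worker process,
    # with its own environment
    a_i, b_i, c_i, d_i = coeffs
    with gp.Env("") as env, gp.Model(env=env) as model:
        x = model.addVar(vtype=GRB.CONTINUOUS)
        y = model.addVar(vtype=GRB.CONTINUOUS)
        z = model.addVar(vtype=GRB.CONTINUOUS)
        model.setObjective(x + y + 2 * z, GRB.MAXIMIZE)
        model.addConstr(x + a_i * y + b_i * z <= c_i)
        model.addConstr(x + y >= d_i)
        model.optimize()
        return model.objVal

if __name__ == '__main__':
    inputs = [(a[i], b[i], c[i], d[i]) for i in [1, 2, 3, 4, 5]]
    pool = multiprocessing.Pool(processes=4)
    print(pool.map(solve_one, inputs))
    pool.close()
    pool.join()

Only the small coefficient tuples are pickled and sent to the pool, so the Model itself never has to cross the process boundary.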

Related

instance methods in function during Multiprocessing (python)

I tried to run multiprocessing with a large dataset.
When I run the script below with a for loop, the total run time is 1.5 sec.
def get_vars(accessCode, user_profile, wt, meals, instance_method='get_wt_adherence'):
    '''
    Examples
    --------
    >> n_cpus = multiprocessing.cpu_count()
    >> get_wt_adherence = partial(get_vars, user_profile, wt, meals,
                                  instance_method='get_wt_adherence')
    >> pool = multiprocessing.Pool(n_cpus-5)
    >> result = pool.map(get_wt_adherence, accessCodes)
    >> concated_result = pd.concat(result)

    Version
    -------
    # 2020.03.26 Updated
    : Class name edited. 'NOOM' -> 'DATA_GEN'
    '''
    COL_WEEK = ['{}week'.format(i) for i in range(1, 17)]
    data_gen = DATA_GEN(accessCode, user_profile, wt, meals)
    if instance_method == 'get_wt_adherence':
        func = data_gen.get_wt_adherence
    elif instance_method == 'get_meal_adherence':
        func = data_gen.get_meal_adherence
    elif instance_method == 'get_color_food':
        func = data_gen.get_color_food
    elif instance_method == 'get_daily_cal':
        func = data_gen.get_daily_cal
    row = pd.DataFrame([func(weeks) for weeks in range(1, 17)]).T
    row.columns = COL_WEEK
    row['accessCode'] = accessCode
    return row
from noom.handler import DATA_GEN
from functools import partial
import multiprocessing

# start_time = time.time()
get_wt = partial(get_vars, user_profile=user_profile, wt=wt_logs, meals=meals, instance_method='get_wt_adherence')
for i in range(10):
    get_wt(accessCodes[i])
However, when I tried to run this script using multiprocessing, it did not respond, even though 'accessCodes' is a list with only 100 elements.
I suspect the 'get_wt' function built with partial.
n_cpus = multiprocessing.cpu_count()
pool = multiprocessing.Pool(n_cpus-15)
result_wt = pool.map(get_wt, accessCodes) ; print('wt adherence finished')
pool.close()
How can I solve this problem?
The error is below:
---------------------------------------------------------------------------
error Traceback (most recent call last)
<ipython-input-22-73ddf2e21bbd> in <module>
2 n_cpus = multiprocessing.cpu_count()
3 pool = multiprocessing.Pool(n_cpus-15)
----> 4 result_wt = pool.map(get_wt_adherence, accessCodes[1:10]) ; print('wt adherence finished')
5 pool.close()
6 time.time() - start_time
/usr/lib/python3.6/multiprocessing/pool.py in map(self, func, iterable, chunksize)
264 in a list that is returned.
265 '''
--> 266 return self._map_async(func, iterable, mapstar, chunksize).get()
267
268 def starmap(self, func, iterable, chunksize=None):
/usr/lib/python3.6/multiprocessing/pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
/usr/lib/python3.6/multiprocessing/pool.py in _handle_tasks(taskqueue, put, outqueue, pool, cache)
422 break
423 try:
--> 424 put(task)
425 except Exception as e:
426 job, idx = task[:2]
/usr/lib/python3.6/multiprocessing/connection.py in send(self, obj)
204 self._check_closed()
205 self._check_writable()
--> 206 self._send_bytes(_ForkingPickler.dumps(obj))
207
208 def recv_bytes(self, maxlength=None):
/usr/lib/python3.6/multiprocessing/connection.py in _send_bytes(self, buf)
391 n = len(buf)
392 # For wire compatibility with 3.2 and lower
--> 393 header = struct.pack("!i", n)
394 if n > 16384:
395 # The payload is large so Nagle's algorithm won't be triggered
error: 'i' format requires -2147483648 <= number <= 2147483647
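The struct error at the bottom is the telltale: _send_bytes packs the length of each pickled task into a signed 32-bit int, so a single task larger than 2 GiB overflows it. Because get_wt is a partial carrying user_profile, wt_logs, and meals inside it, those large objects are pickled into every task. A sketch of one common workaround (hypothetical names, assuming get_vars and the data objects are defined as above): ship the large objects to each worker once via a Pool initializer, so each task pickles only the small accessCode.

import multiprocessing

_shared = {}

def init_worker(user_profile, wt, meals):
    # Runs once per worker process; keeps the big objects worker-local
    _shared['user_profile'] = user_profile
    _shared['wt'] = wt
    _shared['meals'] = meals

def get_wt_worker(accessCode):
    # Only the small accessCode travels through the task queue
    return get_vars(accessCode, _shared['user_profile'], _shared['wt'],
                    _shared['meals'], instance_method='get_wt_adherence')

if __name__ == '__main__':
    pool = multiprocessing.Pool(4, initializer=init_worker,
                                initargs=(user_profile, wt_logs, meals))
    result_wt = pool.map(get_wt_worker, accessCodes)
    pool.close()
    pool.join()

On Linux the default fork start method means the initargs are inherited by the workers rather than sent through a pipe, so the 32-bit length limit is never hit.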

Dask getting "FileNotFoundError: [Errno 2] No such file or directory" in the middle of a file

I'm making a bag from a plain txt file - it's got a bunch of reviews, delimited by two newlines. But sometimes - and I really can't predict when - it gives me FileNotFoundError: [Errno 2] No such file or directory: '/mnt/c/Workspaces/Books/Dask/foods.txt' while processing it.
Here's the actual code:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import dask.dataframe as dd
from dask.diagnostics import ProgressBar
import numpy as np
import dask.bag as bag
import os

def get_next_part(file, start_index, span_index=0, blocksize=1000):
    file.seek(start_index)
    buffer = file.read(blocksize + span_index).decode('cp1252')
    delimiter_position = buffer.find('\n\n')
    if delimiter_position == -1:
        return get_next_part(file, start_index, span_index + blocksize)
    else:
        file.seek(start_index)
        return start_index, delimiter_position

def get_item(filename, start_index, delimiter_position, encoding='cp1252'):
    with open(filename, 'rb') as file_handle:
        file_handle.seek(start_index)
        text = file_handle.read(delimiter_position).decode(encoding)
        return dict((element.split(': ')[0], element.split(': ')[1])
                    if len(element.split(': ')) > 1
                    else ('unknown', element)
                    for element in text.strip().split('\n'))

with open(f"{os.getcwd()}/foods.txt", 'rb') as file_handle:
    size = file_handle.seek(0, 2) - 1
    more_data = True
    output = []
    current_position = next_position = 0
    while more_data:
        if current_position >= size:
            more_data = False
        else:
            current_position, next_position = get_next_part(file_handle, current_position, 0)
            output.append((current_position, next_position))
            current_position = current_position + next_position + 2

with ProgressBar():
    reviews = (bag.from_sequence(output, npartitions=104)
               .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
                                       x[0],
                                       x[1]))
               .compute())
Sometimes it works fine, but other times it gives me something along these lines (different percentage every time):
[########## ] | 26% Completed | 54.3s
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-1-90a316620d10> in <module>()
42 with ProgressBar():
43 reviews = (bag.from_sequence(output, npartitions=104)
---> 44 .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
45 x[0],
46 x[1]))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/base.py in compute(self, **kwargs)
154 dask.base.compute
155 """
--> 156 (result,) = compute(self, traverse=False, **kwargs)
157 return result
158
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/base.py in compute(*args, **kwargs)
396 keys = [x.__dask_keys__() for x in collections]
397 postcomputes = [x.__dask_postcompute__() for x in collections]
--> 398 results = schedule(dsk, keys, **kwargs)
399 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
400
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/multiprocessing.py in get(dsk, keys, num_workers, func_loads, func_dumps, optimize_graph, pool, **kwargs)
190 get_id=_process_get_id, dumps=dumps, loads=loads,
191 pack_exception=pack_exception,
--> 192 raise_exception=reraise, **kwargs)
193 finally:
194 if cleanup:
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/local.py in get_async(apply_async, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, **kwargs)
460 _execute_task(task, data) # Re-execute locally
461 else:
--> 462 raise_exception(exc, tb)
463 res, worker_id = loads(res_info)
464 state['cache'][key] = res
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/compatibility.py in reraise(exc, tb)
109 def reraise(exc, tb=None):
110 if exc.__traceback__ is not tb:
--> 111 raise exc.with_traceback(tb)
112 raise exc
113
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/local.py in execute_task()
228 try:
229 task, data = loads(task_info)
--> 230 result = _execute_task(task, data)
231 id = get_id()
232 result = dumps((result, id))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/core.py in _execute_task()
117 func, args = arg[0], arg[1:]
118 args2 = [_execute_task(a, cache) for a in args]
--> 119 return func(*args2)
120 elif not ishashable(arg):
121 return arg
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/bag/core.py in reify()
1589 def reify(seq):
1590 if isinstance(seq, Iterator):
-> 1591 seq = list(seq)
1592 if seq and isinstance(seq[0], Iterator):
1593 seq = list(map(list, seq))
~/anaconda3/envs/py36/lib/python3.6/site-packages/dask/bag/core.py in map_chunk()
1749 else:
1750 for a in zip(*args):
-> 1751 yield f(*a)
1752
1753 # Check that all iterators are fully exhausted
<ipython-input-1-90a316620d10> in <lambda>()
44 .map(lambda x: get_item(f"{os.getcwd()}/foods.txt",
45 x[0],
---> 46 x[1]))
47 .compute())
<ipython-input-1-90a316620d10> in get_item()
18
19 def get_item(filename, start_index, delimiter_position, encoding='cp1252'):
---> 20 with open(filename, 'rb') as file_handle:
21 file_handle.seek(start_index)
22 text = file_handle.read(delimiter_position).decode(encoding)
FileNotFoundError: [Errno 2] No such file or directory: '/mnt/c/Workspaces/Books/Dask/foods.txt'
I've tried messing with the partition numbers - leaving it as the default (101), or making sure it's a multiple of 4. That doesn't seem to have an effect.
Anyone know what's going on here? It usually works if I run it a second time, but that's still tough to deal with.
I'm using the latest version of Dask. Using conda, it's all in JupyterLab, and I'm running it from Windows Subsystem for Linux.
Thanks!
Wasn't able to fix my initial read method, but I was able to find another way of doing the parallel read (with native Dask objects, too!).
Sections were delimited with \n\n, and the linedelimiter argument to bag didn't mean what I thought it meant, but with this I was able to figure out a way to get the sections I needed: Why `linedelimiter` does not work for bag.read_text?
bag.read_text(
    f"{os.getcwd()}/foods.txt",
    encoding="cp1252",
    blocksize="10MB",
    linedelimiter="\n\n",
).map_partitions(lambda x: "".join(x).split("\n\n"))
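For completeness, a hedged usage sketch (assuming the same foods.txt): each partition arrives as a list of strings, so map_partitions re-joins the block and splits it on the real record delimiter before compute materializes the result.

reviews = (
    bag.read_text(
        f"{os.getcwd()}/foods.txt",
        encoding="cp1252",
        blocksize="10MB",          # controls how the file is partitioned
        linedelimiter="\n\n",
    )
    # re-join the partition's text and split on the true delimiter
    .map_partitions(lambda x: "".join(x).split("\n\n"))
    .compute()
)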

Sympy -- AttributeError: 'tuple' object has no attribute 'right'

I am trying to build a simple pendulum model in sympy.
Does anyone know why I would be getting this error?
The tutorial is here:
http://for.rest/articles/The-Spring-Pendulum-with-Sympy/
Here's the code. Note, I modified my code slightly because the tutorial picked a very unintuitive coordinate system (+y pointing down).
import sympy
from sympy import symbols, init_printing
import sympy.physics.mechanics as me
init_printing()
import matplotlib.pyplot as plt
import numpy as np
from pydy.system import System
from numpy import linspace
%matplotlib inline
# Create the variables
L, theta = me.dynamicsymbols('L theta')
# Create the velocities
L_dot, theta_dot = me.dynamicsymbols('L_dot theta_dot')
# Create the constants
m, g, t, L_0 = sympy.symbols('m g t L_0')
# Create the world frame
A = me.ReferenceFrame('A')
# Create the pendulum frame
B = A.orientnew('B', 'axis', [theta, A.z])
# Set the rotation of the pendulum frame
B.set_ang_vel(A, theta_dot * A.z)
# Create the Origin
O = me.Point('O')
# Create the mass point
P = O.locatenew('P', L * -B.y)
# Display the mass point location in the A frame
P.pos_from(O).express(A)
# Set origin velocity to zero
O.set_vel(A, 0)
# Create the velocity of the mass point
P.set_vel(B, L_dot * -B.y)
P.v1pt_theory(O, A, B)
P.vel(A).express(A)
The problem arises in the last line of code there. Note, the object P.vel(A) is of type sympy.physics.vector.vector.Vector, which has express as a method.
Here's the error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\formatters.py in __call__(self, obj)
691 type_pprinters=self.type_printers,
692 deferred_pprinters=self.deferred_printers)
--> 693 printer.pretty(obj)
694 printer.flush()
695 return stream.getvalue()
C:\ProgramData\Anaconda3\lib\site-packages\IPython\lib\pretty.py in pretty(self, obj)
363 if cls in self.type_pprinters:
364 # printer registered in self.type_pprinters
--> 365 return self.type_pprinters[cls](obj, self, cycle)
366 else:
367 # deferred printer
C:\ProgramData\Anaconda3\lib\site-packages\sympy\interactive\printing.py in _print_plain(arg, p, cycle)
66 """caller for pretty, for use in IPython 0.11"""
67 if _can_print_latex(arg):
---> 68 p.text(stringify_func(arg))
69 else:
70 p.text(IPython.lib.pretty.pretty(arg))
C:\ProgramData\Anaconda3\lib\site-packages\sympy\printing\pretty\pretty.py in pretty(expr, **settings)
2162
2163 try:
-> 2164 return pp.doprint(expr)
2165 finally:
2166 pretty_use_unicode(uflag)
C:\ProgramData\Anaconda3\lib\site-packages\sympy\printing\pretty\pretty.py in doprint(self, expr)
60
61 def doprint(self, expr):
---> 62 return self._print(expr).render(**self._settings)
63
64 # empty op so _print(stringPict) returns the same
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\vector\vector.py in render(self, *args, **kwargs)
283 pform = vp._print(
284 ar[i][0][j])
--> 285 pform = prettyForm(*pform.right(" ",
286 ar[i][1].pretty_vecs[j]))
287 else:
AttributeError: 'tuple' object has no attribute 'right'
This creates a separate issue because, without expressing this, I can't run a computation later.
pendulum = me.Particle('pend', P, m)
gravity = m * g * A.y
forces = gravity
kane = me.KanesMethod(A,
                      q_ind=[L, theta],
                      u_ind=[L_dot, theta_dot],
                      kd_eqs=[L_dot - L.diff(t),
                              theta_dot - theta.diff(t)])
Which produces this error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-222-aeb1b3bb40e8> in <module>()
----> 1 fr, frstar = kane.kanes_equations([(P, forces)], [pendulum])
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\mechanics\kane.py in kanes_equations(self, bodies, loads)
537 'kinematic differential equations to use this method.')
538 fr = self._form_fr(loads)
--> 539 frstar = self._form_frstar(bodies)
540 if self._uaux:
541 if not self._udep:
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\mechanics\kane.py in _form_frstar(self, bl)
332 v = [msubs(vel, self._qdot_u_map) for vel in vlist]
333 return partial_velocity(v, self.u, N)
--> 334 partials = [get_partial_velocity(body) for body in bl]
335
336 # Compute fr_star in two components:
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\mechanics\kane.py in <listcomp>(.0)
332 v = [msubs(vel, self._qdot_u_map) for vel in vlist]
333 return partial_velocity(v, self.u, N)
--> 334 partials = [get_partial_velocity(body) for body in bl]
335
336 # Compute fr_star in two components:
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\mechanics\kane.py in get_partial_velocity(body)
326 vlist = [body.masscenter.vel(N), body.frame.ang_vel_in(N)]
327 elif isinstance(body, Particle):
--> 328 vlist = [body.point.vel(N),]
329 else:
330 raise TypeError('The body list may only contain either '
C:\ProgramData\Anaconda3\lib\site-packages\sympy\physics\vector\point.py in vel(self, frame)
453 if not (frame in self._vel_dict):
454 raise ValueError('Velocity of point ' + self.name + ' has not been'
--> 455 ' defined in ReferenceFrame ' + frame.name)
456 return self._vel_dict[frame]
457
ValueError: Velocity of point P has not been defined in ReferenceFrame A
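One possible workaround for the second error, offered as a sketch rather than a confirmed fix and assuming the frames and points defined above: define P's velocity in A explicitly by differentiating its position vector in A, so that KanesMethod finds it in the point's velocity dictionary.

# Hypothetical workaround: make P's velocity in the world frame A explicit
# by differentiating the position vector of P from O in A
P.set_vel(A, P.pos_from(O).dt(A))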

'method-wrapper' object is not iterable in AES CTR pycrypto library

I'm trying to implement an encryption/decryption function in Python 2.
This is the encryption scheme:
However, I'm getting a 'method-wrapper' object is not iterable error in the AES CTR function of the pycrypto library.
This is the stacktrace:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-5-1c44f21fcf83> in <module>()
45 l = random.randint(16,48)
46 m = random_bytes(l)
---> 47 assert specialDecryption(k, specialEncryption(k, m)) == m
<ipython-input-5-1c44f21fcf83> in specialEncryption(k, m)
7 # compute PRF
8 r = random_bytes(KEYLENGTH/8)
----> 9 prf = lengthQuadruplingPRF(k, r)
10
11 # xor
<ipython-input-4-59fb6141461b> in lengthQuadruplingPRF(k, r)
34 assert len(k) == KEYLENGTH/8
35 assert len(r) <= KEYLENGTH/8
---> 36 obj = AES.new(k, AES.MODE_CTR, counter=make_counter())
37 output = obj.encrypt(r*4)
38 return output
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/AES.pyc in new(key, mode, *args, **kwargs)
204
205 kwargs["add_aes_modes"] = True
--> 206 return _create_cipher(sys.modules[__name__], key, mode, *args, **kwargs)
207
208
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/__init__.pyc in _create_cipher(factory, key, mode, *args, **kwargs)
77 raise TypeError("IV is not meaningful for the ECB mode")
78
---> 79 return modes[mode](factory, **kwargs)
/usr/local/lib/python2.7/site-packages/Crypto/Cipher/_mode_ctr.pyc in _create_ctr_cipher(factory, **kwargs)
323 # 'counter' used to be a callable object, but now it is
324 # just a dictionary for backward compatibility.
--> 325 _counter = dict(counter)
326 try:
327 counter_len = _counter.pop("counter_len")
TypeError: 'method-wrapper' object is not iterable
Here's the code:
if __name__ == '__main__':
k = os.urandom(KEYLENGTH/8) # generate key
l = random.randint(16,48)
m = os.urandom(l)
c = specialEncryption(k, m) ## FIRST IN THE FAILURE STACK
def specialEncryption(k, m):
... other code
# compute PRF
r = os.urandom(KEYLENGTH/8)
prf = lengthQuadruplingPRF(k, r) ## SECOND IN THE FAIL STACK
... other code
def make_counter():
import struct
def gen():
i = 0;
while True:
yield struct.pack('>QQ', 0, i)
i += 1
return gen().next
def lengthQuadruplingPRF(k, r):
# Input: 16 byte key, 16 byte value
# Output: 64 byte pseudorandom bytes
obj = AES.new(k, AES.MODE_CTR, counter=make_counter()) ## FAILS HERE
output = obj.encrypt(r*4)
return output
Your counter should be an iterable capable of initializing a dict, not a bound method.
I suspect changing the last line of your make_counter function from:
return gen().next
to:
return gen()
is enough to fix it.
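If that alone doesn't fix it under pycryptodome (the traceback's _mode_ctr calls dict(counter), i.e. it now expects the dictionary produced by Crypto.Util.Counter rather than a callable), a sketch using Counter.new:

from Crypto.Cipher import AES
from Crypto.Util import Counter

def lengthQuadruplingPRF(k, r):
    # Input: 16 byte key, 16 byte value
    # Output: 64 bytes of pseudorandom output
    ctr = Counter.new(128)  # 128-bit counter block, initial value 1
    obj = AES.new(k, AES.MODE_CTR, counter=ctr)
    return obj.encrypt(r * 4)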

Error invalid value when using CUDA [duplicate]

I'm having this error when trying to run this code in Python using CUDA. I'm following this tutorial, but I'm trying it on a Windows 7 x64 machine:
https://www.youtube.com/watch?v=jKV1m8APttU
In fact, I ran check_cuda() and all tests passed. Can anyone help me figure out what the exact issue is here?
My Code:
import numpy as np
from timeit import default_timer as timer
from numbapro import vectorize, cuda

@vectorize(['float64(float64, float64)'], target='gpu')
def VectorAdd(a, b):
    return a + b

def main():
    N = 32000000
    A = np.ones(N, dtype=np.float64)
    B = np.ones(N, dtype=np.float64)
    C = np.zeros(N, dtype=np.float64)

    start = timer()
    C = VectorAdd(A, B)
    vectoradd_time = timer() - start

    print("C[:5] = " + str(C[:5]))
    print("C[-5:] = " + str(C[-5:]))
    print("VectorAdd took %f seconds" % vectoradd_time)

if __name__ == '__main__':
    main()
Error Message:
---------------------------------------------------------------------------
CudaAPIError Traceback (most recent call last)
<ipython-input-18-2436fc2ab63a> in <module>()
1 if __name__ == '__main__':
----> 2 main()
<ipython-input-17-64de53fdbe77> in main()
7
8 start = timer()
----> 9 C = VectorAdd(A, B)
10 vectoradd_time = timer() - start
11
C:\Anaconda2\lib\site-packages\numba\cuda\dispatcher.pyc in __call__(self, *args, **kws)
93 the input arguments.
94 """
---> 95 return CUDAUFuncMechanism.call(self.functions, args, kws)
96
97 def reduce(self, arg, stream=0):
C:\Anaconda2\lib\site-packages\numba\npyufunc\deviceufunc.pyc in call(cls, typemap, args, kws)
297
298 devarys.extend([devout])
--> 299 cr.launch(func, shape[0], stream, devarys)
300
301 if any_device:
C:\Anaconda2\lib\site-packages\numba\cuda\dispatcher.pyc in launch(self, func, count, stream, args)
202
203 def launch(self, func, count, stream, args):
--> 204 func.forall(count, stream=stream)(*args)
205
206 def is_device_array(self, obj):
C:\Anaconda2\lib\site-packages\numba\cuda\compiler.pyc in __call__(self, *args)
193
194 return kernel.configure(blkct, tpb, stream=self.stream,
--> 195 sharedmem=self.sharedmem)(*args)
196
197 class CUDAKernelBase(object):
C:\Anaconda2\lib\site-packages\numba\cuda\compiler.pyc in __call__(self, *args, **kwargs)
357 blockdim=self.blockdim,
358 stream=self.stream,
--> 359 sharedmem=self.sharedmem)
360
361 def bind(self):
C:\Anaconda2\lib\site-packages\numba\cuda\compiler.pyc in _kernel_call(self, args, griddim, blockdim, stream, sharedmem)
431 sharedmem=sharedmem)
432 # Invoke kernel
--> 433 cu_func(*kernelargs)
434
435 if self.debug:
C:\Anaconda2\lib\site-packages\numba\cuda\cudadrv\driver.pyc in __call__(self, *args)
1114
1115 launch_kernel(self.handle, self.griddim, self.blockdim,
-> 1116 self.sharedmem, streamhandle, args)
1117
1118 @property
C:\Anaconda2\lib\site-packages\numba\cuda\cudadrv\driver.pyc in launch_kernel(cufunc_handle, griddim, blockdim, sharedmem, hstream, args)
1158 hstream,
1159 params,
-> 1160 None)
1161
1162
C:\Anaconda2\lib\site-packages\numba\cuda\cudadrv\driver.pyc in safe_cuda_api_call(*args)
220 def safe_cuda_api_call(*args):
221 retcode = libfn(*args)
--> 222 self._check_error(fname, retcode)
223
224 setattr(self, fname, safe_cuda_api_call)
C:\Anaconda2\lib\site-packages\numba\cuda\cudadrv\driver.pyc in _check_error(self, fname, retcode)
250 errname = ERROR_MAP.get(retcode, "UNKNOWN_CUDA_ERROR")
251 msg = "Call to %s results in %s" % (fname, errname)
--> 252 raise CudaAPIError(retcode, msg)
253
254 def get_device(self, devnum=0):
CudaAPIError: [1] Call to cuLaunchKernel results in CUDA_ERROR_INVALID_VALUE
I found a solution to my problem through the NVIDIA Developer Forums. If you want to know more about the solution, check out this link:
https://devtalk.nvidia.com/default/topic/962843/cuda-programming-and-performance/cudaapierror-1-call-to-culaunchkernel-results-in-cuda_error_invalid_value-in-python/?offset=3#4968130
In short:
When I changed N to 32000 or any other smaller amount, it worked nicely.
In fact, this means I was not compiling it for the correct GPU type (check_cuda is the function call to verify it).
Hope my answer helps someone.
This may mean that you are trying to run more threads in one block than are actually allowed. That was the case for me, so try to split your execution into blocks.
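A sketch of that chunking idea, assuming the VectorAdd ufunc from the question: process the arrays in slices so each kernel launch stays within the device's limits.

CHUNK = 1000000  # slice size; tune to your GPU
C = np.empty_like(A)
for start in range(0, N, CHUNK):
    end = min(start + CHUNK, N)
    # each call launches a smaller kernel over just this slice
    C[start:end] = VectorAdd(A[start:end], B[start:end])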
