Using numpy.reshape helped a lot and using map helped a little. Is it possible to speed this up some more?
import pydicom
import numpy as np
import cProfile
import pstats
def parse_coords(contour):
    """Given a contour from a DICOM ROIContourSequence, returns coordinates
    [loop][[x0, x1, x2, ...][y0, y1, y2, ...][z0, z1, z2, ...]]"""
    if not hasattr(contour, "ContourSequence"):
        return []  # empty structure

    def _reshape_contour_data(loop):
        return np.reshape(np.array(loop.ContourData),
                          (3, len(loop.ContourData) // 3),
                          order='F')

    return list(map(_reshape_contour_data, contour.ContourSequence))
def profile_load_contours():
    rs = pydicom.dcmread('RS.gyn1.dcm')
    structs = [parse_coords(contour) for contour in rs.ROIContourSequence]
cProfile.run('profile_load_contours()','prof.stats')
p = pstats.Stats('prof.stats')
p.sort_stats('cumulative').print_stats(30)
Using a real structure set exported from Varian Eclipse.
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 12.165 12.165 {built-in method builtins.exec}
1 0.151 0.151 12.165 12.165 <string>:1(<module>)
1 0.000 0.000 12.014 12.014 load_contour_time.py:19(profile_load_contours)
1 0.000 0.000 11.983 11.983 load_contour_time.py:21(<listcomp>)
56 0.009 0.000 11.983 0.214 load_contour_time.py:7(parse_coords)
50745/33837 0.129 0.000 11.422 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataset.py:455(__getattr__)
50741/33825 0.152 0.000 10.938 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataset.py:496(__getitem__)
16864 0.069 0.000 9.839 0.001 load_contour_time.py:12(_reshape_contour_data)
16915 0.101 0.000 9.780 0.001 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataelem.py:439(DataElement_from_raw)
16915 0.052 0.000 9.300 0.001 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/values.py:320(convert_value)
16864 0.038 0.000 7.099 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/values.py:89(convert_DS_string)
16870 0.042 0.000 7.010 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/valuerep.py:495(MultiString)
16908 1.013 0.000 6.826 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/multival.py:29(__init__)
3004437 3.013 0.000 5.577 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/multival.py:42(number_string_type_constructor)
3038317/3038231 1.037 0.000 3.171 0.000 {built-in method builtins.hasattr}
Much of the time is in convert_DS_string. Is it possible to make it faster? I guess part of the problem is that the coordinates are not stored very efficiently in the DICOM file.
EDIT:
As a way of avoiding the loop at the end of MultiValue.__init__, I am wondering about getting the raw double string of each ContourData and using numpy.fromstring on it. However, I have not been able to get the raw double string.
Eliminating the loop in MultiValue.__init__ and using numpy.fromstring provides more than a 4x speedup. I will post on the pydicom GitHub to see if there is interest in taking this into the library code. It is a little ugly; I would welcome advice on further improvement.
import pydicom
import numpy as np
import cProfile
import pstats
def parse_coords(contour):
    """Given a contour from a DICOM ROIContourSequence, returns coordinates
    [loop][[x0, x1, x2, ...][y0, y1, y2, ...][z0, z1, z2, ...]]"""
    if not hasattr(contour, "ContourSequence"):
        return []  # empty structure
    cd_tag = pydicom.tag.Tag(0x3006, 0x0050)  # ContourData tag

    def _reshape_contour_data(loop):
        val = super(loop.__class__, loop).__getitem__(cd_tag).value
        try:
            double_string = val.decode(encoding='utf-8')
            # chr(92) is '\', the DICOM multi-value delimiter
            double_vec = np.fromstring(double_string, dtype=float, sep=chr(92))
        except AttributeError:  # 'MultiValue' has no 'decode' (bytes does)
            # It's already been converted to doubles and cached
            double_vec = loop.ContourData
        return np.reshape(np.array(double_vec),
                          (3, len(double_vec) // 3),
                          order='F')

    return list(map(_reshape_contour_data, contour.ContourSequence))
def profile_load_contours():
    rs = pydicom.dcmread('RS.gyn1.dcm')
    structs = [parse_coords(contour) for contour in rs.ROIContourSequence]
profile_load_contours()
cProfile.run('profile_load_contours()','prof.stats')
p = pstats.Stats('prof.stats')
p.sort_stats('cumulative').print_stats(15)
Result
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 2.800 2.800 {built-in method builtins.exec}
1 0.017 0.017 2.800 2.800 <string>:1(<module>)
1 0.000 0.000 2.783 2.783 load_contour_time3.py:29(profile_load_contours)
1 0.000 0.000 2.761 2.761 load_contour_time3.py:31(<listcomp>)
56 0.006 0.000 2.760 0.049 load_contour_time3.py:9(parse_coords)
153/109 0.001 0.000 2.184 0.020 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataset.py:455(__getattr__)
149/97 0.001 0.000 2.182 0.022 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataset.py:496(__getitem__)
51 0.000 0.000 2.178 0.043 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataelem.py:439(DataElement_from_raw)
51 0.000 0.000 2.177 0.043 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/values.py:320(convert_value)
44 0.000 0.000 2.176 0.049 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/values.py:255(convert_SQ)
44 0.035 0.001 2.176 0.049 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/filereader.py:427(read_sequence)
152/66 0.000 0.000 2.171 0.033 {built-in method builtins.hasattr}
16920 0.147 0.000 1.993 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/filereader.py:452(read_sequence_item)
16923 0.116 0.000 1.267 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/filereader.py:365(read_dataset)
84616 0.113 0.000 0.699 0.000 /home/cf/python/venv/lib/python3.5/site-packages/pydicom/dataset.py:960(__setattr__)
NumPy version: 1.14.5
Purpose of the 'foo' function:
Finding the Euclidean distance between arrays of shape (1, 512), which represent facial features.
Issue:
The foo function takes ~223.32 ms, but after that, some background operations related to NumPy take 170 seconds for some reason.
Question:
Is keeping arrays in dictionaries and iterating over them a very dangerous use of NumPy arrays?
Request for Advice:
When I keep the arrays stacked in a single array, separate from the dict, the Euclidean distance calculation takes half the time (~120 ms instead of ~250 ms), but overall performance doesn't change much for some reason. Allocating new arrays and stacking them may have cancelled out the benefits of operating on one big array.
I am open to any advice.
Code:
import numpy as np
import time
import uuid
import random
from funcy import print_durations
@print_durations()  # funcy decorator: prints each call's duration and a repr of its arguments
def foo(merged_faces_rec, face):
    t = time.time()
    for uid, feature_list in merged_faces_rec.items():
        dist = np.linalg.norm(np.subtract(feature_list[0], face))
    print("foo inside : ", time.time()-t)
rand_age = lambda : random.choice(["0-18", "18-35", "35-55", "55+"])
rand_gender = lambda : random.choice(["Erkek", "Kadin"])
rand_emo = lambda : random.choice(["happy", "sad", "neutral", "scared"])
date_list = []
emb = lambda : np.random.rand(1, 512)
def generate_faces_rec(d, n=12000):
    for _ in range(n):
        d[uuid.uuid4().hex] = [emb(), rand_gender(), rand_age(), rand_emo(), date_list]
faces_rec1 = dict()
generate_faces_rec(faces_rec1)
faces_rec2 = dict()
generate_faces_rec(faces_rec2)
faces_rec3 = dict()
generate_faces_rec(faces_rec3)
faces_rec4 = dict()
generate_faces_rec(faces_rec4)
faces_rec5 = dict()
generate_faces_rec(faces_rec5)
merged_faces_rec = dict()
st = time.time()
merged_faces_rec.update(faces_rec1)
merged_faces_rec.update(faces_rec2)
merged_faces_rec.update(faces_rec3)
merged_faces_rec.update(faces_rec4)
merged_faces_rec.update(faces_rec5)
t2 = time.time()
print("updates: ", t2-st)
face = list(merged_faces_rec.values())[0][0]
t3 = time.time()
print("face: ", t3-t2)
t4 = time.time()
foo(merged_faces_rec, face)
t5 = time.time()
print("foo: ", t5-t4)
Result:
Computations between t4 and t5 took 168 seconds.
updates: 0.00468754768371582
face: 0.0011434555053710938
foo inside : 0.2232837677001953
223.32 ms in foo({'d02d46999aa145be8116..., [[0.96475353 0.8055263...)
foo: 168.42408967018127
cProfile
python3 -m cProfile -s tottime test.py
cProfile Result:
Ordered by: internal time
ncalls tottime percall cumtime percall filename:lineno(function)
30720512 44.991 0.000 85.425 0.000 arrayprint.py:888(__call__)
36791296 42.447 0.000 42.447 0.000 {built-in method numpy.core.multiarray.dragon4_positional}
30840514/60001 36.154 0.000 149.749 0.002 arrayprint.py:659(recurser)
24649728 25.967 0.000 25.967 0.000 {built-in method numpy.core.multiarray.dragon4_scientific}
30720512 20.183 0.000 26.420 0.000 arrayprint.py:636(_extendLine)
10 12.281 1.228 12.281 1.228 {method 'sub' of '_sre.SRE_Pattern' objects}
60001 11.434 0.000 79.370 0.001 arrayprint.py:804(fillFormat)
228330011/228329975 10.270 0.000 10.270 0.000 {built-in method builtins.len}
204081 4.815 0.000 16.469 0.000 {built-in method builtins.max}
18431577 4.624 0.000 21.742 0.000 arrayprint.py:854(<genexpr>)
18431577 4.453 0.000 28.627 0.000 arrayprint.py:859(<genexpr>)
30720531 3.987 0.000 3.987 0.000 {method 'split' of 'str' objects}
12348936 3.012 0.000 13.873 0.000 arrayprint.py:829(<genexpr>)
12348936 3.007 0.000 17.955 0.000 arrayprint.py:832(<genexpr>)
18431577 2.179 0.000 2.941 0.000 arrayprint.py:863(<genexpr>)
18431577 2.124 0.000 2.870 0.000 arrayprint.py:864(<genexpr>)
12348936 1.625 0.000 3.180 0.000 arrayprint.py:833(<genexpr>)
12348936 1.468 0.000 1.992 0.000 arrayprint.py:834(<genexpr>)
12348936 1.433 0.000 1.922 0.000 arrayprint.py:844(<genexpr>)
12348936 1.432 0.000 1.929 0.000 arrayprint.py:837(<genexpr>)
12324864 1.074 0.000 1.074 0.000 {method 'partition' of 'str' objects}
6845518 0.761 0.000 0.761 0.000 {method 'rstrip' of 'str' objects}
60001 0.747 0.000 80.175 0.001 arrayprint.py:777(__init__)
2 0.637 0.319 245.563 122.782 debug.py:237(smart_repr)
120002 0.573 0.000 0.573 0.000 {method 'reduce' of 'numpy.ufunc' objects}
60001 0.421 0.000 231.153 0.004 arrayprint.py:436(_array2string)
60000 0.370 0.000 0.370 0.000 {method 'rand' of 'mtrand.RandomState' objects}
60000 0.303 0.000 232.641 0.004 arrayprint.py:1334(array_repr)
60001 0.274 0.000 232.208 0.004 arrayprint.py:465(array2string)
60001 0.261 0.000 80.780 0.001 arrayprint.py:367(_get_format_function)
120008 0.255 0.000 0.611 0.000 numeric.py:2460(seterr)
Update to Clarify the Question
This is the part that has the bug. Something behind the scenes causes the program to take too long. Is it something to do with the garbage collector, or just a weird NumPy bug? I don't have any clue.
t6 = time.time()
foo1(big_array, face) # 223.32ms
t7 = time.time()
print("foo1 : ", t7-t6) # foo1 : 170 seconds
I am trying to parallelize an embarrassingly parallel for loop (previously asked here) and settled on this implementation that fits my parameters:
with Manager() as proxy_manager:
    shared_inputs = proxy_manager.list([datasets, train_size_common, feat_sel_size, train_perc,
                                        total_test_samples, num_classes, num_features, label_set,
                                        method_names, pos_class_index, out_results_dir, exhaustive_search])
    partial_func_holdout = partial(holdout_trial_compare_datasets, *shared_inputs)
    with Pool(processes=num_procs) as pool:
        cv_results = pool.map(partial_func_holdout, range(num_repetitions))
The reason I need to use a proxy object (shared between processes) is the first element of the shared proxy list, datasets, which is a list of large objects (each about 200-300 MB). This datasets list usually has 5-25 elements. I typically need to run this program on an HPC cluster.
Here is the question: when I run this program with 32 processes and 50 GB of memory (num_repetitions=200, with datasets being a list of 10 objects, each 250 MB), I do not see a speedup even by a factor of 16 (with 32 parallel processes). I do not understand why - any clues? Any obvious mistakes or bad choices? Where can I improve this implementation? Any alternatives?
I am sure this has been discussed before, and the reasons can be varied and very specific to the implementation - hence I ask for your 2 cents. Thanks.
Update: I did some profiling with cProfile to get a better idea - here is some info, sorted by cumulative time.
In [19]: p.sort_stats('cumulative').print_stats(50)
Mon Oct 16 16:43:59 2017 profiling_log.txt
555404 function calls (543552 primitive calls) in 662.201 seconds
Ordered by: cumulative time
List reduced from 4510 to 50 due to restriction <50>
ncalls tottime percall cumtime percall filename:lineno(function)
897/1 0.044 0.000 662.202 662.202 {built-in method builtins.exec}
1 0.000 0.000 662.202 662.202 test_rhst.py:2(<module>)
1 0.001 0.001 661.341 661.341 test_rhst.py:70(test_chance_classifier_binary)
1 0.000 0.000 661.336 661.336 /Users/Reddy/dev/neuropredict/neuropredict/rhst.py:677(run)
4 0.000 0.000 661.233 165.308 /Users/Reddy/anaconda/envs/py36/lib/python3.6/threading.py:533(wait)
4 0.000 0.000 661.233 165.308 /Users/Reddy/anaconda/envs/py36/lib/python3.6/threading.py:263(wait)
23 661.233 28.749 661.233 28.749 {method 'acquire' of '_thread.lock' objects}
1 0.000 0.000 661.233 661.233 /Users/Reddy/anaconda/envs/py36/lib/python3.6/multiprocessing/pool.py:261(map)
1 0.000 0.000 661.233 661.233 /Users/Reddy/anaconda/envs/py36/lib/python3.6/multiprocessing/pool.py:637(get)
1 0.000 0.000 661.233 661.233 /Users/Reddy/anaconda/envs/py36/lib/python3.6/multiprocessing/pool.py:634(wait)
866/8 0.004 0.000 0.868 0.108 <frozen importlib._bootstrap>:958(_find_and_load)
866/8 0.003 0.000 0.867 0.108 <frozen importlib._bootstrap>:931(_find_and_load_unlocked)
720/8 0.003 0.000 0.865 0.108 <frozen importlib._bootstrap>:641(_load_unlocked)
596/8 0.002 0.000 0.865 0.108 <frozen importlib._bootstrap_external>:672(exec_module)
1017/8 0.001 0.000 0.863 0.108 <frozen importlib._bootstrap>:197(_call_with_frames_removed)
522/51 0.001 0.000 0.765 0.015 {built-in method builtins.__import__}
The profiling info now sorted by time
In [20]: p.sort_stats('time').print_stats(20)
Mon Oct 16 16:43:59 2017 profiling_log.txt
555404 function calls (543552 primitive calls) in 662.201 seconds
Ordered by: internal time
List reduced from 4510 to 20 due to restriction <20>
ncalls tottime percall cumtime percall filename:lineno(function)
23 661.233 28.749 661.233 28.749 {method 'acquire' of '_thread.lock' objects}
115/80 0.177 0.002 0.211 0.003 {built-in method _imp.create_dynamic}
595 0.072 0.000 0.072 0.000 {built-in method marshal.loads}
1 0.045 0.045 0.045 0.045 {method 'acquire' of '_multiprocessing.SemLock' objects}
897/1 0.044 0.000 662.202 662.202 {built-in method builtins.exec}
3 0.042 0.014 0.042 0.014 {method 'read' of '_io.BufferedReader' objects}
2037/1974 0.037 0.000 0.082 0.000 {built-in method builtins.__build_class__}
286 0.022 0.000 0.061 0.000 /Users/Reddy/anaconda/envs/py36/lib/python3.6/site-packages/scipy/misc/doccer.py:12(docformat)
2886 0.021 0.000 0.021 0.000 {built-in method posix.stat}
79 0.016 0.000 0.016 0.000 {built-in method posix.read}
597 0.013 0.000 0.021 0.000 <frozen importlib._bootstrap_external>:830(get_data)
276 0.011 0.000 0.013 0.000 /Users/Reddy/anaconda/envs/py36/lib/python3.6/sre_compile.py:250(_optimize_charset)
108 0.011 0.000 0.038 0.000 /Users/Reddy/anaconda/envs/py36/lib/python3.6/site-packages/scipy/stats/_distn_infrastructure.py:626(_construct_argparser)
1225 0.011 0.000 0.050 0.000 <frozen importlib._bootstrap_external>:1233(find_spec)
7179 0.009 0.000 0.009 0.000 {method 'splitlines' of 'str' objects}
33 0.008 0.000 0.008 0.000 {built-in method posix.waitpid}
283 0.008 0.000 0.015 0.000 /Users/Reddy/anaconda/envs/py36/lib/python3.6/site-packages/scipy/misc/doccer.py:128(indentcount_lines)
3 0.008 0.003 0.008 0.003 {method 'poll' of 'select.poll' objects}
7178 0.008 0.000 0.008 0.000 {method 'expandtabs' of 'str' objects}
597 0.007 0.000 0.007 0.000 {method 'read' of '_io.FileIO' objects}
More profiling info sorted by percall info:
Update 2
The elements in the large list datasets I mentioned earlier are not usually that big - they are typically 10-25 MB each. But depending on the floating-point precision used and the number of samples and features, this can easily grow to 500 MB-1 GB per element. Hence I'd prefer a solution that can scale.
Update 3:
The code inside holdout_trial_compare_datasets uses scikit-learn's GridSearchCV, which internally uses the joblib library if we set n_jobs > 1 (or whenever we even set it). This might lead to some bad interactions between multiprocessing and joblib. So I am trying another config where I do not set n_jobs at all (which should default to no parallelism within scikit-learn), as sketched below. Will keep you posted.
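For reference, this is roughly what keeping scikit-learn's internal parallelism off looks like (a minimal sketch with a hypothetical estimator and grid; the real search inside holdout_trial_compare_datasets is not shown here):
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# n_jobs=1 keeps joblib from spawning its own workers inside each
# multiprocessing.Pool worker, avoiding nested parallelism.
grid = GridSearchCV(SVC(), param_grid={"C": [0.1, 1, 10]}, n_jobs=1)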
Based on the discussion in the comments, I did a mini experiment comparing three versions of the implementation:
v1: basically the same as your approach. In fact, since partial(f1, *shared_inputs) unpacks proxy_manager.list immediately, Manager.List is not involved here; the data is passed to the workers through Pool's internal queue.
v2: makes use of Manager.List; the work function receives a ListProxy object and fetches the shared data via an internal connection to a server process.
v3: child processes share data with the parent, taking advantage of the fork(2) system call.
import multiprocessing as mp
from functools import partial
from time import time

def f1(*args):
    for e in args[0]:
        pow(e, 2)

def f2(*args):
    for e in args[0][0]:
        pow(e, 2)

def f3(n):
    for i in datasets:
        pow(i, 2)

def v1(np):
    with mp.Manager() as proxy_manager:
        shared_inputs = proxy_manager.list([datasets, ])
        pf = partial(f1, *shared_inputs)
        with mp.Pool(processes=np) as pool:
            r = pool.map(pf, range(16))

def v2(np):
    with mp.Manager() as proxy_manager:
        shared_inputs = proxy_manager.list([datasets, ])
        pf = partial(f2, shared_inputs)
        with mp.Pool(processes=np) as pool:
            r = pool.map(pf, range(16))

def v3(np):
    with mp.Pool(processes=np) as pool:
        r = pool.map(f3, range(16))

datasets = [2.0 for _ in range(10 * 1000 * 1000)]

for f in (v1, v2, v3):
    print(f.__code__.co_name)
    for np in (2, 4, 8, 16):
        s = time()
        f(np)
        print("%s %.2fs" % (np, time() - s))
Results taken on a 16-core E5-2682 VPC make it obvious that v3 scales better.
{method 'acquire' of '_thread.lock' objects}
Looking at your profiler output, I would say that the shared-object lock/unlock overhead overwhelms the speed gains of running in parallel.
Refactor so that the work is farmed out to workers that do not need to talk to one another as much.
Specifically, if possible, derive one answer per data pile and then act on the accumulated results.
This is why Queues can seem so much faster: they involve a type of work that does not require an object that has to be 'managed' and so locked/unlocked.
Only 'manage' things that absolutely need to be shared between processes. Your managed list contains some very complicated looking objects...
A faster paradigm is:
allwork = manager.list([a, b, c])
theresult = manager.list()
and then
while allwork:
    unitofwork = allwork.pop()
    theresult.append(myfunction(unitofwork))
If you do not need a complex shared object, then only use a list of the most simple objects imaginable.
Then tell the workers to acquire the complex data that they can process in their own little world.
Try:
allwork = manager.list([datasetid1, datasetid2, ...])
theresult = manager.list()

while allwork:
    unitofworkid = allwork.pop()
    theresult.append(myfunction(unitofworkid))

def myfunction(unitofworkid):
    thework = acquiredataset(unitofworkid)
    result = holdout_trial_compare_datasets(thework, ...)
    return result
I hope that this makes sense. It should not take too much time to refactor in this direction, and you should see the {method 'acquire' of '_thread.lock' objects} number drop like a rock when you profile.
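A small runnable sketch of that pattern (all names here are hypothetical stand-ins, not your actual functions): each worker receives only a small dataset ID and loads its own data, so nothing large needs to be managed or locked.
from multiprocessing import Pool

def acquire_dataset(dataset_id):
    # stand-in for loading one dataset from disk or shared storage
    return list(range(dataset_id * 1000, (dataset_id + 1) * 1000))

def work(dataset_id):
    data = acquire_dataset(dataset_id)   # each worker fetches its own data
    return sum(x * x for x in data)      # stand-in for the real analysis

if __name__ == "__main__":
    dataset_ids = list(range(16))        # small, cheap-to-pickle work items
    with Pool(processes=4) as pool:
        results = pool.map(work, dataset_ids)
    print(results[:3])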
Here is the rate-limiting function in my code:
def timepropagate(wv1, ham11,
                  ham12, ham22, scalararray, nt):
    wv2 = np.zeros((nx, ny), 'c16')
    fw1 = np.zeros((nx, ny), 'c16')
    fw2 = np.zeros((nx, ny), 'c16')
    for t in range(0, nt, 1):
        wv1, wv2 = scalararray*wv1, scalararray*wv2
        fw1, fw2 = (np.fft.fft2(wv1), np.fft.fft2(wv2))
        fw1 = ham11*fw1+ham12*fw2
        fw2 = ham12*fw1+ham22*fw2
        wv1, wv2 = (np.fft.ifft2(fw1), np.fft.ifft2(fw2))
        wv1, wv2 = scalararray*wv1, scalararray*wv2
    del fw1
    del fw2
    return np.array([wv1, wv2])
What I need to do is find a reasonably fast implementation that would allow me to go at least twice as fast, preferably faster.
The more general question I'm interested in is how to speed up this piece with as few trips back into Python as possible. I assume that even if I speed up specific segments of the code, say the scalar-array multiplications, I would still come back to Python at the Fourier transforms, which would take time. Are there any ways to use, say, numba or Cython without this "coming back" to Python in the middle of the loops?
On a personal note, I'd prefer something fast on a single thread considering that I'd be using my other threads already.
Edit: here are the profiling results, the first for 4096x4096 arrays over 10 time steps; I need to scale this up to nt = 8000.
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.099 0.099 432.556 432.556 <string>:1(<module>)
40 0.031 0.001 28.792 0.720 fftpack.py:100(fft)
40 45.867 1.147 68.055 1.701 fftpack.py:195(ifft)
80 0.236 0.003 47.647 0.596 fftpack.py:46(_raw_fft)
40 0.102 0.003 1.260 0.032 fftpack.py:598(_cook_nd_args)
40 1.615 0.040 99.774 2.494 fftpack.py:617(_raw_fftnd)
20 0.225 0.011 29.739 1.487 fftpack.py:819(fft2)
20 2.252 0.113 72.512 3.626 fftpack.py:908(ifft2)
80 0.000 0.000 0.000 0.000 fftpack.py:93(_unitary)
40 0.631 0.016 0.820 0.021 fromnumeric.py:43(_wrapit)
80 0.009 0.000 0.009 0.000 fromnumeric.py:457(swapaxes)
40 0.338 0.008 1.158 0.029 fromnumeric.py:56(take)
200 0.064 0.000 0.219 0.001 numeric.py:414(asarray)
1 329.728 329.728 432.458 432.458 profiling.py:86(timepropagate)
1 0.036 0.036 432.592 432.592 {built-in method builtins.exec}
40 0.001 0.000 0.001 0.000 {built-in method builtins.getattr}
120 0.000 0.000 0.000 0.000 {built-in method builtins.len}
241 3.930 0.016 3.930 0.016 {built-in method numpy.core.multiarray.array}
3 0.000 0.000 0.000 0.000 {built-in method numpy.core.multiarray.zeros}
40 18.861 0.472 18.861 0.472 {built-in method numpy.fft.fftpack_lite.cfftb}
40 28.539 0.713 28.539 0.713 {built-in method numpy.fft.fftpack_lite.cfftf}
1 0.000 0.000 0.000 0.000 {built-in method numpy.fft.fftpack_lite.cffti}
80 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
40 0.006 0.000 0.006 0.000 {method 'astype' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
80 0.000 0.000 0.000 0.000 {method 'pop' of 'list' objects}
40 0.000 0.000 0.000 0.000 {method 'reverse' of 'list' objects}
80 0.000 0.000 0.000 0.000 {method 'setdefault' of 'dict' objects}
80 0.001 0.000 0.001 0.000 {method 'swapaxes' of 'numpy.ndarray' objects}
40 0.022 0.001 0.022 0.001 {method 'take' of 'numpy.ndarray' objects}
I think I did it wrong the first time, using time.time() to measure time differences on small arrays and extrapolating the conclusions to larger ones.
If most of the time is spent in the Hamiltonian multiplication, you may want to apply numba to that part. The biggest benefit comes from removing all the temporary arrays that would be needed when evaluating the expressions from within NumPy.
Bear in mind also that the (4096, 4096) c16 arrays are big enough not to fit comfortably in the processor caches: a single matrix takes 256 MiB. So the performance is unlikely to be limited by the arithmetic itself, but rather by memory bandwidth. Implement those operations in a way that performs only one pass over the input operands. This is really trivial to implement in numba. Note: you will only need to implement the Hamiltonian expressions in numba, as sketched below.
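A minimal sketch of what that could look like (assuming numba is installed; the name apply_hamiltonian is mine, and it reproduces your original expressions exactly, including the fact that the second line sees the already-updated fw1):
import numba
import numpy as np

@numba.njit(cache=True)
def apply_hamiltonian(fw1, fw2, ham11, ham12, ham22):
    # One pass over the operands, updating fw1/fw2 in place with no temporaries.
    nx, ny = fw1.shape
    for i in range(nx):
        for j in range(ny):
            f1 = ham11[i, j] * fw1[i, j] + ham12[i, j] * fw2[i, j]
            f2 = ham12[i, j] * f1 + ham22[i, j] * fw2[i, j]
            fw1[i, j] = f1
            fw2[i, j] = f2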
I also want to point out that the "preallocations" using np.zeros seem to signal that your code is not doing what you intend, since:
fw1 = ham11*fw1+ham12*fw2
fw2 = ham12*fw1+ham22*fw2
will actually create new arrays for fw1 and fw2. If your intent was to reuse the buffers, you may want to use "fw1[:, :] = ...". Otherwise the np.zeros calls do nothing but waste time and memory.
You may want to consider joining (wv1, wv2) into a single (2, 4096, 4096) c16 array, and the same with (fw1, fw2). That way the code becomes simpler, as you can rely on broadcasting to handle the scalararray product, and fft2/ifft2 will actually do the right thing (AFAIK). For example:
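A short self-contained sketch of that layout (small stand-in sizes; scalararray is assumed to broadcast against the trailing two axes):
import numpy as np

nx = ny = 64                                 # small stand-in sizes
wv1 = np.ones((nx, ny), 'c16')
wv2 = np.ones((nx, ny), 'c16')
scalararray = np.full((nx, ny), 0.5)

wv = np.stack([wv1, wv2])                    # shape (2, nx, ny)
wv *= scalararray                            # broadcasts over the leading axis
fw = np.fft.fft2(wv, axes=(-2, -1))          # transforms each component at once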
I'm trying to profile a few lines of pandas code, and when I run %prun I'm finding most of my time is taken by {isinstance}. This seems to happen a lot - can anyone suggest what that means and, for bonus points, suggest a way to avoid it?
This isn't meant to be application-specific, but here's a thinned-out version of the code in case that's important:
def flagOtherGroup(df):
    try: mostUsed0 = df[df.subGroupDummy == 0].siteid.iloc[0]
    except: mostUsed0 = -1
    try: mostUsed1 = df[df.subGroupDummy == 1].siteid.iloc[0]
    except: mostUsed1 = -1
    df['mostUsed'] = 0
    df.loc[(df.subGroupDummy == 0) & (df.siteid == mostUsed1), 'mostUsed'] = 1
    df.loc[(df.subGroupDummy == 1) & (df.siteid == mostUsed0), 'mostUsed'] = 1
    return df[['mostUsed']]
%prun -l15 temp = test.groupby('userCode').apply(flagOtherGroup)
And top lines of prun:
Ordered by: internal time
List reduced from 531 to 15 due to restriction <15>
ncalls tottime percall cumtime percall filename:lineno(function)
834472 1.908 0.000 2.280 0.000 {isinstance}
497048/395400 1.192 0.000 1.572 0.000 {len}
32722 0.879 0.000 4.479 0.000 series.py:114(__init__)
34444 0.613 0.000 1.792 0.000 internals.py:3286(__init__)
25990 0.568 0.000 0.568 0.000 {method 'reduce' of 'numpy.ufunc' objects}
82266/78821 0.549 0.000 0.744 0.000 {numpy.core.multiarray.array}
42201 0.544 0.000 1.195 0.000 internals.py:62(__init__)
42201 0.485 0.000 1.812 0.000 internals.py:2015(make_block)
166244 0.476 0.000 0.615 0.000 {getattr}
4310 0.455 0.000 1.121 0.000 internals.py:2217(_rebuild_blknos_and_blklocs)
12054 0.417 0.000 2.134 0.000 internals.py:2355(apply)
9474 0.385 0.000 1.284 0.000 common.py:727(take_nd)
isinstance, len and getattr are just the built-in functions. There is a huge number of calls to the isinstance() function here; it is not that a single call takes a lot of time, but that the function was called 834,472 times.
Presumably it is the pandas code that uses it.
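One way to confirm that is to ask the profiler which functions call isinstance (a sketch; test and flagOtherGroup are the objects from the question):
import cProfile
import pstats

# Profile the same groupby/apply call, then list the callers of isinstance.
cProfile.run("temp = test.groupby('userCode').apply(flagOtherGroup)", 'prun.stats')
stats = pstats.Stats('prun.stats')
stats.print_callers('isinstance')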
I have a script that finds the sum of all numbers that can be written as the sum of fifth powers of their digits. (This problem is described in more detail on the Project Euler web site.)
I have written it two ways, but I do not understand the performance difference.
The first way uses nested list comprehensions:
exp = 5
def min_combo(n):
    return ''.join(sorted(list(str(n))))

def fifth_power(n, exp):
    return sum([int(x) ** exp for x in list(n)])
print sum( [fifth_power(j,exp) for j in set([min_combo(i) for i in range(101,1000000) ]) if int(j) > 10 and j == min_combo(fifth_power(j,exp)) ] )
and profiles like this:
$ python -m cProfile euler30.py
443839
3039223 function calls in 2.040 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1007801 1.086 0.000 1.721 0.000 euler30.py:10(min_combo)
7908 0.024 0.000 0.026 0.000 euler30.py:14(fifth_power)
1 0.279 0.279 2.040 2.040 euler30.py:6(<module>)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1007801 0.175 0.000 0.175 0.000 {method 'join' of 'str' objects}
1 0.013 0.013 0.013 0.013 {range}
1007801 0.461 0.000 0.461 0.000 {sorted}
7909 0.002 0.000 0.002 0.000 {sum}
The second way is the more usual for loop:
exp = 5
ans = 0

def min_combo(n):
    return ''.join(sorted(list(str(n))))

def fifth_power(n, exp):
    return sum([int(x) ** exp for x in list(n)])

for j in set([''.join(sorted(list(str(i)))) for i in range(100, 1000000)]):
    if int(j) > 10:
        if j == min_combo(fifth_power(j, exp)):
            ans += fifth_power(j, exp)
print 'answer', ans
Here is the profiling info again:
$ python -m cProfile euler30.py
answer 443839
2039325 function calls in 1.709 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
7908 0.024 0.000 0.026 0.000 euler30.py:13(fifth_power)
1 1.081 1.081 1.709 1.709 euler30.py:6(<module>)
7902 0.009 0.000 0.015 0.000 euler30.py:9(min_combo)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1007802 0.147 0.000 0.147 0.000 {method 'join' of 'str' objects}
1 0.013 0.013 0.013 0.013 {range}
1007802 0.433 0.000 0.433 0.000 {sorted}
7908 0.002 0.000 0.002 0.000 {sum}
Why does the list comprehension implementation call min_combo() 1,000,000 more times than the for loop implementation?
Because in the second one you re-implemented the body of min_combo inline inside the set call, so those ~1,000,000 calls are not attributed to min_combo in the profile.
Do the same thing in both and you'll have the same result.
BTW, change those to avoid big lists being created:
sum([something for foo in bar]) -> sum(something for foo in bar)
set([something for foo in bar]) -> set(something for foo in bar)
(without [...] they become generator expressions).
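For example, the first version's one-liner could be written with generator expressions like this (a sketch; the behaviour is unchanged, only the intermediate lists disappear):
exp = 5

def min_combo(n):
    return ''.join(sorted(list(str(n))))

def fifth_power(n, exp):
    # generator expression instead of a list comprehension inside sum()
    return sum(int(x) ** exp for x in list(n))

print(sum(fifth_power(j, exp)
          for j in set(min_combo(i) for i in range(101, 1000000))
          if int(j) > 10 and j == min_combo(fifth_power(j, exp))))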