Python 2.7, Windows 7.
I'm looking for tips on how to make a calculation-heavy script run faster. First, an idea of what I'm doing:
Starting with a given color, I want to generate a list of 30 more colors (RGB values) that are maximally distinct from one another to the human eye, with the front of the list more distinct than the end.
Currently, I estimate that the script will take ~48 hours to complete. I could let it run over the weekend, but I figured I would take the opportunity to learn something about Python performance.
An overview of what the code does:
gen_colours() contains a loop that runs 30 times. Each time, 4 processes run multi(p, l, q), which contains the big loop iterating over each r, g, and b value between 0 and 255 (the r range is split across the processes, so each loops over 64 values). The innermost loop contains another loop that checks the RGB value against the RGB values already found, by calling compute_dist([r, g, b], c).
Anyway, without completely restructuring my code, things to help speed it up would be cool. Also, running all four CPUs at max for 48 hours... any issues there?
Code:
from math import sqrt, pow, atan2, atan, sin, cos, exp, radians, degrees
from fractions import Fraction
import time
import multiprocessing
def to_xyz(rgb):
    r = rgb[0] / 255.0
    g = rgb[1] / 255.0
    b = rgb[2] / 255.0
    f = Fraction(12, 5)
    if r > 0.04045:
        r = ((r + 0.055) / 1.055) ** f
    else:
        r /= 12.92
    if g > 0.04045:
        g = ((g + 0.055) / 1.055) ** f
    else:
        g /= 12.92
    if b > 0.04045:
        b = ((b + 0.055) / 1.055) ** f
    else:
        b /= 12.92
    r *= 100
    g *= 100
    b *= 100
    # Observer = 2 degrees, Illuminant = D65
    x = r * 0.4124 + g * 0.3576 + b * 0.1805
    y = r * 0.2126 + g * 0.7152 + b * 0.0722
    z = r * 0.0193 + g * 0.1192 + b * 0.9505
    return [x, y, z]
def to_lab(xyz):
    x = xyz[0]
    y = xyz[1]
    z = xyz[2]
    # Observer = 2 degrees, Illuminant = D65
    x /= 95.047
    y /= 100.0
    z /= 108.883
    f = Fraction(1, 3)
    if x > 0.008856:
        x **= f
    else:
        x = 7.787 * x + 0.13793103448
    if y > 0.008856:
        y **= f
    else:
        y = 7.787 * y + 0.13793103448
    if z > 0.008856:
        z **= f
    else:
        z = 7.787 * z + 0.13793103448
    L = 116 * y - 16
    a = 500 * (x - y)
    b = 200 * (y - z)
    return [L, a, b]
def compute_dist(rgb1, rgb2):
    """ Compute the apparent difference in colours using CIEDE2000 standards """
    xyz1 = to_xyz(rgb1)
    xyz2 = to_xyz(rgb2)
    lab1 = to_lab(xyz1)
    lab2 = to_lab(xyz2)
    a1 = lab1[1]
    a2 = lab2[1]
    b1 = lab1[2]
    b2 = lab2[2]
    L1 = lab1[0]
    L2 = lab2[0]
    c1 = sqrt(a1 * a1 + b1 * b1)
    c2 = sqrt(a2 * a2 + b2 * b2)
    c = (c1 + c2) / 2
    crs = c ** 7
    x = 0.5 - 0.5 * sqrt(crs / (crs + 6103515625))
    temp = (1 + x) * a1
    c1 = sqrt(temp * temp + b1 * b1)
    h1 = hue(temp, b1)
    temp = (1 + x) * a2
    c2 = sqrt(temp * temp + b2 * b2)
    h2 = hue(temp, b2)
    dL = L2 - L1
    dc = c2 - c1
    if c1 * c2 == 0:
        dh = 0
    else:
        temp = round(h2 - h1, 12)
        if abs(temp) <= 180:
            dh = h2 - h1
        else:
            if temp > 180:
                dh = h2 - h1 - 360
            else:
                dh = h2 - h1 + 360
    dh = sqrt(c1 * c2) * sin(radians(dh / 2))
    dh += dh
    lav = (L1 + L2) / 2
    cav = (c1 + c2) / 2
    if c1 * c2 == 0:
        htot = h1 + h2
    else:
        temp = abs(round(h1 - h2, 12))
        if temp > 180:
            if h2 + h1 < 360:
                htot = h1 + h2 + 360
            else:
                htot = h1 + h2 - 360
        else:
            htot = h1 + h2
    htot /= 2
    T = 1 - 0.17 * cos(radians(htot - 30)) + 0.24 * cos(radians(2 * htot)) + 0.32 * cos(radians(3 * htot + 6)) - 0.20 * cos(radians(4 * htot - 63))
    htotdtme = (htot / 25) - 11
    xPH = 30 * exp(-htotdtme * htotdtme)
    cavrs = cav ** 7
    scocp = sqrt(cavrs / (cavrs + 6103515625))
    xRC = scocp + scocp
    lavmf = lav - 50
    lavmfs = lavmf * lavmf
    SL = 1 + 0.015 * lavmfs / sqrt(20 + lavmfs)
    SC = 1 + 0.045 * cav
    SH = 1 + 0.015 * cav * T
    RT = -sin(radians(xPH + xPH)) * xRC
    dL /= SL
    dc /= SC
    dh /= SH
    dE = sqrt(dL * dL + dc * dc + dh * dh + RT * dc * dh)
    return dE
def hue(a, b):  # Function returns CIELAB-Hue value
    c = 0
    if a >= 0 and b == 0:
        return 0
    if a < 0 and b == 0:
        return 180
    if a == 0 and b > 0:
        return 90
    if a == 0 and b < 0:
        return 270
    if a > 0 and b > 0:
        c = 0
    elif a < 0:
        c = 180
    elif b < 0:
        c = 360
    return degrees(atan(b / a)) + c
def multi(p, l, q):
    f = 0
    n = []
    s = p * 64
    e = (p + 1) * 64
    for r in xrange(s, e):
        for g in xrange(256):
            for b in xrange(256):
                s = 1000  # smallest dist
                for c in l:  # compare to existing colours
                    d = compute_dist([r, g, b], c)
                    if d < s:
                        s = d
                if s > f:
                    n = [r, g, b]
                    f = s
    q.put(f)
    q.put(n)
def gen_colours(start_col=[68, 68, 68]):
    out = open('colour_output.txt', 'w')
    l = [start_col]
    if __name__ == '__main__':
        q0 = multiprocessing.Queue()
        q1 = multiprocessing.Queue()
        q2 = multiprocessing.Queue()
        q3 = multiprocessing.Queue()
        for h in xrange(30):  # create 30 more colours
            p0 = multiprocessing.Process(target=multi, args=[0, l, q0])
            p1 = multiprocessing.Process(target=multi, args=[1, l, q1])
            p2 = multiprocessing.Process(target=multi, args=[2, l, q2])
            p3 = multiprocessing.Process(target=multi, args=[3, l, q3])
            p0.start()
            p1.start()
            p2.start()
            p3.start()
            p0.join()
            p1.join()
            p2.join()
            p3.join()
            d0 = q0.get()
            d1 = q1.get()
            d2 = q2.get()
            d3 = q3.get()
            c0 = q0.get()
            c1 = q1.get()
            c2 = q2.get()
            c3 = q3.get()
            d = [d0, d1, d2, d3]
            c = [c0, c1, c2, c3]
            m = max(d)
            i = d.index(m)
            n = c[i]
            l.append(n)
            out.write("[" + str(n[0]) + ", " + str(n[1]) + ", " + str(n[2]) + "]\n")
            print "\nnew colour added: " + str(l)
    out.close()
    print "Done"

gen_colours()
Any tips?
Edit:
An obvious improvement: I was recalculating Lab values for the already-found RGB colors on every iteration. I added a list that stores the Lab values for these so the conversion isn't repeated in each loop. This reduced the time by about a quarter, though it's not the kind of Python performance improvement I'm looking for.
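Roughly, what I did looks like this (a sketch; compute_dist_lab is a hypothetical variant of compute_dist that takes two Lab triples directly and skips the RGB -> XYZ -> Lab conversions):
lab_cache = [to_lab(to_xyz(c)) for c in l]  # convert each found colour once per pass

for r in xrange(s, e):
    for g in xrange(256):
        for b in xrange(256):
            cand = to_lab(to_xyz([r, g, b]))  # convert the candidate once
            smallest = 1000
            for c_lab in lab_cache:  # compare against cached Lab values
                d = compute_dist_lab(cand, c_lab)
                if d < smallest:
                    smallest = d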
I'm sure a color like R:100 G:100 B:101 would not be a "maximally distinctive" solution if the color R:100 G:100 B:100 has already been chosen.
One quick improvement you could make is to omit checking colors that are similar to ones already checked (i.e., the same R and G values with a B value within a given range).
You're doing way too much work.
It appears that you are working in a 24-bit RGB color space, while most monitor/gamut/ambient/eye combinations afford far less discriminability than your CIE calculations produce.
Assuming you are doing this to provide real-world colors for real eyes, you also have to account for the myriad forms of colorblindness, which reduce you to less than a 12-bit useful color space. Even if we are just talking about luminance, as luminance approaches the lower third of the device gamut, noticeable differences become ever sparser.
The problem that you have is algorithmic: you are working too hard to get detailed results when the added detail is irrelevant. Between #000 and #fff there are only 4096 possible colors, and the red-green axis can be rejected out of hand.
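To make that concrete, a sketch of searching only the 12-bit space (this assumes 4 bits per channel is enough; multiplying by 17 = 0x11 stretches the 16 levels back over 0..255, since 15 * 17 = 255):
for r in xrange(16):
    for g in xrange(16):
        for b in xrange(16):
            candidate = [r * 17, g * 17, b * 17]
            # ... score candidate against the found colours with compute_dist, as before ...
That is 4096 candidates per pass instead of ~16.7 million.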
Related
I'm writing a Python script for testing a C implementation of point addition and doubling on Edwards elliptic curves.
I'm following this paper, implementing formula set (5) for "unified addition" (meaning any 2 points can be added) and formula set (7) for "dedicated doubling" (which is more efficient than (5) when the 2 points are the same).
But my code doesn't seem to compute the doubling correctly. Maybe it's the addition instead; I have no way of telling, since I don't have another reference to compare against.
#!/usr/bin/env python3
import sys, secrets
from functools import reduce

def os2int(v):
    ret = 0
    for b in v: ret = (ret << 8) | b
    return ret

# curve parameters from https://datatracker.ietf.org/doc/html/rfc8032#section-5.1
p = (1 << 255) - 19
a = -1
d_over = -121665
d_under = 121666
d = d_over * pow(d_under, p-2, p) % p  # product of d_over with the modular inverse of d_under mod p

def point_add_ref(x1, y1, t1, z1, x2, y2, t2, z2):
    x1y2 = x1 * y2 % p
    x2y1 = x2 * y1 % p
    x1x2 = x1 * x2 % p
    y1y2 = y1 * y2 % p
    z1z2 = z1 * z2 % p
    t1t2 = t1 * t2 % p
    x3 = (x1y2 + x2y1) * (z1z2 - d * t1t2) % p
    y3 = (y1y2 - a * x1x2) * (z1z2 + d * t1t2) % p
    t3 = (y1y2 - a * x1x2) * (x1y2 + x2y1) % p
    z3 = (z1z2 - d * t1t2) * (z1z2 + d * t1t2) % p
    return (x3, y3, t3, z3)

def point_dbl_ref(x1, y1, t1, z1):
    xx = x1 * x1 % p
    yy = y1 * y1 % p
    zz = z1 * z1 % p
    xy = x1 * y1 % p
    t = 2 * xy % p
    u = (yy + a * xx) % p
    v = (yy - a * xx) % p
    w = (2 * zz - yy - a * xx) % p
    x3 = t * w % p
    y3 = u * v % p
    t3 = t * v % p
    z3 = u * w % p
    return (x3, y3, t3, z3)

def xytz_cmp(P, Q):
    vec = (P[i] * Q[3] % p != P[3] * Q[i] % p for i in range(3))
    return reduce(lambda a, b: a or b, vec)

if __name__ == "__main__":
    fails = 0
    slen = 12
    for i in range(100):
        P = (os2int(secrets.token_bytes(slen)),
             os2int(secrets.token_bytes(slen)),
             os2int(secrets.token_bytes(slen)),
             os2int(secrets.token_bytes(slen)))
        R3 = point_add_ref(*P, *P)
        R4 = point_dbl_ref(*P)
        if xytz_cmp(R3, R4): fails += 1
    print("{} test(s) failed.".format(fails))
It's neither the addition nor the doubling that's incorrect. It's that you're generating points with random coordinates, which are not valid curve points.
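To test with valid points, you can build them the way RFC 8032 decodes them: pick a random y, solve a*x^2 + y^2 = 1 + d*x^2*y^2 for x, and set T = X*Y with Z = 1. A sketch (my own code, not from the question's paper; it reuses p, a, d, os2int and secrets from above):
def sqrt_mod_p(n):
    # p = 2^255 - 19 and p % 8 == 5, so a candidate root is n^((p+3)/8) mod p
    r = pow(n, (p + 3) // 8, p)
    if r * r % p == n % p:
        return r
    r = r * pow(2, (p - 1) // 4, p) % p  # multiply by a square root of -1
    if r * r % p == n % p:
        return r
    return None  # n is not a quadratic residue

def random_valid_point():
    # pick random y until (y^2 - 1)/(d*y^2 + 1) is a square, then recover x
    while True:
        y = os2int(secrets.token_bytes(32)) % p
        num = (y * y - 1) % p          # with a = -1: x^2 = (y^2 - 1)/(d*y^2 + 1)
        den = (d * y * y + 1) % p
        x = sqrt_mod_p(num * pow(den, p - 2, p) % p)
        if x is not None:
            return (x, y, x * y % p, 1)  # extended coordinates (X, Y, T, Z), Z = 1
Replacing the random P in the test loop with random_valid_point() should make the addition/doubling comparison meaningful; on valid points, formulas (5) and (7) should agree.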
I'm trying to understand the Karatsuba multiplication algorithm. I've written the following code:
def karatsuba_multiply(x, y):
    # split x and y
    len_x = len(str(x))
    len_y = len(str(y))
    if len_x == 1 or len_y == 1:
        return x * y
    n = max(len_x, len_y)
    n_half = 10**(n // 2)
    a = x // n_half
    b = x % n_half
    c = y // n_half
    d = y % n_half
    ac = karatsuba_multiply(a, c)
    bd = karatsuba_multiply(b, d)
    ad_plus_bc = karatsuba_multiply((a+b), (c+d)) - ac - bd
    return (10**n * ac) + (n_half * ad_plus_bc) + bd
This test case does not work:
print(karatsuba_multiply(1234, 5678)) ## returns 11686652, should be 7006652
But if I use the following code from this answer, the test case produces the correct answer:
def karat(x, y):
    if len(str(x)) == 1 or len(str(y)) == 1:
        return x*y
    else:
        m = max(len(str(x)), len(str(y)))
        m2 = m // 2
        a = x // 10**(m2)
        b = x % 10**(m2)
        c = y // 10**(m2)
        d = y % 10**(m2)
        z0 = karat(b, d)
        z1 = karat((a+b), (c+d))
        z2 = karat(a, c)
        return (z2 * 10**(2*m2)) + ((z1 - z2 - z0) * 10**(m2)) + (z0)
Both functions look like they're doing the same thing. Why doesn't mine work?
It seems that with your karatsuba_multiply implementation, the formula in the last return is not correct.
In the original karat implementation, the value m2 = m // 2 is multiplied by 2 in the last return, (z2 * 10**(2*m2)) + ((z1 - z2 - z0) * 10**(m2)) + (z0), i.e. the exponent is (2*m2).
So I think you need to either add a new variable, as below, where n2 = n // 2, so that you can multiply it by 2 in the last return, or use the original implementation.
Hoping it helps :)
EDIT: This is explained by the fact that, when n is odd, n is different from 2 * (n // 2).
def karatsuba_multiply(x, y):
    len_x = len(str(x))
    len_y = len(str(y))
    if len_x == 1 or len_y == 1:
        return x * y
    n = max(len_x, len_y)
    n_half = 10**(n // 2)
    n2 = n // 2
    a = x // n_half
    b = x % n_half
    c = y // n_half
    d = y % n_half
    ac = karatsuba_multiply(a, c)
    bd = karatsuba_multiply(b, d)
    ad_plus_bc = karatsuba_multiply((a + b), (c + d)) - ac - bd
    return (10**(2 * n2) * ac) + (n_half * ad_plus_bc) + bd
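With that change, the failing case from the question gives the right answer (a quick check):
print(karatsuba_multiply(1234, 5678))  # 7006652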
I am writing code to solve coupled harmonic oscillator equations using odeint from scipy. I want to add a random number to one of the equations at every time step of the ODE solver. To do this, I have written two time-dependent "constants" (functions of t) and used them. However, this gives me the following error:
ODEintWarning: Excess work done on this call (perhaps wrong Dfun type). Run
with full_output = 1 to get quantitative information.
warnings.warn(warning_msg, ODEintWarning)
My code is given below.
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import scipy.stats as stats
from scipy.stats import beta
m1 = 1.1
m2 = 1.0
k1 = 1000.0
k2 = 1000.0
k12 = 100
g = 0.0
global Steps
Steps = 0
x10 = 1
x20 = 0
alpha = 1
a = 2
b = 3
v10 = 0
v20 = 0
#A = np.random.beta(a,b, 10) * alpha
#B = np.random.beta(a,b, 10) * alpha
def c(t):
    return np.random.beta(a, b) * alpha

def d(t):
    return np.random.beta(a, b) * alpha

def f(x, t, c, d):
    y = []
    y.append(x[1] - c(t) * x[0])
    #print(c(t))
    y.append(-(k1 + k12) / m1 * x[0] + k12 / m1 * x[2] - 2 * g * x[1] - c(t) * x[1])
    y.append(x[3] - d(t) * x[2])
    y.append(-(k2 + k12) / m2 * x[2] + k12 / m2 * x[0] - 2 * g * x[3] - d(t) * x[3])
    return y
b0 = [x10, v10, x20, v20]
b0 = np.array(b0)
args = (c, d)
t = np.linspace(0, 1, 1000 )
t = np.array(t)
X1, infodict = odeint(f, b0, t, args, full_output = 1)
X1 = X1.T
Q1 = X1[0]
Q2 = X1[2]
plt.plot(t, Q1, 'g-')
plt.plot(t, Q2, 'b-')
plt.show()
a = m1*m2
b = -(m1*(k2 + k12) + m2*(k1 + k12))
c = k1*k2 + k12*(k1 + k2)
wp = np.sqrt((-b + np.sqrt(b**2 - 4*a*c))/(2*a))
wm = np.sqrt((-b - np.sqrt(b**2 - 4*a*c))/(2*a))
print(wp)
print(wm)
f = open('simdata.csv', mode='w')
for i in range(len(t)):
    p = str(t[i]) + ',' + str(Q1[i]) + ',' + str(Q2[i]) + '\n'
    f.write(p)
f.close()
I have a stiff system of differential equations given as first-order ODEs. The system is written in Maple, whose default method for it is the Rosenbrock method. My task now is to solve these equations with Python tools.
1) I do not know how to write the equations in Python code.
2) I do not know how to solve the equations with numpy, scipy, matplotlib or PyDSTool. For PyDSTool I did not find any examples at all, although I have read that it is well suited for stiff systems.
Code:
import math
import numpy
import scipy
import matplotlib

varepsilon = pow(10, -2); j = -2.5*pow(10, -2); e = 3.0; tau = 0.3; delta = 2.0
u0 = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) / 6
u = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) * (1 + delta) / 6
v = 1 / (1 - 2 / e) * math.sqrt(j ** 2 + (1 - 2 / e) * (e ** 2 * u ** 2 + 1))
y8 = lambda y1,y5,y7: 1 / (1 - 2 / y1) * math.sqrt(y5 ** 2 + (1 - 2 / y1) * (1 + y1 ** 2 * y7 ** 2))
E0 = lambda y1,y8: (1 - 2 / y1) * y8
Phi0 = lambda y1,y7: y1 ** 2 * y7
y08 = y8(y1=e, y5=j, y7=u0);
E = E0(y1=e, y8=y08); Phi = Phi0(y1=e, y7=u0)
# initial values
z01 = e; z03 = 0; z04 = 0; z05 = j; z07 = u0; z08 = y08;
# the definitions below are still in Maple notation:
p1 = -z1(x)*z5(x)/(z1(x)-2);
p3 = -z1(x)^2*z7(x);
p4 = z8(x)*(1-2/z1(x));
Q1 = -z5(x)^2/(z1(x)*(z1(x)-2))+(z8(x)^2/z1(x)^3-z7(x)^2)*(z1(x)-2);
Q3 = 2*z5(x)*z7(x)/z1(x);
Q4 = 2*z5(x)*z8(x)/(z1(x)*(z1(x)-2));
c1 = z1(x)*z7(x)*varepsilon;
c3 = -z1(x)*z5(x)*varepsilon;
C = z7(x)*varepsilon/z1(x)-z8(x)*(1-2/z1(x));
d1 = -z1(x)*z8(x)*varepsilon;
d3 = z1(x)*z5(x)*varepsilon;
B = z1(x)^2*z7(x)-z8(x)*varepsilon*(1-2/z1(x));
Omega = 1/(c1*d3*p3+c3*d1*p4-c3*d3*p1);
# differential equations
diff(z1(x), x) = z5(x);
diff(z3(x), x) = z7(x);
diff(z4(x), x) = z8(x);
diff(z5(x), x) = Omega*(-Q1*c1*d3*p3 - Q1*c3*d1*p4 + Q1*c3*d3*p1 + B*c3*p4 + C*d3*p3 + E*d3*p3 - Phi*c3*p4);
diff(z7(x), x) = -Omega*(Q3*c1*d3*p3 + Q3*c3*d1*p4 - Q3*c3*d3*p1 + B*c1*p4 - C*d1*p4 + C*d3*p1 - E*d1*p4 + E*d3*p1 - Phi*c1*p4);
diff(z8(x), x) = Omega*(-Q4*c1*d3*p3 - Q4*c3*d1*p4 + Q4*c3*d3*p1 + B*c1*p3 - B*c3*p1 - C*d1*p3 - E*d1*p3 - Phi*c1*p3 + Phi*c3*p1);
# the functions to be found, from which the curve is built
{z1(x), z3(x), z4(x), z5(x), z7(x), z8(x)}
After some digging around on the Internet, I found something that works in principle:
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
from scipy.signal import argrelextrema
from mpmath import mp, mpf

mp.dps = 50

varepsilon = pow(10, -2); j = 2.5*pow(10, -4); e = 3.0; tau = 0.5; delta = 2.0
u0 = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) / 6
u = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) * (1 + delta) / 6
v = 1 / (1 - 2 / e) * math.sqrt(j ** 2 + (1 - 2 / e) * (e ** 2 * u ** 2 + 1))
y8 = lambda y1, y5, y7: 1 / (1 - 2 / y1) * math.sqrt(y5 ** 2 + (1 - 2 / y1) * (1 + y1 ** 2 * y7 ** 2))
E0 = lambda y1, y8: (1 - 2 / y1) * y8
Phi0 = lambda y1, y7: y1 ** 2 * y7
y08 = y8(y1=e, y5=j, y7=u0)
E = E0(y1=e, y8=y08); Phi = Phi0(y1=e, y7=u0)

# initial values
z01 = e; z03 = 0.0; z04 = 0.0; z05 = j; z07 = u0; z08 = y08

def model(x, z, varepsilon, E, Phi):
    z1, z3, z4, z5, z7, z8 = z[0], z[1], z[2], z[3], z[4], z[5]
    p1 = -z1*z5/(z1 - 2)
    p3 = -pow(z1, 2)*z7
    p4 = z8*(1 - 2/z1)
    Q1 = -pow(z5, 2)/(z1*(z1 - 2)) + (pow(z8, 2)/pow(z1, 3) - pow(z7, 2))*(z1 - 2)
    Q3 = 2*z5*z7/z1
    Q4 = 2*z5*z8/(z1*(z1 - 2))
    c1 = z1*z7*varepsilon
    c3 = -z1*z5*varepsilon
    C = z7*varepsilon/z1 - z8*(1 - 2/z1)
    d1 = -z1*z8*varepsilon
    d3 = z1*z5*varepsilon
    B = pow(z1, 2)*z7 - z8*varepsilon*(1 - 2/z1)
    Omega = 1/(c1*d3*p3 + c3*d1*p4 - c3*d3*p1)
    # differential equations
    dz1dx = z5
    dz3dx = z7
    dz4dx = z8
    dz5dx = Omega*(-Q1*c1*d3*p3 - Q1*c3*d1*p4 + Q1*c3*d3*p1 + B*c3*p4 + C*d3*p3 + E*d3*p3 - Phi*c3*p4)
    dz7dx = -Omega*(Q3*c1*d3*p3 + Q3*c3*d1*p4 - Q3*c3*d3*p1 + B*c1*p4 - C*d1*p4 + C*d3*p1 - E*d1*p4 + E*d3*p1 - Phi*c1*p4)
    dz8dx = Omega*(-Q4*c1*d3*p3 - Q4*c3*d1*p4 + Q4*c3*d3*p1 + B*c1*p3 - B*c3*p1 - C*d1*p3 - E*d1*p3 - Phi*c1*p3 + Phi*c3*p1)
    dzdx = [dz1dx, dz3dx, dz4dx, dz5dx, dz7dx, dz8dx]
    return dzdx

z0 = [z01, z03, z04, z05, z07, z08]

if __name__ == '__main__':
    # Start by specifying the integrator:
    # use ``vode`` with "backward differentiation formula"
    r = integrate.ode(model).set_integrator('vode', method='bdf')
    r.set_f_params(varepsilon, E, Phi)
    # Set the time range
    t_start = 0.0
    t_final = 0.1
    delta_t = 0.00001
    # Number of time steps: 1 extra for initial condition
    num_steps = np.floor((t_final - t_start)/delta_t) + 1
    r.set_initial_value(z0, t_start)
    t = np.zeros((int(num_steps), 1), dtype=np.float64)
    Z = np.zeros((int(num_steps), 6,), dtype=np.float64)
    t[0] = t_start
    Z[0] = z0
    k = 1
    while r.successful() and k < num_steps:
        r.integrate(r.t + delta_t)
        # Store the results to plot later
        t[k] = r.t
        Z[k] = r.y
        k += 1
    # All done! Plot the trajectories:
    Z1, Z3, Z4, Z5, Z7, Z8 = Z[:,0], Z[:,1], Z[:,2], Z[:,3], Z[:,4], Z[:,5]
    plt.plot(t, Z1, 'r-', label=r'$r(s)$')
    plt.grid('on')
    plt.ylabel(r'$r$')
    plt.xlabel('proper time s')
    plt.legend(loc='best')
    plt.show()
    plt.plot(t, Z5, 'r-', label=r'$\frac{dr}{ds}$')
    plt.grid('on')
    plt.ylabel(r'$\frac{dr}{ds}$')
    plt.xlabel('proper time s')
    plt.legend(loc='best')
    plt.show()
    plt.plot(t, Z7, 'r-', label=r'$\frac{dϕ}{ds}$')
    plt.grid('on')
    plt.xlabel('proper time s')
    plt.ylabel(r'$\frac{dϕ}{ds}$')
    plt.legend(loc='upper center')
    plt.show()
However, reviewing the solutions obtained with scipy,
I encountered an inconsistency between the solutions produced by scipy and by Maple. The essence of the problem is that the solutions oscillate rapidly, and Maple captures these oscillations with high precision using Rosenbrock's method, while Python has trouble with this using backward differentiation methods:
r = integrate.ode(model).set_integrator('vode', method='bdf')
http://www.scholarpedia.org/article/Backward_differentiation_formulas
I tried all of the integration modes: "vode", "zvode", "lsoda", "dopri5", "dop853", and found that "vode" is the best suited; however, it still does not meet my needs...
https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html
So this method catches the oscillations in the range j ~ 10^{-5}-10^{-3}, while Maple shows good results for any j.
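One direction I have not fully explored (a sketch, assuming SciPy >= 1.4 for the args keyword of solve_ivp): the fully implicit 'Radau' method with tight tolerances, which, like Maple's Rosenbrock method, is an implicit scheme aimed at stiff problems:
from scipy.integrate import solve_ivp

# model already has the (t, y, *args) signature that solve_ivp expects
sol = solve_ivp(model, (0.0, 0.1), z0, method='Radau',
                args=(varepsilon, E, Phi), rtol=1e-10, atol=1e-12)
# sol.t and sol.y then hold the time grid and the six solution components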
I present the results obtained by scipy for j ~ 10^{-2}:
[plots omitted]
and the results obtained by Maple for j ~ 10^{-2}:
[plots omitted]
It is important that the oscillations are physical solutions! That is, Python captures the oscillations badly for j ~ 10^{-2}. Can anyone tell me what I'm doing wrong? And how can I look at the absolute error of the integration?
I am fairly new to Python and I am trying to convert some code. It implements an approximation method, which isn't important here. In my oddev function I get:
c2[1:modes+1] = v* 1j
ValueError: could not broadcast input array from shape (25) into shape (25,1)
When I do this in Matlab, I believe it automatically casts it and stores the complex array. The function is getting the coefficients of a partial sine transform. At first I tried storing the random matrix, which is just an array, using the np.matlib method; this had the same shape, but I believe I would lose the real values of the filter when I cast it. How do I store this?
import math
import numpy as np
import numpy.matlib  # needed for np.matlib.zeros below

def quickcontmin(datain):
    n = np.shape(datain)[0]
    m = math.floor(n / 2)
    modes = math.floor(m / 2)
    addl = 20
    nn = 20 * n
    chi = 10 ** -13

    def evenhp(xv):
        "Even high pass"
        n1 = np.shape(xv)[0]
        vx = np.array(xv[:-1])
        vx = vx[::-1]
        c1 = np.append(xv, vx)
        c1 = np.fft.fft(c1)
        c1[0:modes-1] = 0.0
        c1[-1 - modes + 2:-1] = 0.0
        evenl = np.real(np.fft.ifft(c1))
        even = evenl[0:n1-1]
        return even

    def evenhpt(xv):
        "Transpose of EvenHP"
        n1 = np.shape(xv)[0]
        xy = np.zeros((n1 - 2, 1))
        c1 = np.append(xv, xy)
        c1 = np.fft.fft(c1)
        c1[0:modes-1] = 0.0
        c1[-1 - modes + 1:-1] = 0.0
        evenl = np.real(np.fft.ifft(c1))
        even = evenl[0:n1-1]
        even[1:-2] = even[1:-2] + evenl[-1:-1:n1+1]
        return even

    def evenlp(xv):
        "Low pass cosine filter"
        n1 = np.shape(xv)[0]
        vx = np.array(xv[:-1])
        vx = vx[::-1]
        c1 = np.append(xv, vx)
        c1 = np.fft.fft(c1)
        c1[modes + 1:-1 - modes + 1] = 0.0
        evenl = np.real(np.fft.ifft(c1))
        even = evenl[0:n1-1]
        return even

    def oddev(xv):
        "Evaluate the sine modes on the grid"
        c2 = np.zeros((2 * n - 2, 1)) * 1j
        v = np.array(xv[:])
        v1 = v[:-1]
        v1 = v[::-1]
        c2[1:modes+1] = v * 1j
        c2[-1 - modes + 1:-1] = -v1 * 1j
        evall = np.fft.ifft(c2) * math.sqrt(2 * n - 2)
        eva = evall[0:n-1]
        return eva

    def oddevt(xv):
        "Transpose the sine modes on the function OddEv"
        c1 = np.array(xv[1:-2])
        c1 = np.insert(c1, 0.0, 0)
        c1 = np.append(c1, 0.0)
        c1 = np.append(c1, xv[-2:-1:2])
        c1a = np.divide(np.fft.fft(c1), math.sqrt(2 * n - 2))
        fcoef = np.imag(c1a[1:modes])
        return fcoef

    def eextnd(xv):
        "Obtain cosine coefficients and evaluate on the refined grid"
        vx = np.array(xv[:-1])
        vx = vx[::-1]
        c1 = np.append(xv, vx)
        c1 = np.fft.fft(c1)
        cL = np.zeros((2*nn-2, 1))
        cL[0:modes-1] = c1[0:modes-1]
        cL[-1 - modes + 1:-1] = c1[-1 - modes + 1:-1]
        evenexL = np.multiply(np.fft.ifft(cL), (nn - 1) / (n - 1))
        evenex = evenexL[0:nn-1]
        return evenex

    def oextnd(xv):
        "Evaluate sine coefficients on the refined grid"
        c2 = np.zeros((2 * nn - 2, 1))
        c2[0] = 0.0
        c2[1:modes + 1] = np.multiply(xv[0:-1], 1j)
        c2[-1 - modes + 1:-1] = np.multiply(-xv[-1:-1:1], 1j)
        evall = np.real(np.multiply(np.fft.ifft(c2), math.sqrt(2 * n - 2) * (2*nn - 2) / (2 * n - 2)))
        oox = evall[0:nn-1]
        return oox

    dc = evenlp(datain)
    # L in paper, number of vectors used to sample the column space
    lll = round(4 * math.log(m) / math.log(2)) + addl
    lll = int(lll)
    # The following should be straightforward from the pseudo-code
    w = 2 * np.random.rand(modes, lll) - 1
    p = np.matlib.zeros(shape=(n, lll))
    for j in range(lll):
        p[:, j] = evenhp(oddev(w[:, j]))
    q, r = np.linalg.qr(p, mode='reduced')
    z = np.zeros(shape=(modes, lll))
    for j in range(lll):
        z[:, j] = oddevt(evenhpt(q[:, j]))
    un, s, v = np.linalg.svd(z, full_matrices='False')
    ds = np.diag(s)
    aa = np.extract(np.diag(s) > (chi))
    aa[-1] = aa
    aa = int(aa)
    s = 0 * s
    for j in range(aa):
        s[j, j] = 1.0 / ds(j)
    # find the sine coefficients
    b = un * s * v.T * q.T * evenhp(datain)
    # Constructing the continuation
    exs = oddev(b)
    pexs = evenlp(exs)
    dataCont = exs - pexs + dc
    dataCont[n+1:2*n-2] = -exs[-2:-1:1] - pexs[-2:-1:1] + dc[-2:-1:1]
    # Evaluate the continuation on the refined grid
    dataRefined = eextnd(dc - exs) + oextnd(b)
    return dataRefined, dataCont

n1 = 100
t = np.linspace(0, 2*math.pi, n1)
y = np.sin(t)
data = quickcontmin(y)
dc1 = data[1]
dc1 = dc1[0:n1-1]
Replacing c2[1:modes+1] = v * 1j with c2[1:modes+1, 0] = v * 1j should fix that specific error.
More consistent would be to replace:
v = np.array(xv[:])
v1 = v[:-1]
v1 = v[::-1]
with:
v = xv
v1 = v[:-1]
v is already a column vector, so you don't need to transform it into a 1-d vector when you later need a column vector.
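For reference, a minimal standalone illustration of the broadcasting rule involved (my own demo shapes, not the data from the question):
import numpy as np

c2 = np.zeros((48, 1), dtype=complex)  # a column vector, shape (48, 1)
v = np.ones(25)                        # a 1-d array, shape (25,)
# c2[1:26] = v * 1j   would raise: could not broadcast ... shape (25,) into shape (25, 1)
c2[1:26, 0] = v * 1j                   # OK: the target slice is flat, shape (25,)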