How can I print the solutions as a fraction, not like that? - python

**I am trying to make a function return the results of a quadratic equation, but I can't figure out how to print the solutions as fractions. Please help me!**
def cube_root(x):
    return x**(1/3)

def Quadratic(a, b, c):
    delta = (b**2) - 4*a*c
    if delta == 0:
        x = (-b)/2*a
        return f"This Quadratic equation has 1 solution: {x}"
    else:
        if delta < 0:
            return "This Quadratic equation has no solutions: "
        else:
            x1 = ((-b) - cube_root(delta))/2*a
            x2 = ((-b) + cube_root(delta))/2*a
            return f"This Quadratic equation has 2 solutions: {x1} & {x2}"

print(Quadratic(12, 0, -1))

You can use simplify from the sympy package (not in the standard library - you'll have to install it):
from sympy import simplify, sqrt

def quadratic(a, b, c):
    a = simplify(a)  # convert inputs into objects used by simplify
    b = simplify(b)
    c = simplify(c)
    delta = (b**2) - 4*a*c
    if delta == 0:
        x = (-b)/2*a
        return f"This Quadratic equation has 1 solution: {x}"
    elif delta < 0:
        return "This Quadratic equation has no real solutions: "
    else:
        x1 = ((-b) - sqrt(delta))/2*a  # using sqrt from sympy
        x2 = ((-b) + sqrt(delta))/2*a
        return f"This Quadratic equation has 2 solutions: {x1} & {x2}"

print(quadratic(12, 0, -1))
This gives:
This Quadratic equation has 2 solutions: -24*sqrt(3) & 24*sqrt(3)
Different example:
print(quadratic(12, 2, -1))
gives:
This Quadratic equation has 2 solutions: -12*sqrt(13) - 12 & -12 + 12*sqrt(13)
Actually sympy can also handle complex numbers for you, so you can get rid of your test for no real solutions (i.e. remove the elif, so that delta < 0 is handled by the else: block).
If you do this and then give it the example:
print(quadratic(12, 2, 1))
you get:
This Quadratic equation has 2 solutions: -12 - 12*sqrt(11)*I & -12 + 12*sqrt(11)*I
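For reference, a minimal sketch of that simplified version (keeping the answer's formula exactly as written above, so the quoted outputs are reproduced):

from sympy import simplify, sqrt

def quadratic(a, b, c):
    # note: (-b)/2*a is kept exactly as in the answer above so the quoted
    # outputs match; the textbook formula would divide by (2*a) instead
    a, b, c = simplify(a), simplify(b), simplify(c)
    delta = (b**2) - 4*a*c
    if delta == 0:
        return f"This Quadratic equation has 1 solution: {(-b)/2*a}"
    x1 = ((-b) - sqrt(delta))/2*a
    x2 = ((-b) + sqrt(delta))/2*a
    return f"This Quadratic equation has 2 solutions: {x1} & {x2}"

print(quadratic(12, 2, 1))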

If you don't want extra packages, perhaps the following might help:
from fractions import Fraction

def is_square(x):
    if x < 0: return False
    s = int(x**0.5)
    return s*s == x

def sqrt_frac_str(frac):
    if frac < 0:
        return f'i {sqrt_frac_str(-frac)}'
    num_isq = is_square(frac.numerator)
    den_isq = is_square(frac.denominator)
    if num_isq and den_isq:
        return f'{int(frac.numerator**0.5)}/{int(frac.denominator**0.5)}'
    elif num_isq:
        return f'{int(frac.numerator**0.5)}/sqrt({frac.denominator})'
    elif den_isq:
        return f'sqrt({frac.numerator})/{int(frac.denominator**0.5)}'
    else:
        return f'sqrt({frac})'

def quadratic_frac(a, b, c):
    delta = Fraction(b**2 - 4 * a * c)
    rootcenter = Fraction(-b, 2 * a)
    rootdeltasq = delta / Fraction(2 * a)**2
    return rootcenter, rootdeltasq

def quadsol_str(rootcenter, rootdeltasq):
    return f'{rootcenter} +/- {sqrt_frac_str(rootdeltasq)}'
Tests:
rc, rd = quadratic_frac(2, 1, -1)
rc, rd
# (Fraction(-1, 4), Fraction(9, 16))
quadsol_str(*quadratic_frac(2, 1, -1))
# '-1/4 +/- 3/4'
quadsol_str(*quadratic_frac(2, 0, -1))
# '0 +/- 1/sqrt(2)'
quadsol_str(*quadratic_frac(2, 0, 1))
# '0 +/- i 1/sqrt(2)'
quadsol_str(*quadratic_frac(3, 2, -1))
# '-1/3 +/- 2/3'
quadsol_str(*quadratic_frac(5, 3, -7))
# '-3/10 +/- sqrt(149)/10'
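If you additionally want the two roots themselves as plain Fractions whenever the square-root part happens to be rational, a small helper along these lines might work (exact_roots is a hypothetical name; is_square is reused from above):

def exact_roots(rootcenter, rootdeltasq):
    # returns (smaller_root, larger_root) as Fractions, or None when the
    # square-root part is negative or irrational
    if rootdeltasq < 0:
        return None
    if is_square(rootdeltasq.numerator) and is_square(rootdeltasq.denominator):
        root = Fraction(int(rootdeltasq.numerator**0.5),
                        int(rootdeltasq.denominator**0.5))
        return rootcenter - root, rootcenter + root
    return None

exact_roots(*quadratic_frac(2, 1, -1))
# (Fraction(-1, 1), Fraction(1, 2))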

Related

NIST Suite Test for Nonlinear dynamical system

In my following code I am running the Lorenz chaotic equations, from which I will get random numbers in terms of xs, ys and zs:
import numpy as np

def lorenz(x, y, z, a=10, b=8/3, c=28):
    x_dot = a*(y - x)
    y_dot = -y + c*x - x*z
    z_dot = -b*z + x*y
    return x_dot, y_dot, z_dot

dt = 0.01
num_steps = 10000

# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys = np.empty(num_steps + 1)
zs = np.empty(num_steps + 1)

# Set initial values
xs[0], ys[0], zs[0] = (1, 1, 1)

# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps):
    x_dot, y_dot, z_dot = lorenz(xs[i], ys[i], zs[i])
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)
I am actually trying to test the xs, ys and zs values for randomness via the NIST 800 test suite, using the code below:
from __future__ import print_function
import math
from fractions import Fraction
from scipy.special import gamma, gammainc, gammaincc
# from gamma_functions import *
import numpy
import cmath
import random

#ones_table = [bin(i)[2:].count('1') for i in range(256)]

def count_ones_zeroes(bits):
    ones = 0
    zeroes = 0
    for bit in bits:
        if (bit == 1):
            ones += 1
        else:
            zeroes += 1
    return (zeroes, ones)

def runs_test(bits):
    n = len(bits)
    zeroes, ones = count_ones_zeroes(bits)
    prop = float(ones)/float(n)
    print("  prop ", prop)
    tau = 2.0/math.sqrt(n)
    print("  tau ", tau)
    if abs(prop - 0.5) > tau:
        return (False, 0.0, None)
    vobs = 1.0
    for i in range(n - 1):
        if bits[i] != bits[i+1]:
            vobs += 1.0
    print("  vobs ", vobs)
    p = math.erfc(abs(vobs - (2.0*n*prop*(1.0-prop)))/(2.0*math.sqrt(2.0*n)*prop*(1-prop)))
    success = (p >= 0.01)
    return (success, p, None)

print(runs_test(xs))
#%%
from __future__ import print_function
import math

def count_ones_zeroes(bits):
    ones = 0
    zeroes = 0
    for bit in bits:
        if (bit == 1):
            ones += 1
        else:
            zeroes += 1
    return (zeroes, ones)

def monobit_test(bits):
    n = len(bits)
    zeroes, ones = count_ones_zeroes(bits)
    s = abs(ones - zeroes)
    print("  Ones count   = %d" % ones)
    print("  Zeroes count = %d" % zeroes)
    p = math.erfc(float(s)/(math.sqrt(float(n)) * math.sqrt(2.0)))
    success = (p >= 0.01)
    return (success, p, None)

print(runs_test(xs))
The output which I am getting is False, i.e.:
prop 0.00019998000199980003
tau 0.01999900007499375
(False, 0.0, None)
What should I do now?
The Lorenz system is chaotic, not random. You implemented the differential equation solver well, but it seems that count_ones_zeroes doesn't do what its name implies, at least not on the data you provide. On xs it returns (zeroes, ones) = (9999, 2), which is not what you want. The code checks each value within the xs array, i.e. an x value (e.g. 8.2), against 1, but x is a float between -20 and 20, so it will usually not equal 1 and will be counted as a zero. Only x == 1 will be counted as a one.
In Python 3, int/int results in a float, so there is no need to cast to float (in contrast to e.g. C or C++): instead of prop = float(ones)/float(n) you can simply write prop = ones/n. Similar statements hold for +, - and *.
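For illustration only, one possible (hypothetical) way to turn the float trajectory into a bit sequence before running the tests is to threshold it, e.g. at the median of the series; how you actually derive bits from the chaotic trajectory is a design decision of its own:

import numpy as np

def floats_to_bits(values):
    # 1 where the sample is above the median of the series, 0 otherwise
    threshold = np.median(values)
    return [1 if v > threshold else 0 for v in values]

bits = floats_to_bits(xs)
print(runs_test(bits))      # runs_test / monobit_test from the question's code
print(monobit_test(bits))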

Find the inverse (reciprocal) of a polynomial modulo another polynomial with coefficients in a finite field

If I have a polynomial P, is there a way to calculate P^-1 modulo Q, Q being another polynomial?
I know that the coefficients of both polynomials belong to the field of integers modulo z, z being an integer.
I'm not sure whether SymPy already has a function for that in its galoistools module.
This is essentially the same as finding polynomials S, T such that PS + QT = 1, which is possible when gcd(P, Q) = 1 and can be done with galoistools.gf_gcdex. For example, let's invert 3x^3+2x+4 modulo x^2+2x+3 over the coefficient field Z/11Z:
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_gcdex

p = ZZ.map([3, 0, 2, 4])
q = ZZ.map([1, 2, 3])
z = 11

s, t, g = gf_gcdex(p, q, z, ZZ)
if len(g) == 1 and g[0] == 1:
    print(s)
else:
    print('no inverse')
This prints [8, 5] - the inverse is 8x+5. Sanity check by hand:
(3x^3+2x+4)*(8x+5) = 24x^4 + 15x^3 + 16x^2 + 42x + 20
= 2x^4 + 4x^3 + 5x^2 + 9x + 9
= (x^2 + 2x + 3)*(2x^2 - 1) + 1
= 1 mod q
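The same check can also be done programmatically with gf_mul and gf_rem from the same module (a small sketch, reusing p, q, s and z from above):

from sympy.polys.galoistools import gf_mul, gf_rem

product = gf_mul(p, s, z, ZZ)      # (3x^3+2x+4)*(8x+5) reduced mod 11
print(gf_rem(product, q, z, ZZ))   # [1] -> the product is congruent to 1 mod q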

Roots of a quadratic function: math domain error [duplicate]

This question already has an answer here:
ValueError: math domain error - Quadratic Equation (Python)
(1 answer)
Closed 4 years ago.
I want to define a function that returns the values of the roots. It is supposed to always return something.
If b**2 - 4ac < 0, then it is supposed to return [], but instead it raises an error.
My code so far is this:
from math import *

def solve(a, b, c):
    x = sqrt(b**2 - 4*a*c)
    if x > 0:
        x1 = (-b + x)/(2*a)
        x2 = (-b - x)/(2*a)
        return [x1, x2]
    elif x == 0:
        x1 = x2 = -b/(2*a)
        return [x1]
    else:
        return []
math.sqrt is undefined for negative numbers and thus raises a ValueError.
If you wish to return the complex square root for negatives, use x**0.5:
x = (b**2 - 4*a*c)**0.5
Alternatively use the cmath.sqrt implementation:
from cmath import sqrt
x = sqrt(b**2 - 4*a*c)
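A minimal sketch of how the whole function might look with cmath, assuming complex roots are acceptable instead of returning []:

from cmath import sqrt

def solve(a, b, c):
    x = sqrt(b**2 - 4*a*c)   # complex result when the discriminant is negative
    if x == 0:
        return [-b / (2*a)]
    return [(-b + x) / (2*a), (-b - x) / (2*a)]

print(solve(1, 0, 1))   # roots of x**2 + 1: 1j and -1j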
sqrt will not accept negative values. To avoid this, you can check your 'else' condition before computing the square root:
from math import sqrt

def solve(a, b, c):
    formula = b**2 - 4*a*c
    if formula < 0:
        return []
    x = sqrt(formula)
    if x > 0:
        x1 = (-b + x)/(2*a)
        x2 = (-b - x)/(2*a)
        return [x1, x2]
    elif x == 0:
        x1 = x2 = -b/(2*a)
        return [x1]

Pure-Python inverse error function

Are there any pure-python implementations of the inverse error function?
I know that SciPy has scipy.special.erfinv(), but that relies on some C extensions. I'd like a pure python implementation.
I've tried writing my own using the Wikipedia and Wolfram references, but it always seems to diverge from the true value when the arg is > 0.9.
I've also attempted to port the underlying C code that Scipy uses (ndtri.c and the cephes polevl.c functions) but that's also not passing my unit tests.
Edit: As requested, I've added the ported code.
Docstrings (and doctests) have been removed because they're longer than the functions. I haven't yet put much effort into making the port more pythonic - I'll worry about that once I get something that passes unit tests.
Supporting functions from cephes polevl.c
def polevl(x, coefs, N):
    ans = 0
    power = len(coefs) - 1
    for coef in coefs[:N]:
        ans += coef * x**power
        power -= 1
    return ans

def p1evl(x, coefs, N):
    return polevl(x, [1] + coefs, N)
Main Inverse Error Function
import math

def inv_erf(z):
    if z < -1 or z > 1:
        raise ValueError("`z` must be between -1 and 1 inclusive")
    if z == 0:
        return 0
    if z == 1:
        return math.inf
    if z == -1:
        return -math.inf

    # From scipy special/cephes/ndtri.c
    def ndtri(y):
        # approximation for 0 <= abs(z - 0.5) <= 3/8
        P0 = [
            -5.99633501014107895267E1,
            9.80010754185999661536E1,
            -5.66762857469070293439E1,
            1.39312609387279679503E1,
            -1.23916583867381258016E0,
        ]
        Q0 = [
            1.95448858338141759834E0,
            4.67627912898881538453E0,
            8.63602421390890590575E1,
            -2.25462687854119370527E2,
            2.00260212380060660359E2,
            -8.20372256168333339912E1,
            1.59056225126211695515E1,
            -1.18331621121330003142E0,
        ]
        # Approximation for interval z = sqrt(-2 log y) between 2 and 8,
        # i.e. y between exp(-2) = .135 and exp(-32) = 1.27e-14.
        P1 = [
            4.05544892305962419923E0,
            3.15251094599893866154E1,
            5.71628192246421288162E1,
            4.40805073893200834700E1,
            1.46849561928858024014E1,
            2.18663306850790267539E0,
            -1.40256079171354495875E-1,
            -3.50424626827848203418E-2,
            -8.57456785154685413611E-4,
        ]
        Q1 = [
            1.57799883256466749731E1,
            4.53907635128879210584E1,
            4.13172038254672030440E1,
            1.50425385692907503408E1,
            2.50464946208309415979E0,
            -1.42182922854787788574E-1,
            -3.80806407691578277194E-2,
            -9.33259480895457427372E-4,
        ]
        # Approximation for interval z = sqrt(-2 log y) between 8 and 64,
        # i.e. y between exp(-32) = 1.27e-14 and exp(-2048) = 3.67e-890.
        P2 = [
            3.23774891776946035970E0,
            6.91522889068984211695E0,
            3.93881025292474443415E0,
            1.33303460815807542389E0,
            2.01485389549179081538E-1,
            1.23716634817820021358E-2,
            3.01581553508235416007E-4,
            2.65806974686737550832E-6,
            6.23974539184983293730E-9,
        ]
        Q2 = [
            6.02427039364742014255E0,
            3.67983563856160859403E0,
            1.37702099489081330271E0,
            2.16236993594496635890E-1,
            1.34204006088543189037E-2,
            3.28014464682127739104E-4,
            2.89247864745380683936E-6,
            6.79019408009981274425E-9,
        ]
        s2pi = 2.50662827463100050242
        code = 1

        if y > (1.0 - 0.13533528323661269189):  # 0.135... = exp(-2)
            y = 1.0 - y
            code = 0

        if y > 0.13533528323661269189:
            y = y - 0.5
            y2 = y * y
            x = y + y * (y2 * polevl(y2, P0, 4) / p1evl(y2, Q0, 8))
            x = x * s2pi
            return x

        x = math.sqrt(-2.0 * math.log(y))
        x0 = x - math.log(x) / x
        z = 1.0 / x
        if x < 8.0:  # y > exp(-32) = 1.2664165549e-14
            x1 = z * polevl(z, P1, 8) / p1evl(z, Q1, 8)
        else:
            x1 = z * polevl(z, P2, 8) / p1evl(z, Q2, 8)
        x = x0 - x1
        if code != 0:
            x = -x
        return x

    result = ndtri((z + 1) / 2.0) / math.sqrt(2)
    return result
I think the error in your code is in the for loop over coefficients in the polevl function. If you replace what you have with the function below everything seems to work.
def polevl(x, coefs, N):
    ans = 0
    power = len(coefs) - 1
    for coef in coefs:
        ans += coef * x**power
        power -= 1
    return ans
I have tested it against scipy's implementation with the following code:
import numpy as np
from scipy.special import erfinv

N = 100000
x = np.random.rand(N) - 1.

# Calculate the inverse of the error function
y = np.zeros(N)
for i in range(N):
    y[i] = inv_erf(x[i])

assert np.allclose(y, erfinv(x))
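As a side note, the original cephes polevl.c evaluates the polynomial with Horner's scheme rather than explicit powers; an equivalent pure-Python version might look like this (the unused N parameter is kept only for signature compatibility with the calls above):

def polevl(x, coefs, N):
    # Horner's scheme: same result as summing coef * x**power, fewer multiplications
    ans = 0.0
    for coef in coefs:
        ans = ans * x + coef
    return ans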
What about sympy? Some digging may be needed to see how it's implemented internally: http://docs.sympy.org/latest/modules/functions/special.html#sympy.functions.special.error_functions.erfinv
from sympy import erfinv
erfinv(0.9).evalf(30)
1.16308715367667425688580351562

Way to solve constraint satisfaction faster than brute force?

I have a CSV that provides a y value for three different x values for each row. When read into a pandas DataFrame, it looks like this:
       5     10     20
0  -13.6  -10.7  -10.3
1  -14.1  -11.2  -10.8
2  -12.3   -9.4   -9.0
That is, for row 0, at 5 the value is -13.6, at 10 the value is -10.7, and at 20 the value is -10.3. These values are the result of an algorithm in the form:
def calc(x, r, b, c, d):
    if x < 10:
        y = (x * r + b) / x
    elif x >= 10 and x < 20:
        y = ((x * r) + (b - c)) / x
    else:
        y = ((x * r) + (b - d)) / x
    return y
I want to find the value of r, b, c, and d for each row. I know certain things about each of the values. For example, for each row: r is in np.arange(-.05, -.11, -.01), b is in np.arange(0, -20.05, -.05), and c and d are in np.arange(0, 85, 5). I also know that d is <= c.
Currently, I am solving this with brute force. For each row, I iterate through every combination of r, b, c, and d and test if the value at the three x values is equal to the known value from the DataFrame. This works, giving me a few combinations for each row that are basically the same except for rounding differences.
The problem is that this approach takes a long time when I need to run it against 2,000+ rows. My question is: is there a faster way than iterating and testing every combination? My understanding is that this is a constraint satisfaction problem but, after that, I have no idea what to narrow in on; there are so many types of constraint satisfaction problems (it seems) that I'm still lost (I'm not even certain that this is such a problem!). Any help in pointing me in the right direction would be greatly appreciated.
I hope I understood the task correctly.
If you know the resolution/discretization of the parameters, it looks like a discrete-optimization problem (in general: hard), which could be solved by CP-approaches.
But if you allow these values to be continuous (and reformulate the formulas), it is:
(1) a Linear Program: if checking for feasible values (there needs to be a valid solution)
(2) a Linear Program: if optimizing parameters for minimization of sum of absolute differences (=errors)
(3) a Quadratic Program: if optimizing parameters for minimization of sum of squared differences (=errors) / equivalent to minimizing euclidean-norm
All three versions can be solved efficiently!
Here is a non-general (could be easily generalized) implementation of (3) using cvxpy to formulate the problem and ecos to solve the QP. Both tools are open-source.
Code
import numpy as np
import time
from cvxpy import *
from random import uniform

""" GENERATE TEST DATA """
def sample_params():
    while True:
        r = uniform(-0.11, -0.05)
        b = uniform(-20.05, 0)
        c = uniform(0, 85)
        d = uniform(0, 85)
        if d <= c:
            return r, b, c, d

def calc(x, r, b, c, d):
    if x < 10:
        y = (x * r + b) / x
    elif x >= 10 and x < 20:
        y = ((x * r) + (b - c)) / x
    else:
        y = ((x * r) + (b - d)) / x
    return y

N = 2000
sampled_params = [sample_params() for i in range(N)]
data_5 = np.array([calc(5, *sampled_params[i]) for i in range(N)])
data_10 = np.array([calc(10, *sampled_params[i]) for i in range(N)])
data_20 = np.array([calc(20, *sampled_params[i]) for i in range(N)])
data = np.empty((N, 3))
for i in range(N):
    data[i, :] = [data_5[i], data_10[i], data_20[i]]

""" SOLVER """
def solve(row):
    """ vars """
    R = Variable(1)
    B = Variable(1)
    C = Variable(1)
    D = Variable(1)
    E = Variable(3)

    """ constraints """
    constraints = []
    # bounds
    constraints.append(R >= -.11)
    constraints.append(R <= -.05)
    constraints.append(B >= -20.05)
    constraints.append(B <= 0.0)
    constraints.append(C >= 0.0)
    constraints.append(C <= 85.0)
    constraints.append(D >= 0.0)
    constraints.append(D <= 85.0)
    constraints.append(D <= C)
    # formula of model
    constraints.append((1.0 / 5.0) * B + R == row[0] + E[0])                       # alternate function form: b/x + r
    constraints.append((1.0 / 10.0) * B - (1.0 / 10.0) * C + R == row[1] + E[1])   # alternate function form: b/x - c/x + r
    constraints.append((1.0 / 20.0) * B - (1.0 / 20.0) * D + R == row[2] + E[2])   # alternate function form: b/x - d/x + r

    """ Objective """
    objective = Minimize(norm(E, 2))

    """ Solve """
    problem = Problem(objective, constraints)
    problem.solve(solver=ECOS, verbose=False)

    return R.value, B.value, C.value, D.value, E.value

start = time.time()
for i in range(N):
    r, b, c, d, e = solve(data[i])
end = time.time()

print('seconds taken: ', end-start)
print('seconds per row: ', (end-start) / N)
Output
('seconds taken: ', 20.620506048202515)
('seconds per row: ', 0.010310253024101258)
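If you prefer variant (2) from the list above instead (minimizing the sum of absolute errors, a pure Linear Program), only the objective in solve needs to change; a sketch:

objective = Minimize(norm(E, 1))   # sum of absolute errors instead of the Euclidean norm

ECOS can handle this formulation as well, since an LP is a special case of the cone programs it supports.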
