""" Implements the PSLQ algorithm for integer relation detection, and derivative algorithms for constant recognition. """ from mptypes import (mpf, eps, mp, mpmathify, nstr, inf) from functions import (log, exp, sqrt) from libmpf import to_fixed, from_man_exp, MODE from libelefun import sqrt_fixed # round to nearest integer (can be done more elegantly...) def round_fixed(x, prec): return ((x + (1<<(prec-1))) >> prec) << prec def pslq(x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False): r""" Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)`` uses the PSLQ algorithm to find a list of integers `[c_0, c_1, ..., c_n]` such that .. math :: |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol} and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector exists, :func:`pslq` returns ``None``. The tolerance defaults to 3/4 of the working precision. **Examples** Find rational approximations for `\pi`:: >>> from mpmath import * >>> mp.dps = 15 >>> pslq([pi, 1], tol=0.01) [-7, 22] >>> pslq([pi, 1], tol=0.001) [113, -355] Pi is not a rational number with denominator less than 1000:: >>> pslq([pi, 1]) >>> To within the standard precision, it can however be approximated by at least one rational number with denominator less than `10^{12}`:: >>> pslq([pi, 1], maxcoeff=10**12) [-75888275702L, 238410049439L] >>> print mpf(_[1])/_[0] -3.14159265358979 The PSLQ algorithm can be applied to long vectors. For example, we can investigate the rational (in)dependence of integer square roots:: >>> mp.dps = 30 >>> pslq([sqrt(n) for n in range(2, 5+1)]) >>> >>> pslq([sqrt(n) for n in range(2, 6+1)]) >>> >>> pslq([sqrt(n) for n in range(2, 8+1)]) [2, 0, 0, 0, 0, 0, -1] **Machin formulas** A famous formula for `\pi` is Machin's, .. math :: \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239 There are actually infinitely many formulas of this type. Two others are .. math :: \frac{\pi}{4} = \operatorname{acot} 1 \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57 + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443 We can easily verify the formulas using the PSLQ algorithm:: >>> mp.dps = 30 >>> pslq([pi/4, acot(1)]) [1, -1] >>> pslq([pi/4, acot(5), acot(239)]) [1, -4, 1] >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)]) [1, -12, -32, 5, -12] We could try to generate a custom Machin-like formula by running the PSLQ algorithm with a few inverse cotangent values, for example acot(2), acot(3) ... acot(10). Unfortunately, there is a linear dependence among these values, resulting in only that dependence being detected, with a zero coefficient for `\pi`:: >>> pslq([pi] + [acot(n) for n in range(2,11)]) [0, 1, -1, 0, 0, 0, -1, 0, 0, 0] We get better luck by removing linearly dependent terms:: >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)]) [1, -8, 0, 0, 4, 0, 0, 0] In other words, we found the following formula:: >>> print 8*acot(2) - 4*acot(7) 3.14159265358979323846264338328 >>> print pi 3.14159265358979323846264338328 **Algorithm** This is a fairly direct translation to Python of the pseudocode given by David Bailey, "The PSLQ Integer Relation Algorithm": http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html The present implementation uses fixed-point instead of floating-point arithmetic, since this is significantly (about 7x) faster. 
""" n = len(x) assert n >= 2 # At too low precision, the algorithm becomes meaningless prec = mp.prec assert prec >= 53 if verbose and prec // max(2,n) < 5: print "Warning: precision for PSLQ may be too low" target = int(prec * 0.75) if tol is None: tol = mpf(2)**(-target) else: tol = mpmathify(tol) extra = 60 prec += extra if verbose: print "PSLQ using prec %i and tol %s" % (prec, nstr(tol)) tol = to_fixed(tol._mpf_, prec) assert tol # Convert to fixed-point numbers. The dummy None is added so we can # use 1-based indexing. (This just allows us to be consistent with # Bailey's indexing. The algorithm is 100 lines long, so debugging # a single wrong index can be painful.) x = [None] + [to_fixed(mpf(xk)._mpf_, prec) for xk in x] # Sanity check on magnitudes minx = min(abs(xx) for xx in x[1:]) if not minx: raise ValueError("PSLQ requires a vector of nonzero numbers") if minx < tol//100: if verbose: print "STOPPING: (one number is too small)" return None g = sqrt_fixed((4<> prec) s[k] = sqrt_fixed(t, prec) t = s[1] y = x[:] for k in xrange(1, n+1): y[k] = (x[k] << prec) // t s[k] = (s[k] << prec) // t # step 3 for i in xrange(1, n+1): for j in xrange(i+1, n): H[i,j] = 0 if i <= n-1: if s[i]: H[i,i] = (s[i+1] << prec) // s[i] else: H[i,i] = 0 for j in range(1, i): sjj1 = s[j]*s[j+1] if sjj1: H[i,j] = ((-y[i]*y[j])<> prec) for k in xrange(1, j+1): H[i,k] = H[i,k] - (t*H[j,k] >> prec) for k in xrange(1, n+1): A[i,k] = A[i,k] - (t*A[j,k] >> prec) B[k,j] = B[k,j] + (t*B[k,i] >> prec) # Main algorithm for REP in range(maxsteps): # Step 1 m = -1 szmax = -1 for i in range(1, n): h = H[i,i] sz = (sqrt_fixed((4<> (prec*(i-1)) if sz > szmax: m = i szmax = sz # Step 2 y[m], y[m+1] = y[m+1], y[m] tmp = {} for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i] for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i] for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m] # Step 3 if m <= n - 2: t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec) # A zero element probably indicates that the precision has # been exhausted. XXX: this could be spurious, due to # using fixed-point arithmetic if not t0: break t1 = (H[m,m] << prec) // t0 t2 = (H[m,m+1] << prec) // t0 for i in xrange(m, n+1): t3 = H[i,m] t4 = H[i,m+1] H[i,m] = (t1*t3+t2*t4) >> prec H[i,m+1] = (-t2*t3+t1*t4) >> prec # Step 4 for i in xrange(m+1, n+1): for j in xrange(min(i-1, m+1), 0, -1): try: t = round_fixed((H[i,j] << prec)//H[j,j], prec) # Precision probably exhausted except ZeroDivisionError: break y[j] = y[j] + ((t*y[i]) >> prec) for k in xrange(1, j+1): H[i,k] = H[i,k] - (t*H[j,k] >> prec) for k in xrange(1, n+1): A[i,k] = A[i,k] - (t*A[j,k] >> prec) B[k,j] = B[k,j] + (t*B[k,i] >> prec) # Until a relation is found, the error typically decreases # slowly (e.g. a factor 1-10) with each step TODO: we could # compare err from two successive iterations. If there is a # large drop (several orders of magnitude), that indicates a # "high quality" relation was detected. Reporting this to # the user somehow might be useful. best_err = maxcoeff<> prec) for j in \ range(1,n+1)] if max(abs(v) for v in vec) < maxcoeff: if verbose: print "FOUND relation at iter %i/%i, error: %s" % \ (REP, maxsteps, nstr(err / mpf(2)**prec, 1)) return vec best_err = min(err, best_err) # Calculate a lower bound for the norm. We could do this # more exactly (using the Euclidean norm) but there is probably # no practical benefit. 
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = inf

        if verbose:
            print "%i/%i: Error: %8s Norm: %s" % \
                (REP, maxsteps, nstr(best_err / mpf(2)**prec, 1), norm)

        if norm >= maxcoeff:
            break

    if verbose:
        print "CANCELLING after step %i/%i." % (REP, maxsteps)
        print "Could not find an integer relation. Norm bound: %s" % norm

    return None

def findpoly(x, n=1, **kwargs):
    r"""
    ``findpoly(x, n)`` returns the coefficients of an integer polynomial
    `P` of degree at most `n` such that `P(x) \approx 0`. If no
    polynomial having `x` as a root can be found, :func:`findpoly`
    returns ``None``.

    :func:`findpoly` works by successively calling :func:`pslq` with
    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
    `[1, x, x^2, .., x^n]` as input. Keyword arguments given to
    :func:`findpoly` are forwarded verbatim to :func:`pslq`. In
    particular, you can specify a tolerance for `P(x)` with ``tol``
    and a maximum permitted coefficient size with ``maxcoeff``.

    For large values of `n`, it is recommended to run :func:`findpoly`
    at high precision; preferably 50 digits or more.

    **Examples**

    By default (degree `n = 1`), :func:`findpoly` simply finds a linear
    polynomial with a rational root::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> findpoly(0.7)
        [-10, 7]

    The generated coefficient list is valid input to ``polyval`` and
    ``polyroots``::

        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
        -2.0e-16
        >>> for r in polyroots(findpoly(phi, 2)):
        ...     print r
        ...
        -0.618033988749895
        1.61803398874989

    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
    solutions to quadratic equations. As we find here, `1+\sqrt 2` is a
    root of the polynomial `x^2 - 2x - 1`::

        >>> findpoly(1+sqrt(2), 2)
        [1, -2, -1]
        >>> print findroot(lambda x: x**2 - 2*x - 1, 1)
        2.4142135623731

    Despite only containing square roots, the following number results
    in a polynomial of degree 4::

        >>> findpoly(sqrt(2)+sqrt(3), 4)
        [1, 0, -10, 0, 1]

    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of lower
    degree having `r` as a root does not exist. Given sufficient
    precision, :func:`findpoly` will usually find the correct minimal
    polynomial of a given algebraic number.

    **Non-algebraic numbers**

    If :func:`findpoly` fails to find a polynomial with given
    coefficient size and tolerance constraints, that means no such
    polynomial exists.

    We can verify that `\pi` is not an algebraic number of degree 3
    with coefficients less than 1000::

        >>> mp.dps = 15
        >>> findpoly(pi, 3)
        >>>

    It is always possible to find an algebraic approximation of a number
    using one (or several) of the following methods:

    1. Increasing the permitted degree
    2. Allowing larger coefficients
    3. Reducing the tolerance

    One example of each method is shown below::

        >>> mp.dps = 15
        >>> findpoly(pi, 4)
        [95, -545, 863, -183, -298]
        >>> findpoly(pi, 3, maxcoeff=10000)
        [836, -1734, -2658, -457]
        >>> findpoly(pi, 3, tol=1e-7)
        [-4, 22, -29, -2]

    It is unknown whether Euler's constant is transcendental (or even
    irrational). We can use :func:`findpoly` to check that if it is an
    algebraic number, its minimal polynomial must have degree at least
    7, or a coefficient of magnitude at least 1000000::

        >>> mp.dps = 200
        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
        >>>

    Note that the high precision and strict tolerance are necessary for
    such high-degree runs, since otherwise unwanted low-accuracy
    approximations will be detected.

    It may also be necessary to set maxsteps high to prevent a premature
    exit (before the coefficient bound has been reached). Running with
    ``verbose=True`` to get an idea of what is happening can be useful.
    """
    x = mpf(x)
    assert n >= 1
    if x == 0:
        return [1, 0]
    xs = [mpf(1)]
    for i in range(1,n+1):
        xs.append(x**i)
        a = pslq(xs, **kwargs)
        if a is not None:
            return a[::-1]

def fracgcd(p, q):
    x, y = p, q
    while y:
        x, y = y, x % y
    if x != 1:
        p //= x
        q //= x
    if q == 1:
        return p
    return p, q

def pslqstring(r, constants):
    q = r[0]
    r = r[1:]
    s = []
    for i in range(len(r)):
        p = r[i]
        if p:
            z = fracgcd(-p,q)
            cs = constants[i][1]
            if cs == '1':
                cs = ''
            else:
                cs = '*' + cs
            if isinstance(z, (int, long)):
                if z > 0: term = str(z) + cs
                else:     term = ("(%s)" % z) + cs
            else:
                term = ("(%s/%s)" % z) + cs
            s.append(term)
    s = ' + '.join(s)
    if '+' in s or '*' in s:
        s = '(' + s + ')'
    return s or '0'

def prodstring(r, constants):
    q = r[0]
    r = r[1:]
    num = []
    den = []
    for i in range(len(r)):
        p = r[i]
        if p:
            z = fracgcd(-p,q)
            cs = constants[i][1]
            if isinstance(z, (int, long)):
                if abs(z) == 1: t = cs
                else:           t = '%s**%s' % (cs, abs(z))
                ([num,den][z<0]).append(t)
            else:
                t = '%s**(%s/%s)' % (cs, abs(z[0]), z[1])
                ([num,den][z[0]<0]).append(t)
    num = '*'.join(num)
    den = '*'.join(den)
    if num and den: return "(%s)/(%s)" % (num, den)
    if num: return num
    if den: return "1/(%s)" % den

def quadraticstring(t,a,b,c):
    if c < 0:
        a,b,c = -a,-b,-c
    u1 = (-b+sqrt(b**2-4*a*c))/(2*c)
    u2 = (-b-sqrt(b**2-4*a*c))/(2*c)
    if abs(u1-t) < abs(u2-t):
        if b:  s = '((%s+sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
        else:  s = '(sqrt(%s)/%s)' % (-4*a*c,2*c)
    else:
        if b:  s = '((%s-sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c)
        else:  s = '(-sqrt(%s)/%s)' % (-4*a*c,2*c)
    return s

# Transformations y = f(x,c). The second entry is a template for the
# inverse, giving x in terms of the placeholders $y and $c. The third
# entry indicates whether the transformation is redundant when c = 1.
transforms = [
  (lambda x,c: x*c, '$y/$c', 0),
  (lambda x,c: x/c, '$c*$y', 1),
  (lambda x,c: c/x, '$c/$y', 0),
  (lambda x,c: (x*c)**2, 'sqrt($y)/$c', 0),
  (lambda x,c: (x/c)**2, '$c*sqrt($y)', 1),
  (lambda x,c: (c/x)**2, '$c/sqrt($y)', 0),
  (lambda x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
  (lambda x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
  (lambda x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
  (lambda x,c: sqrt(x*c), '$y**2/$c', 0),
  (lambda x,c: sqrt(x/c), '$c*$y**2', 1),
  (lambda x,c: sqrt(c/x), '$c/$y**2', 0),
  (lambda x,c: c*sqrt(x), '$y**2/$c**2', 1),
  (lambda x,c: sqrt(x)/c, '$c**2*$y**2', 1),
  (lambda x,c: c/sqrt(x), '$c**2/$y**2', 1),
  (lambda x,c: exp(x*c), 'log($y)/$c', 0),
  (lambda x,c: exp(x/c), '$c*log($y)', 1),
  (lambda x,c: exp(c/x), '$c/log($y)', 0),
  (lambda x,c: c*exp(x), 'log($y/$c)', 1),
  (lambda x,c: exp(x)/c, 'log($c*$y)', 1),
  (lambda x,c: c/exp(x), 'log($c/$y)', 0),
  (lambda x,c: log(x*c), 'exp($y)/$c', 0),
  (lambda x,c: log(x/c), '$c*exp($y)', 1),
  (lambda x,c: log(c/x), '$c/exp($y)', 0),
  (lambda x,c: c*log(x), 'exp($y/$c)', 1),
  (lambda x,c: log(x)/c, 'exp($c*$y)', 1),
  (lambda x,c: c/log(x), 'exp($c/$y)', 0),
]

def identify(x, constants=[], tol=None, maxcoeff=1000, full=False, verbose=False):
    r"""
    Given a real number `x`, ``identify(x)`` attempts to find an exact
    formula for `x`. This formula is returned as a string. If no match
    is found, ``None`` is returned. With ``full=True``, a list of
    matching formulas is returned.

    As a simple example, :func:`identify` will find an algebraic formula
    for the golden ratio::

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> identify(phi)
        '((1+sqrt(5))/2)'

    :func:`identify` can identify simple algebraic numbers and simple
    combinations of given base constants, as well as certain basic
    transformations thereof. More specifically, :func:`identify`
    looks for the following:

    1. Fractions
    2. Quadratic algebraic numbers
    3. Rational linear combinations of the base constants
    4. Any of the above after first transforming `x` into `f(x)` where
       `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either
       directly or with `x` or `f(x)` multiplied or divided by one of
       the base constants
    5. Products of fractional powers of the base constants and small
       integers

    Base constants can be given as a list of strings representing mpmath
    expressions (:func:`identify` will ``eval`` the strings to numerical
    values and use the original strings for the output), or as a dict of
    formula:value pairs.

    In order not to produce spurious results, :func:`identify` should
    be used with high precision; preferably 50 digits or more.

    **Examples**

    Simple identifications can be performed safely at standard
    precision. Here the default recognition of rational, algebraic,
    and exp/log of algebraic numbers is demonstrated::

        >>> mp.dps = 15
        >>> identify(0.22222222222222222)
        '(2/9)'
        >>> identify(1.9662210973805663)
        'sqrt(((24+sqrt(48))/8))'
        >>> identify(4.1132503787829275)
        'exp((sqrt(8)/2))'
        >>> identify(0.881373587019543)
        'log(((2+sqrt(8))/2))'

    By default, :func:`identify` does not recognize `\pi`. At standard
    precision it finds a not too useful approximation. At slightly
    increased precision, this approximation is no longer accurate enough
    and :func:`identify` more correctly returns ``None``::

        >>> identify(pi)
        '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))'
        >>> mp.dps = 30
        >>> identify(pi)
        >>>

    Numbers such as `\pi`, and simple combinations of user-defined
    constants, can be identified if they are provided explicitly::

        >>> identify(3*pi-2*e, ['pi', 'e'])
        '(3*pi + (-2)*e)'

    Here is an example using a dict of constants. Note that the
    constants need not be "atomic"; :func:`identify` can just as well
    express the given number in terms of expressions given by formulas::

        >>> identify(pi+e, {'a':pi+2, 'b':2*e})
        '((-2) + 1*a + (1/2)*b)'

    Next, we attempt some identifications with a set of base constants.
    It is necessary to increase the precision a bit.

        >>> mp.dps = 50
        >>> base = ['sqrt(2)','pi','log(2)']
        >>> identify(0.25, base)
        '(1/4)'
        >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base)
        '(2*sqrt(2) + 3*pi + (5/7)*log(2))'
        >>> identify(exp(pi+2), base)
        'exp((2 + 1*pi))'
        >>> identify(1/(3+sqrt(2)), base)
        '((3/7) + (-1/7)*sqrt(2))'
        >>> identify(sqrt(2)/(3*pi+4), base)
        'sqrt(2)/(4 + 3*pi)'
        >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base)
        '5**(1/3)*pi*log(2)**2'

    An example of an erroneous solution being found when too low
    precision is used::

        >>> mp.dps = 15
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))'
        >>> mp.dps = 50
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '1/(3*pi + (-4)*e + 2*sqrt(2))'

    **Finding approximate solutions**

    The tolerance ``tol`` defaults to 3/4 of the working precision.
    Lowering the tolerance is useful for finding approximate matches.

    We can for example try to generate approximations for pi::

        >>> mp.dps = 15
        >>> identify(pi, tol=1e-2)
        '(22/7)'
        >>> identify(pi, tol=1e-3)
        '(355/113)'
        >>> identify(pi, tol=1e-10)
        '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))'

    With ``full=True``, and by supplying a few base constants,
    ``identify`` can generate almost endless lists of approximations
    for any number (the output below has been truncated to show only
    the first few)::

        >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True):
        ...     print p
        ... # doctest: +ELLIPSIS
        e/log((6 + (-4/3)*e))
        (3**3*5*e*catalan**2)/(2*7**2)
        sqrt(((-13) + 1*e + 22*catalan))
        log(((-6) + 24*e + 4*catalan)/e)
        exp(catalan*((-1/5) + (8/15)*e))
        catalan*(6 + (-6)*e + 15*catalan)
        sqrt((5 + 26*e + (-3)*catalan))/e
        e*sqrt(((-27) + 2*e + 25*catalan))
        log(((-1) + (-11)*e + 59*catalan))
        ((3/20) + (21/20)*e + (3/20)*catalan)
        ...

    The numerical values are roughly as close to pi as permitted by the
    specified tolerance:

        >>> print e/log(6-4*e/3)
        3.14157719846001
        >>> print 135*e*catalan**2/98
        3.14166950419369
        >>> print sqrt(e-13+22*catalan)
        3.14158000062992
        >>> print log(24*e-6+4*catalan)-1
        3.14158791577159

    **Symbolic processing**

    The output formula can be evaluated as a Python expression.
    Note however that if fractions (like '2/3') are present in the
    formula, Python's :func:`eval()` may erroneously perform integer
    division. Note also that the output is not necessarily in the
    algebraically simplest form::

        >>> identify(sqrt(2))
        '(sqrt(8)/2)'

    As a solution to both problems, consider using SymPy's
    :func:`sympify` to convert the formula into a symbolic expression.
    SymPy can be used to pretty-print or further simplify the formula
    symbolically::

        >>> from sympy import sympify
        >>> sympify(identify(sqrt(2)))
        2**(1/2)

    Sometimes :func:`identify` can simplify an expression further than
    a symbolic algorithm::

        >>> from sympy import simplify
        >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)')
        >>> x
        (3/2 - 5**(1/2)/2)**(-1/2)
        >>> x = simplify(x)
        >>> x
        2/(6 - 2*5**(1/2))**(1/2)
        >>> mp.dps = 30
        >>> x = sympify(identify(x.evalf(30)))
        >>> x
        1/2 + 5**(1/2)/2

    (In fact, this functionality is available directly in SymPy as the
    function :func:`nsimplify`, which is essentially a wrapper for
    :func:`identify`.)

    **Miscellaneous issues and limitations**

    The input `x` must be a real number. All base constants must be
    positive real numbers and must not be rationals or rational linear
    combinations of each other.

    The worst-case computation time grows quickly with the number of
    base constants. Already with 3 or 4 base constants,
    :func:`identify` may require several seconds to finish. To search
    for relations among a large number of constants, you should
    consider using :func:`pslq` directly.

    The extended transformations are applied to x, not the constants
    separately. As a result, ``identify`` will for example be able to
    recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but
    not ``2*exp(pi)+3``. It will be able to recognize the latter if
    ``exp(pi)`` is given explicitly as a base constant.
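
    For instance, the latter number could be attempted along the lines
    of the following sketch (not shown as a doctest, since the exact
    output string is not guaranteed here)::

        mp.dps = 50
        identify(2*exp(pi)+3, ['exp(pi)'])   # should find 3 + 2*exp(pi)
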
""" solutions = [] def addsolution(s): if verbose: print "Found: ", s solutions.append(s) x = mpf(x) # Further along, x will be assumed positive if x == 0: if full: return ['0'] else: return '0' if x < 0: sol = identify(-x, constants, tol, maxcoeff, full, verbose) if sol is None: return sol if full: return ["-(%s)"%s for s in sol] else: return "-(%s)" % sol if tol: tol = mpf(tol) else: tol = eps**0.7 M = maxcoeff if isinstance(constants, dict): constants = [(mpf(v), name) for (name, v) in constants.items()] else: import sympy.mpmath constants = [(eval(p, sympy.mpmath.__dict__), p) for p in constants] # We always want to find at least rational terms if 1 not in [value for (name, value) in constants]: constants = [(mpf(1), '1')] + constants # PSLQ with simple algebraic and functional transformations for ft, ftn, red in transforms: for c, cn in constants: if red and cn == '1': continue t = ft(x,c) # Prevent exponential transforms from wreaking havoc if abs(t) > M**2 or abs(t) < tol: continue # Linear combination of base constants r = pslq([t] + [a[0] for a in constants], tol, M) s = None if r is not None and max(abs(uw) for uw in r) <= M and r[0]: s = pslqstring(r, constants) # Quadratic algebraic numbers else: q = pslq([mpf(1), t, t**2], tol, M) if q is not None and len(q) == 3 and q[2]: aa, bb, cc = q if max(abs(aa),abs(bb),abs(cc)) <= M: s = quadraticstring(t,aa,bb,cc) if s: if cn == '1' and ('/$c' in ftn): s = ftn.replace('$y', s).replace('/$c', '') else: s = ftn.replace('$y', s).replace('$c', cn) addsolution(s) if not full: return solutions[0] if verbose: print "." # Check for a direct multiplicative formula if x != 1: # Allow fractional powers of fractions ilogs = [2,3,5,7] # Watch out for existing fractional powers of fractions logs = [] for a, s in constants: if not sum(bool(findpoly(log(a)/log(i),1)) for i in ilogs): logs.append((log(a), s)) logs = [(log(i),str(i)) for i in ilogs] + logs r = pslq([log(x)] + [a[0] for a in logs], tol, M) if r is not None and max(abs(uw) for uw in r) <= M and r[0]: addsolution(prodstring(r, logs)) if not full: return solutions[0] if full: return sorted(solutions, key=len) else: return None if __name__ == '__main__': import doctest doctest.testmod()