switching to high quality piper tts and added label translations
This commit is contained in:
@@ -0,0 +1,6 @@
|
||||
from . import calculus
|
||||
# XXX: hack to set methods
|
||||
from . import approximation
|
||||
from . import differentiation
|
||||
from . import extrapolation
|
||||
from . import polynomials
|
||||
@@ -0,0 +1,246 @@
|
||||
from ..libmp.backend import xrange
|
||||
from .calculus import defun
|
||||
|
||||
#----------------------------------------------------------------------------#
|
||||
# Approximation methods #
|
||||
#----------------------------------------------------------------------------#
|
||||
|
||||
# The Chebyshev approximation formula is given at:
|
||||
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
|
||||
|
||||
# The only major changes in the following code is that we return the
|
||||
# expanded polynomial coefficients instead of Chebyshev coefficients,
|
||||
# and that we automatically transform [a,b] -> [-1,1] and back
|
||||
# for convenience.
|
||||
|
||||
# Coefficient in Chebyshev approximation
|
||||
# Coefficient in Chebyshev approximation
def chebcoeff(ctx,f,a,b,j,N):
    """
    Compute the j-th Chebyshev coefficient of f on [a, b], using the
    N-point discrete cosine sum from the Chebyshev approximation
    formula (nodes are mapped from [-1, 1] to [a, b]).
    """
    half = ctx.mpf(0.5)
    acc = ctx.mpf(0)
    for i in range(1, N+1):
        # Chebyshev node on [-1, 1]
        node = ctx.cospi((i-half)/N)
        # Sample f at the node mapped to [a, b], weighted by cos(pi*j*(i-1/2)/N)
        sample = f(node*(b-a)*half + (b+a)*half)
        acc += sample * ctx.cospi(j*(i-half)/N)
    return 2*acc/N
|
||||
|
||||
# Generate Chebyshev polynomials T_n(ax+b) in expanded form
|
||||
def chebT(ctx, a=1, b=0):
    """
    Generate the Chebyshev polynomials T_n(a*x + b) in expanded form,
    yielding each one as a list of coefficients [c0, c1, ...] in
    increasing powers of x.
    """
    prev = [1]              # T_0 = 1
    yield prev
    cur = [b, a]            # T_1(a*x+b) = a*x + b
    while True:
        yield cur
        # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
        nxt = [0]
        for c in cur:
            nxt.append(2*a*c)       # 2*a*x * T[n]
        for i, c in enumerate(cur):
            nxt[i] += 2*b*c         # + 2*b * T[n]
        for i, c in enumerate(prev):
            nxt[i] -= c             # - T[n-1]
        cur, prev = nxt, cur
|
||||
|
||||
@defun
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Computes a polynomial of degree `N-1` that approximates the
    given function `f` on the interval `[a, b]`. With ``error=True``,
    :func:`~mpmath.chebyfit` also returns an accurate estimate of the
    maximum absolute error; that is, the maximum value of
    `|f(x) - P(x)|` for `x \in [a, b]`.

    :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
    which gives a nearly optimal solution: that is, the maximum
    error of the approximating polynomial is very close to
    the smallest possible for any polynomial of the same degree.

    Chebyshev approximation is very useful if one needs repeated
    evaluation of an expensive function, such as function defined
    implicitly by an integral or a differential equation. (For
    example, it could be used to turn a slow mpmath function
    into a fast machine-precision version of the same.)

    **Examples**

    Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
    of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
        >>> nprint(poly)
        [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
        >>> nprint(err, 12)
        1.61351758081e-5

    The polynomial can be evaluated using ``polyval``::

        >>> nprint(polyval(poly, 1.6), 12)
        -0.0291858904138
        >>> nprint(cos(1.6), 12)
        -0.0291995223013

    Sampling the true error at 1000 points shows that the error
    estimate generated by ``chebyfit`` is remarkably good::

        >>> error = lambda x: abs(cos(x) - polyval(poly, x))
        >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
        1.61349954245e-5

    **Choice of degree**

    The degree `N` can be set arbitrarily high, to obtain an
    arbitrarily good approximation. As a rule of thumb, an
    `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
    places on a unit interval (although this depends on how
    well-behaved `f` is). The cost grows accordingly: ``chebyfit``
    evaluates the function `(N^2)/2` times to compute the
    coefficients and an additional `N` times to estimate the error.

    **Possible issues**

    One should be careful to use a sufficiently high working
    precision both when calling ``chebyfit`` and when evaluating
    the resulting polynomial, as the polynomial is sometimes
    ill-conditioned. It is for example difficult to reach
    15-digit accuracy when evaluating the polynomial using
    machine precision floats, no matter the theoretical
    accuracy of the polynomial. (The option to return the
    coefficients in Chebyshev form should be made available
    in the future.)

    It is important to note the Chebyshev approximation works
    poorly if `f` is not smooth. A function containing singularities,
    rapid oscillation, etc can be approximated more effectively by
    multiplying it by a weight function that cancels out the
    nonsmooth features, or by dividing the interval into several
    segments.
    """
    a, b = ctx._as_points(interval)
    orig = ctx.prec
    try:
        # Guard digits: the coefficient sums suffer cancellation that
        # grows roughly like sqrt(N)
        ctx.prec = orig + int(N**0.5) + 20
        # Chebyshev-basis coefficients of f mapped from [a, b] to [-1, 1]
        c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
        # d accumulates the expanded (monomial-basis) coefficients,
        # lowest power first
        d = [ctx.zero] * N
        # The constant term enters with weight 1/2 in the Chebyshev
        # formula; pre-subtracting half of c[0] corrects for the full
        # c[0]*T_0 added by the loop below
        d[0] = -c[0]/2
        h = ctx.mpf(0.5)
        # Chebyshev polynomials composed with the affine map [a,b] -> [-1,1]
        T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        for (k, Tk) in zip(range(N), T):
            for i in range(len(Tk)):
                d[i] += c[k]*Tk[i]
        # Reverse to polyval's highest-degree-first convention
        d = d[::-1]
        # Estimate maximum error by sampling at the Chebyshev extrema,
        # where the error of the truncated expansion tends to peak
        err = ctx.zero
        for k in range(N):
            x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
            err = max(err, abs(f(x) - ctx.polyval(d, x)))
    finally:
        ctx.prec = orig
    if error:
        # Unary + rounds err down to the caller's precision
        return d, +err
    else:
        return d
|
||||
|
||||
@defun
def fourier(ctx, f, interval, N):
    r"""
    Computes the Fourier series of degree `N` of the given function
    on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
    two lists `(c, s)` of coefficients (the cosine series and sine
    series, respectively), such that

    .. math ::

        f(x) \sim \sum_{k=0}^N
            c_k \cos(k m x) + s_k \sin(k m x)

    where `m = 2 \pi / (b-a)`.

    Note that many texts define the first coefficient as `2 c_0` instead
    of `c_0`. The easiest way to evaluate the computed series correctly
    is to pass it to :func:`~mpmath.fourierval`.

    **Examples**

    The function `f(x) = x` has a simple Fourier series on the standard
    interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
    the function has odd symmetry), and the sine coefficients are
    rational numbers::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
        >>> nprint(c)
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        >>> nprint(s)
        [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]

    This computes a Fourier series of a nonsymmetric function on
    a nonstandard interval::

        >>> I = [-1, 1.5]
        >>> f = lambda x: x**2 - 4*x + 1
        >>> cs = fourier(f, I, 4)
        >>> nprint(cs[0])
        [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
        >>> nprint(cs[1])
        [0.0, -2.6255, 0.580905, 0.219974, -0.540057]

    It is instructive to plot a function along with its truncated
    Fourier series::

        >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP

    Fourier series generally converge slowly (and may not converge
    pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
    series gives an `L^2` error corresponding to 2-digit accuracy::

        >>> I = [-1, 1]
        >>> cs = fourier(cosh, I, 9)
        >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
        >>> nprint(sqrt(quad(g, I)))
        0.00467963

    :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
    the accuracy (and speed) can be improved by including all singular
    points in the interval specification::

        >>> nprint(fourier(abs, [-1, 1], 0), 10)
        ([0.5000441648], [0.0])
        >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
        ([0.5], [0.0])

    """
    interval = ctx._as_points(interval)
    a = interval[0]
    b = interval[-1]
    # Period length; the n-th mode has angular frequency m = 2*pi*n/L
    L = b-a
    cos_series = []
    sin_series = []
    # Coefficients below ~10*eps are quadrature noise; snap them to zero
    cutoff = ctx.eps*10
    for n in xrange(N+1):
        m = 2*n*ctx.pi/L
        # Euler formulas, integrated with Gauss-Legendre quadrature over
        # all points of the interval specification (so interior singular
        # points listed by the caller become quadrature endpoints)
        an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
        bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
        if n == 0:
            # Return c_0 directly rather than the textbook 2*c_0
            an /= 2
        if abs(an) < cutoff: an = ctx.zero
        if abs(bn) < cutoff: bn = ctx.zero
        cos_series.append(an)
        sin_series.append(bn)
    return cos_series, sin_series
|
||||
|
||||
@defun
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed
    by :func:`~mpmath.fourier` for the given interval) at the point `x`.

    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    # Angular frequency of the fundamental mode on the interval
    m = 2*ctx.pi/(ab[-1]-ab[0])
    s = ctx.zero
    # Skip zero coefficients to avoid needless trig evaluations
    s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
    s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
    return s
|
||||
@@ -0,0 +1,6 @@
|
||||
class CalculusMethods(object):
    """Namespace class onto which calculus functions are installed as methods."""
    pass

def defun(f):
    """Register *f* as a method of CalculusMethods and return it unchanged.

    Used as a decorator so that module-level functions taking an explicit
    ``ctx`` first argument also become available as context methods.
    """
    setattr(CalculusMethods, f.__name__, f)
    return f
|
||||
@@ -0,0 +1,647 @@
|
||||
from ..libmp.backend import xrange
|
||||
from .calculus import defun
|
||||
|
||||
try:
|
||||
iteritems = dict.iteritems
|
||||
except AttributeError:
|
||||
iteritems = dict.items
|
||||
|
||||
#----------------------------------------------------------------------------#
|
||||
# Differentiation #
|
||||
#----------------------------------------------------------------------------#
|
||||
|
||||
@defun
def difference(ctx, s, n):
    r"""
    Return the `n`-th forward difference of the sequence `(s_k)`,
    which must contain at least `n+1` items:

    .. math ::

        \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
    """
    n = int(n)
    # Running signed binomial coefficient, starting at (-1)^n * C(n, 0)
    coeff = (-1) ** (n & 1)
    total = ctx.zero
    for k in xrange(n+1):
        total += coeff * s[k]
        # Advance C(n, k) -> C(n, k+1), flipping the sign as we go
        coeff = (coeff * (k-n)) // (k+1)
    return total
|
||||
|
||||
def hsteps(ctx, f, x, n, prec, **options):
    """
    Generate the samples needed for an order-`n` finite difference of
    `f` at `x`.

    Returns ``(values, norm, workprec)``: the list of function samples
    ``f(x + k*h)``, the step normalization (the `n`-th difference of
    ``values`` divided by ``norm**n`` approximates the derivative), and
    the working precision at which the samples were computed.

    Recognized options: ``singular``, ``addprec``, ``direction``,
    ``relative``, ``h`` (see :func:`~mpmath.diff`).
    """
    singular = options.get('singular')
    addprec = options.get('addprec', 10)
    direction = options.get('direction', 0)
    # Each difference order costs roughly one "unit" of precision to
    # cancellation, so work at (n+1) times the target plus guard digits
    workprec = (prec+2*addprec) * (n+1)
    orig = ctx.prec
    try:
        ctx.prec = workprec
        h = options.get('h')
        if h is None:
            if options.get('relative'):
                # Scale the step to the magnitude of x
                hextramag = int(ctx.mag(x))
            else:
                hextramag = 0
            # h is a power of two so the scaling is exact
            h = ctx.ldexp(1, -prec-addprec-hextramag)
        else:
            h = ctx.convert(h)
        # Directed: steps x, x+h, ... x+n*h
        if direction:
            h *= ctx.sign(direction)
            steps = xrange(n+1)
            norm = h
        # Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
        else:
            steps = xrange(-n, n+1, 2)
            norm = (2*h)
        # Perturb so that x itself is never sampled (removable singularities)
        if singular:
            x += 0.5*h
        values = [f(x+k*h) for k in steps]
        return values, norm, workprec
    finally:
        ctx.prec = orig
|
||||
|
||||
|
||||
@defun
def diff(ctx, f, x, n=1, **options):
    r"""
    Numerically computes the derivative of `f`, `f'(x)`, or generally for
    an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
    A few basic examples are::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> diff(lambda x: x**2 + x, 1.0)
        3.0
        >>> diff(lambda x: x**2 + x, 1.0, 2)
        2.0
        >>> diff(lambda x: x**2 + x, 1.0, 3)
        0.0
        >>> nprint([diff(exp, 3, n) for n in range(5)])   # exp'(x) = exp(x)
        [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]

    Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
    and order `(n_1, \ldots, n_k)`, the partial derivative
    `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::

        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
        2.75
        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
        3.0

    **Options**

    The following optional keyword arguments are recognized:

    ``method``
        Supported methods are ``'step'`` or ``'quad'``: derivatives may be
        computed using either a finite difference with a small step
        size `h` (default), or numerical quadrature.
    ``direction``
        Direction of finite difference: can be -1 for a left
        difference, 0 for a central difference (default), or +1
        for a right difference; more generally can be any complex number.
    ``addprec``
        Extra precision for `h` used to account for the function's
        sensitivity to perturbations (default = 10).
    ``relative``
        Choose `h` relative to the magnitude of `x`, rather than an
        absolute value; useful for large or tiny `x` (default = False).
    ``h``
        As an alternative to ``addprec`` and ``relative``, manually
        select the step size `h`.
    ``singular``
        If True, evaluation exactly at the point `x` is avoided; this is
        useful for differentiating functions with removable singularities.
        Default = False.
    ``radius``
        Radius of integration contour (with ``method = 'quad'``).
        Default = 0.25. A larger radius typically is faster and more
        accurate, but it must be chosen so that `f` has no
        singularities within the radius from the evaluation point.

    A finite difference requires `n+1` function evaluations and must be
    performed at `(n+1)` times the target precision. Accordingly, `f` must
    support fast evaluation at high precision.

    With integration, a larger number of function evaluations is
    required, but not much extra precision is required. For high order
    derivatives, this method may thus be faster if f is very expensive to
    evaluate at high precision.

    **Further examples**

    The direction option is useful for computing left- or right-sided
    derivatives of nonsmooth functions::

        >>> diff(abs, 0, direction=0)
        0.0
        >>> diff(abs, 0, direction=1)
        1.0
        >>> diff(abs, 0, direction=-1)
        -1.0

    More generally, if the direction is nonzero, a right difference
    is computed where the step size is multiplied by sign(direction).
    For example, with direction=+j, the derivative from the positive
    imaginary direction will be computed::

        >>> diff(abs, 0, direction=j)
        (0.0 - 1.0j)

    With integration, the result may have a small imaginary part
    even if the result is purely real::

        >>> diff(sqrt, 1, method='quad')    # doctest:+ELLIPSIS
        (0.5 - 4.59...e-26j)
        >>> chop(_)
        0.5

    Adding precision to obtain an accurate value::

        >>> diff(cos, 1e-30)
        0.0
        >>> diff(cos, 1e-30, h=0.0001)
        -9.99999998328279e-31
        >>> diff(cos, 1e-30, addprec=100)
        -1.0e-30

    """
    partial = False
    # Tuple arguments signal a partial derivative; EAFP, since scalars
    # raise TypeError on list()
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(_) for _ in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    # 0th derivative is just an evaluation (unless x itself must be avoided)
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    prec = ctx.prec
    try:
        if method == 'step':
            # Finite difference of order n, normalized by the step size
            values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
            ctx.prec = workprec
            v = ctx.difference(values, n) / norm**n
        elif method == 'quad':
            # Cauchy integral formula over a circular contour of the
            # given radius around x
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            def g(t):
                rei = radius*ctx.expj(t)
                z = x + rei
                return f(z) / rei**n
            d = ctx.quadts(g, [0, 2*ctx.pi])
            v = d * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        ctx.prec = prec
    # Unary + rounds the result to the caller's precision
    return +v
|
||||
|
||||
def _partial_diff(ctx, f, xs, orders, options):
    """
    Recursively evaluate a mixed partial derivative.

    ``orders[i]`` is the differentiation order with respect to argument
    ``i``; each recursion level differentiates with respect to the
    first argument whose order is still nonzero, then zeroes it out.
    """
    if not orders:
        return f()
    # All orders exhausted: plain evaluation
    if not sum(orders):
        return f(*xs)
    # Find the first argument with a nonzero differentiation order
    # (guaranteed to exist by the sum check above)
    for i in range(len(orders)):
        if orders[i]:
            break
    order = orders[i]
    def fdiff_inner(*f_args):
        # Differentiate f in its i-th argument only, holding the rest fixed
        def inner(t):
            return f(*(f_args[:i] + (t,) + f_args[i+1:]))
        return ctx.diff(inner, f_args[i], order, **options)
    orders[i] = 0
    return _partial_diff(ctx, fdiff_inner, xs, orders, options)
|
||||
|
||||
@defun
def diffs(ctx, f, x, n=None, **options):
    r"""
    Returns a generator that yields the sequence of derivatives

    .. math ::

        f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots

    With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
    function evaluations to generate the first `k` derivatives,
    rather than the roughly `O(k^2)` evaluations
    required if one calls :func:`~mpmath.diff` `k` separate times.

    With `n < \infty`, the generator stops as soon as the
    `n`-th derivative has been generated. If the exact number of
    needed derivatives is known in advance, this is further
    slightly more efficient.

    Options are the same as for :func:`~mpmath.diff`.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> nprint(list(diffs(cos, 1, 5)))
        [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
        >>> for i, d in zip(range(6), diffs(cos, 1)):
        ...     print("%s %s" % (i, d))
        ...
        0 0.54030230586814
        1 -0.841470984807897
        2 -0.54030230586814
        3 0.841470984807897
        4 0.54030230586814
        5 -0.841470984807897

    """
    if n is None:
        # Open-ended generator
        n = ctx.inf
    else:
        n = int(n)
    if options.get('method', 'step') != 'step':
        # Non-step methods gain nothing from batching; delegate to diff
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    singular = options.get('singular')
    if singular:
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    # [A, B): batch of derivative orders computed from one set of samples
    if n == ctx.inf:
        A, B = 1, 2
    else:
        A, B = 1, n+1
    while 1:
        callprec = ctx.prec
        # One sampling pass serves every order in the current batch
        y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
        for k in xrange(A, B):
            try:
                ctx.prec = workprec
                d = ctx.difference(y, k) / norm**k
            finally:
                ctx.prec = callprec
            # Unary + rounds to the caller's precision
            yield +d
            if k >= n:
                return
        # Grow the batch geometrically for the open-ended case
        A, B = B, int(A*1.4+1)
        B = min(B, n)
|
||||
|
||||
def iterable_to_function(gen):
    """
    Wrap an iterator so that its k-th item is available on demand as
    ``f(k)``; items already consumed are cached for repeated access.
    """
    source = iter(gen)
    cache = []
    def f(k):
        # Pull items lazily until position k is cached
        while len(cache) <= k:
            cache.append(next(source))
        return cache[k]
    return f
|
||||
|
||||
@defun
def diffs_prod(ctx, factors):
    r"""
    Given a list of `N` iterables or generators yielding
    `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
    generate `g(x), g'(x), g''(x), \ldots` where
    `g(x) = f_1(x) f_2(x) \cdots f_N(x)`.

    At high precision and for large orders, this is typically more efficient
    than numerical differentiation if the derivatives of each `f_k(x)`
    admit direct computation.

    Note: This function does not increase the working precision internally,
    so guard digits may have to be added externally for full accuracy.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = lambda x: exp(x)*cos(x)*sin(x)
        >>> u = diffs(f, 1)
        >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
        >>> next(u); next(v)
        1.23586333600241
        1.23586333600241
        >>> next(u); next(v)
        0.104658952245596
        0.104658952245596
        >>> next(u); next(v)
        -5.96999877552086
        -5.96999877552086
        >>> next(u); next(v)
        -12.4632923122697
        -12.4632923122697

    """
    N = len(factors)
    if N == 1:
        # Base case: a single factor is its own derivative sequence
        for c in factors[0]:
            yield c
    else:
        # Divide and conquer: derivative sequences of each half of the
        # product, combined with the general Leibniz rule
        u = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
        v = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
        n = 0
        while 1:
            #yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1))
            s = u(n) * v(0)
            # a tracks binomial(n, k) incrementally (exact integer arithmetic)
            a = 1
            for k in xrange(1,n+1):
                a = a * (n-k+1) // k
                s += a * u(n-k) * v(k)
            yield s
            n += 1
|
||||
|
||||
def dpoly(n, _cache={}):
    """
    nth differentiation polynomial for exp (Faa di Bruno's formula).

    Returns a dict mapping exponent tuples ``(p1, p2, ...)`` to integer
    coefficients, representing the polynomial in the derivatives
    ``f', f'', ...`` such that ``(exp(f))^(n) = exp(f) * poly``.

    The mutable default ``_cache`` is intentional: it memoizes results
    across calls for the lifetime of the process.

    TODO: most exponents are zero, so maybe a sparse representation
    would be better.
    """
    # Seed the cache BEFORE the membership test, so that dpoly(0) on a
    # cold cache returns the base case instead of recursing forever
    # (the original checked membership first, making dpoly(0) diverge).
    if not _cache:
        _cache[0] = {(0,):1}
    if n in _cache:
        return _cache[n]
    R = dpoly(n-1)
    # Extend every monomial with a slot for the new highest derivative
    R = dict((c+(0,),v) for (c,v) in iteritems(R))
    Ra = {}
    # Term from multiplying by f' (product rule applied to exp(f))
    for powers, count in iteritems(R):
        powers1 = (powers[0]+1,) + powers[1:]
        if powers1 in Ra:
            Ra[powers1] += count
        else:
            Ra[powers1] = count
    # Terms from differentiating each factor f^(k+1) -> f^(k+2)
    for powers, count in iteritems(R):
        if not sum(powers):
            continue
        for k,p in enumerate(powers):
            if p:
                powers2 = powers[:k] + (p-1,powers[k+1]+1) + powers[k+2:]
                if powers2 in Ra:
                    Ra[powers2] += p*count
                else:
                    Ra[powers2] = p*count
    _cache[n] = Ra
    return _cache[n]
|
||||
|
||||
@defun
def diffs_exp(ctx, fdiffs):
    r"""
    Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
    generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.

    At high precision and for large orders, this is typically more efficient
    than numerical differentiation if the derivatives of `f(x)`
    admit direct computation.

    Note: This function does not increase the working precision internally,
    so guard digits may have to be added externally for full accuracy.

    **Examples**

    The derivatives of the gamma function can be computed using
    logarithmic differentiation::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>>
        >>> def diffs_loggamma(x):
        ...     yield loggamma(x)
        ...     i = 0
        ...     while 1:
        ...         yield psi(i,x)
        ...         i += 1
        ...
        >>> u = diffs_exp(diffs_loggamma(3))
        >>> v = diffs(gamma, 3)
        >>> next(u); next(v)
        2.0
        2.0
        >>> next(u); next(v)
        1.84556867019693
        1.84556867019693
        >>> next(u); next(v)
        2.49292999190269
        2.49292999190269
        >>> next(u); next(v)
        3.44996501352367
        3.44996501352367

    """
    fn = iterable_to_function(fdiffs)
    f0 = ctx.exp(fn(0))
    yield f0
    i = 1
    while 1:
        s = ctx.mpf(0)
        # Faa di Bruno's formula: sum monomials in the derivatives of f,
        # with the integer coefficients tabulated by dpoly
        for powers, c in iteritems(dpoly(i)):
            s += c*ctx.fprod(fn(k+1)**p for (k,p) in enumerate(powers) if p)
        # Every term of (exp f)^(i) carries the common factor exp(f)
        yield s * f0
        i += 1
|
||||
|
||||
@defun
def differint(ctx, f, x, n=1, x0=0):
    r"""
    Calculates the Riemann-Liouville differintegral, or fractional
    derivative, defined by

    .. math ::

        \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
        \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

    where `f` is a given (presumably well-behaved) function,
    `x` is the evaluation point, `n` is the order, and `x_0` is
    the reference point of integration (`m` is an arbitrary
    parameter selected automatically).

    With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
    the second derivative `f''(x)`, etc. With `n = -1`, it gives
    `\int_{x_0}^x f(t) dt`, with `n = -2`
    it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.

    As `n` is permitted to be any number, this operator generalizes
    iterated differentiation and iterated integration to a single
    operator with a continuous order parameter.

    **Examples**

    There is an exact formula for the fractional derivative of a
    monomial `x^p`, which may be used as a reference. For example,
    the following gives a half-derivative (order 0.5)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> x = mpf(3); p = 2; n = 0.5
        >>> differint(lambda t: t**p, x, n)
        7.81764019044672
        >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
        7.81764019044672

    Another useful test function is the exponential function, whose
    integration / differentiation formula easy generalizes
    to arbitrary order. Here we first compute a third derivative,
    and then a triply nested integral. (The reference point `x_0`
    is set to `-\infty` to avoid nonzero endpoint terms.)::

        >>> differint(lambda x: exp(pi*x), -1.5, 3)
        0.278538406900792
        >>> exp(pi*-1.5) * pi**3
        0.278538406900792
        >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
        1922.50563031149
        >>> exp(pi*3.5) / pi**3
        1922.50563031149

    However, for noninteger `n`, the differentiation formula for the
    exponential function must be modified to give the same result as the
    Riemann-Liouville differintegral::

        >>> x = mpf(3.5)
        >>> c = pi
        >>> n = 1+2*j
        >>> differint(lambda x: exp(c*x), x, n)
        (-123295.005390743 + 140955.117867654j)
        >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
        (-123295.005390743 + 140955.117867654j)


    """
    # m: smallest integer differentiation order with m > re(n), at least 1
    m = max(int(ctx.ceil(ctx.re(n)))+1, 1)
    r = m-n-1
    # Fractional integral of order m-n, then differentiate m times
    g = lambda x: ctx.quad(lambda t: (x-t)**r * f(t), [x0, x])
    return ctx.diff(g, x, m) / ctx.gamma(m-n)
|
||||
|
||||
@defun
def diffun(ctx, f, n=1, **options):
    r"""
    Given a function `f`, returns a function `g(x)` that evaluates the nth
    derivative `f^{(n)}(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> sin2 = diffun(sin, 4)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)
        >>> sin(1.3), sin2(1.3)
        (0.963558185417193, 0.963558185417193)

    The function `f` must support arbitrary precision evaluation.
    See :func:`~mpmath.diff` for additional details and supported
    keyword options.
    """
    # Zero-order differentiation is the identity transform
    if n == 0:
        return f
    # Close over f, n and the options; each call defers to ctx.diff
    return lambda x: ctx.diff(f, x, n, **options)
|
||||
|
||||
@defun
def taylor(ctx, f, x, n, **options):
    r"""
    Produces a degree-`n` Taylor polynomial around the point `x` of the
    given function `f`. The coefficients are returned as a list.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(chop(taylor(sin, 0, 5)))
        [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

    The coefficients are computed using high-order numerical
    differentiation. The function must be possible to evaluate
    to arbitrary precision. See :func:`~mpmath.diff` for additional details
    and supported keyword options.

    Note that to evaluate the Taylor polynomial as an approximation
    of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
    and the point of the Taylor expansion must be subtracted from
    the argument:

        >>> p = taylor(exp, 2.0, 10)
        >>> polyval(p[::-1], 2.5 - 2.0)
        12.1824939606092
        >>> exp(2.5)
        12.1824939607035

    """
    # k-th Taylor coefficient is f^(k)(x) / k!
    gen = enumerate(ctx.diffs(f, x, n, **options))
    if options.get("chop", True):
        # Chop tiny real/imaginary parts that are numerical noise
        return [ctx.chop(d)/ctx.factorial(i) for i, d in gen]
    else:
        return [d/ctx.factorial(i) for i, d in gen]
|
||||
|
||||
@defun
def pade(ctx, a, L, M):
    r"""
    Computes a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating
    a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
    polynomials `P, Q` satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example
    from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
    Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806
        >>> f(x)
        1.38169855941551

    """
    # To determine L+1 coefficients of P and M coefficients of Q
    # L+M+1 coefficients of A must be provided
    if len(a) < L+M+1:
        raise ValueError("L+M+1 Coefficients should be provided")

    if M == 0:
        # Denominator is the constant 1; P is just the truncated series
        if L == 0:
            return [ctx.one], [ctx.one]
        else:
            return a[:L+1], [ctx.one]

    # Solve first
    # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
    # ...
    # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
    # (a Toeplitz-structured linear system for the denominator)
    A = ctx.matrix(M)
    for j in range(M):
        # min(M, L+j+1) avoids indexing a[] with a negative subscript
        for i in range(min(M, L+j+1)):
            A[j, i] = a[L+j-i]
    v = -ctx.matrix(a[(L+1):(L+M+1)])
    x = ctx.lu_solve(A, v)
    # Normalization q_0 = 1
    q = [ctx.one] + list(x)
    # compute p by convolving a with q and truncating at degree L
    p = [0]*(L+1)
    for i in range(L+1):
        s = a[i]
        for j in range(1, min(M,i) + 1):
            s += q[j]*a[i-j]
        p[i] = s
    return p, q
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,973 @@
|
||||
# contributed to mpmath by Kristopher L. Kuhlman, February 2017
|
||||
# contributed to mpmath by Guillermo Navas-Palencia, February 2022
|
||||
|
||||
class InverseLaplaceTransform(object):
    r"""
    Inverse Laplace transform methods are implemented using this
    class, in order to simplify the code and provide a common
    infrastructure.

    Implement a custom inverse Laplace transform algorithm by
    subclassing :class:`InverseLaplaceTransform` and implementing the
    appropriate methods. The subclass can then be used by
    :func:`~mpmath.invertlaplace` by passing it as the *method*
    argument.
    """

    def __init__(self, ctx):
        # mpmath context used for arithmetic and precision control
        self.ctx = ctx

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        Determine the vector of Laplace parameter values needed for an
        algorithm, this will depend on the choice of algorithm (de
        Hoog is default), the algorithm-specific parameters passed (or
        default ones), and desired time.
        """
        # Abstract: subclasses must override
        raise NotImplementedError

    def calc_time_domain_solution(self, fp):
        r"""
        Compute the time domain solution, after computing the
        Laplace-space function evaluations at the abscissa required
        for the algorithm. Abscissa computed for one algorithm are
        typically not useful for another algorithm.
        """
        # Abstract: subclasses must override
        raise NotImplementedError
|
||||
|
||||
|
||||
class FixedTalbot(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The "fixed" Talbot method deforms the Bromwich contour towards
        `-\infty` in the shape of a parabola. Traditionally the Talbot
        algorithm has adjustable parameters, but the "fixed" version
        does not. The `r` parameter could be passed in as a parameter,
        if you want to override the default given by (Abate & Valko,
        2004).

        The Laplace parameter is sampled along a parabola opening
        along the negative imaginary axis, with the base of the
        parabola along the real axis at
        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
        the approximation (degree) grows, the abscissa required for
        function evaluation tend towards `-\infty`, requiring high
        precision to prevent overflow. If any poles, branch cuts or
        other singularities exist such that the deformed Bromwich
        contour lies to the left of the singularity, the method will
        fail.

        **Optional arguments**

        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        recognizes the following keywords

        *tmax*
            maximum time associated with vector of times
            (typically just the time requested)
        *degree*
            integer order of approximation (M = number of terms)
        *r*
            abscissa for `p_0` (otherwise computed using rule
            of thumb `2M/5`)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_0=\frac{r}{t}

        .. math ::

            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M

        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
        maximum specified time.

        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        # maximum time desired (used for scaling); default is the
        # requested time.
        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = self.degree
        else:
            self.dps_goal = int(1.72*self.ctx.dps)
            self.degree = max(12, int(1.38*self.dps_goal))

        M = self.degree

        # this is adjusting the dps of the calling context; hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Abate & Valko rule of thumb for r parameter
        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)

        # M+1 equally spaced angles on [0, pi]; theta[0]=0 pairs with
        # the real base point p_0 of the parabola
        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)

        self.cot_theta = self.ctx.matrix(M, 1)
        self.cot_theta[0] = 0  # not used (cot(0) is singular)

        # all but time-dependent part of p
        self.delta = self.ctx.matrix(M, 1)
        self.delta[0] = self.r

        for i in range(1, M):
            self.cot_theta[i] = self.ctx.cot(self.theta[i])
            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)

        # scale by the maximum time to obtain the abscissa.
        # NOTE: the original code pre-allocated self.p with
        # ctx.matrix(M, 1) and immediately overwrote it here; that dead
        # allocation has been removed.
        self.p = self.delta/self.tmax

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""The fixed Talbot time-domain solution is computed from the
        Laplace-space function evaluations using

        .. math ::

            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
            \gamma_k \bar{f}(p_k)\right]

        where

        .. math ::

            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)

        .. math ::

            \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
            \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.

        Again, `j=\sqrt{-1}`.

        Before calling this function, call
        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        to set the parameters and compute the required coefficients.

        **References**

        1. Abate, J., P. Valko (2004). Multi-precision Laplace
           transform inversion. *International Journal for Numerical
           Methods in Engineering* 60:979-993,
           http://dx.doi.org/10.1002/nme.995
        2. Talbot, A. (1979). The accurate numerical inversion of
           Laplace transforms. *IMA Journal of Applied Mathematics*
           23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already a list or matrix of
        # mpmath 'mpc' types

        # these were computed in previous call to
        # calc_laplace_parameter() (unused locals p, r removed)
        theta = self.theta
        delta = self.delta
        M = self.degree

        ans = self.ctx.matrix(M, 1)
        # gamma_0 term: delta[0] == r, so this is exp(r)*fbar(p_0)/2
        ans[0] = self.ctx.exp(delta[0])*fp[0]/2

        for i in range(1, M):
            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
                1j*self.cot_theta[i])

        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # the approximation of a real-valued time function has a
        # negligible imaginary part; discard it
        return result.real
|
||||
|
||||
|
||||
# ****************************************
|
||||
|
||||
class Stehfest(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        The Gaver-Stehfest method is a discrete approximation of the
        Widder-Post inversion algorithm, rather than a direct
        approximation of the Bromwich contour integral.

        The method uses abscissa along the real axis only, and
        therefore has issues inverting oscillatory functions (which
        have poles in pairs away from the real axis).

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(2.93*self.ctx.dps)
            self.degree = max(16, self.dps_goal)

        # _coeff routine requires even degree (it sums over M/2)
        if self.degree % 2 > 0:
            self.degree += 1

        M = self.degree

        # this is adjusting the dps of the calling context;
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Salzer summation weights, computed once at working precision
        self.V = self._coeff()
        # abscissa: p_k = k*ln(2)/t for k = 1..M
        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t

        # NB: p is real (mpf)

    def _coeff(self):
        r"""Salzer summation weights (aka, "Stehfest coefficients")
        only depend on the approximation order (M) and the precision"""

        M = self.degree
        M2 = int(M/2)  # checked earlier that M is even

        V = self.ctx.matrix(M, 1)

        # Salzer summation weights
        # get very large in magnitude and oscillate in sign;
        # if the precision is not high enough, there will be
        # catastrophic cancellation
        for k in range(1, M+1):
            # z holds the (positive) terms of the inner sum for V_k
            z = self.ctx.matrix(min(k, M2)+1, 1)
            for j in range(int((k+1)/2), min(k, M2)+1):
                z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
                        (self.ctx.fac(M2-j)*self.ctx.fac(j)*
                         self.ctx.fac(j-1)*self.ctx.fac(k-j)*
                         self.ctx.fac(2*j-k)))
            # overall sign (-1)^(k+M/2); fsum adds terms accurately
            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)

        return V

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Compute time-domain Stehfest algorithm solution.

        .. math ::

            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
            p_k \right)

        where

        .. math ::

            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}

        As the degree increases, the abscissa (`p_k`) only increase
        linearly towards `\infty`, but the Stehfest coefficients
        (`V_k`) alternate in sign and increase rapidly in magnitude,
        requiring high precision to prevent overflow or loss of
        significance when evaluating the sum.

        **References**

        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
           Laplace transforms. *Communications of the ACM* 13(1):47-49,
           http://dx.doi.org/10.1145/361953.361969

        """

        # required
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already
        # a list or matrix of mpmath 'mpf' types

        # weighted sum of the Laplace-space samples, scaled by ln(2)/t
        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
|
||||
|
||||
|
||||
# ****************************************
|
||||
|
||||
class deHoog(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""the de Hoog, Knight & Stokes algorithm is an
        accelerated form of the Fourier series numerical
        inverse Laplace transform algorithms.

        .. math ::

            p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1

        where

        .. math ::

            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},

        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
        rightmost pole or singularity, which is chosen based on the
        desired accuracy (assuming the rightmost singularity is 0),
        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
        chosen in relation to `\alpha`.

        When increasing the degree, the abscissa increase towards
        `j\infty`, but more slowly than the fixed Talbot
        algorithm. The de Hoog et al. algorithm typically does better
        with oscillatory functions of time, and less well-behaved
        functions. The method tends to be slower than the Talbot and
        Stehfest algorithms, especially so at very high precision
        (e.g., `>500` digits precision).

        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        self.tmax = kwargs.get('tmax', self.t)

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(self.ctx.dps*1.36)
            self.degree = max(10, self.dps_goal)

        # 2*M+1 terms in approximation
        M = self.degree

        # adjust alpha component of abscissa of convergence for higher
        # precision
        tmp = self.ctx.power(10.0, -self.dps_goal)
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # desired tolerance (here simply related to alpha)
        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
        self.np = 2*self.degree+1  # number of terms in approximation

        # this is adjusting the dps of the calling context;
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # scaling factor (likely tun-able, but 2 is typical)
        self.scale = kwargs.get('scale', 2)
        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))

        # NOTE(review): with the default scale=2 and T=2*tmax this
        # matches the docstring's gamma = alpha - log(tol)/(2T) only
        # at scale=2; behavior kept as-is for compatibility.
        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
        # abscissa p_k = gamma + i*pi*k/T for k = 0..2M.
        # NOTE: the original code pre-allocated self.p with
        # ctx.matrix(2*M+1, 1) and immediately overwrote it here; that
        # dead allocation has been removed.
        self.p = (self.gamma + self.ctx.pi*
                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for
        de Hoog, Knight & Stokes algorithm.

        The un-accelerated Fourier series approach is:

        .. math ::

            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
            \Re\left[\bar{f}\left( p_k \right)
            e^{i\pi t/T} \right],

        where the prime on the summation indicates the first term is halved.

        This simplistic approach requires so many function evaluations
        that it is not practical. Non-linear acceleration is
        accomplished via Pade-approximation and an analytic expression
        for the remainder of the continued fraction. See the original
        paper (reference 2 below) for a detailed description of the
        numerical approach.

        **References**

        1. Davies, B. (2005). *Integral Transforms and their
           Applications*, Third Edition. Springer.
        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
           method for numerical inversion of Laplace transforms. *SIAM
           Journal of Scientific and Statistical Computing* 3:357-366,
           http://dx.doi.org/10.1137/0903022

        """

        M = self.degree
        np = self.np
        T = self.T

        self.t = self.ctx.convert(t)

        # would it be useful to try re-using
        # space between e&q and A&B?
        e = self.ctx.zeros(np, M+1)
        q = self.ctx.matrix(2*M, M)
        d = self.ctx.matrix(np, 1)
        A = self.ctx.zeros(np+1, 1)
        B = self.ctx.ones(np+1, 1)

        # initialize Q-D table
        e[:, 0] = 0.0 + 0j
        q[0, 0] = fp[1]/(fp[0]/2)
        for i in range(1, 2*M):
            q[i, 0] = fp[i+1]/fp[i]

        # rhombus rule for filling triangular Q-D table (e & q)
        for r in range(1, M+1):
            # start with e, column 1, 0:2*M-2
            mr = 2*(M-r) + 1
            e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
            if not r == M:
                rq = r+1
                mr = 2*(M-rq)+1 + 2
                for i in range(mr):
                    q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]

        # build up continued fraction coefficients (d)
        d[0] = fp[0]/2
        for r in range(1, M+1):
            d[2*r-1] = -q[0, r-1]  # odd-index terms
            d[2*r] = -e[0, r]      # even-index terms

        # seed A and B for recurrence
        A[0] = 0.0 + 0.0j
        A[1] = d[0]
        B[0:2] = 1.0 + 0.0j

        # base of the power series
        z = self.ctx.expjpi(self.t/T)  # i*pi is already in fcn

        # coefficients of Pade approximation (A & B)
        # using recurrence for all but last term
        for i in range(1, 2*M):
            A[i+1] = A[i] + d[i]*A[i-1]*z
            B[i+1] = B[i] + d[i]*B[i-1]*z

        # "improved remainder" to continued fraction
        brem = (1 + (d[2*M-1] - d[2*M])*z)/2
        # powm1(x,y) computes x^y - 1 more accurately near zero
        rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
                                  self.ctx.fraction(1, 2))

        # last term of recurrence using new remainder
        A[np] = A[2*M] + rem*A[2*M-1]
        B[np] = B[2*M] + rem*B[2*M-1]

        # diagonal Pade approximation
        # F=A/B represents accelerated trapezoid rule
        result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
|
||||
|
||||
|
||||
# ****************************************
|
||||
|
||||
class Cohen(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The Cohen algorithm accelerates the convergence of the nearly
        alternating series resulting from the application of the trapezoidal
        rule to the Bromwich contour inversion integral.

        .. math ::

            p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M

        where

        .. math ::

            \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),

        `d = \mathrm{dps\_goal}`, which is chosen based on the desired
        accuracy using the method developed in [1] to improve numerical
        stability. The Cohen algorithm shows robustness similar to the de Hoog
        et al. algorithm, but it is faster than the fixed Talbot algorithm.

        **Optional arguments**

        *degree*
            integer order of the approximation (M = number of terms)
        *alpha*
            abscissa for `p_0` (controls the discretization error)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        **References**

        1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
           distribution in the Gaussian copula model: a comparison of methods.
           *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057

        """
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # empirical precision/degree trade-off (same scheme as the
        # other methods in this module)
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.5 * self.degree)
        else:
            self.dps_goal = int(self.ctx.dps * 1.74)
            self.degree = max(22, int(1.31 * self.dps_goal))

        # M abscissa are needed: one real base point plus `degree`
        # complex points
        M = self.degree + 1

        # this is adjusting the dps of the calling context; hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # gamma = (2/3)*(d*log(10) + log(2t)), evaluated at the
        # (already raised) working precision
        ttwo = 2 * self.t
        tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
        tmp = self.ctx.fraction(2, 3) * tmp
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # all but time-dependent part of p
        a_t = self.alpha / ttwo
        p_t = self.ctx.pi * 1j / self.t

        self.p = self.ctx.matrix(M, 1)
        self.p[0] = a_t  # real base abscissa p_0 = gamma/(2t)

        for i in range(1, M):
            self.p[i] = a_t + i * p_t

        # NB: p[1:] are complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for Cohen algorithm.

        The accelerated nearly alternating series is:

        .. math ::

            f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
            \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
            \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
            \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],

        where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].

        1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
           acceleration of alternating series. *Experiment. Math* 9(1):3-12

        """
        self.t = self.ctx.convert(t)

        n = self.degree
        M = n + 1

        # only the real parts of the Laplace-space samples enter the sum
        A = self.ctx.matrix(M, 1)
        for i in range(M):
            A[i] = fp[i].real

        # Chebyshev-based constant d_n = ((3+sqrt(8))^n + (3+sqrt(8))^-n)/2
        # from the Cohen-Villegas-Zagier acceleration scheme
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        # recurrence generating the weights c_{n,k}; s accumulates the
        # weighted alternating sum of A[1:]
        for k in range(n):
            c = b - c
            s = s + c * A[k + 1]
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
|
||||
|
||||
|
||||
# ****************************************
|
||||
|
||||
class LaplaceTransformInversionMethods(object):
|
||||
def __init__(ctx, *args, **kwargs):
|
||||
ctx._fixed_talbot = FixedTalbot(ctx)
|
||||
ctx._stehfest = Stehfest(ctx)
|
||||
ctx._de_hoog = deHoog(ctx)
|
||||
ctx._cohen = Cohen(ctx)
|
||||
|
||||
def invertlaplace(ctx, f, t, **kwargs):
|
||||
r"""Computes the numerical inverse Laplace transform for a
|
||||
Laplace-space function at a given time. The function being
|
||||
evaluated is assumed to be a real-valued function of time.
|
||||
|
||||
The user must supply a Laplace-space function `\bar{f}(p)`,
|
||||
and a desired time at which to estimate the time-domain
|
||||
solution `f(t)`.
|
||||
|
||||
A few basic examples of Laplace-space functions with known
|
||||
inverses (see references [1,2]) :
|
||||
|
||||
.. math ::
|
||||
|
||||
\mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)
|
||||
|
||||
.. math ::
|
||||
|
||||
\mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)
|
||||
|
||||
.. math ::
|
||||
|
||||
\bar{f}(p) = \frac{1}{(p+1)^2}
|
||||
|
||||
.. math ::
|
||||
|
||||
f(t) = t e^{-t}
|
||||
|
||||
>>> from mpmath import *
|
||||
>>> mp.dps = 15; mp.pretty = True
|
||||
>>> tt = [0.001, 0.01, 0.1, 1, 10]
|
||||
>>> fp = lambda p: 1/(p+1)**2
|
||||
>>> ft = lambda t: t*exp(-t)
|
||||
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
|
||||
(0.000999000499833375, 8.57923043561212e-20)
|
||||
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
|
||||
(0.00990049833749168, 3.27007646698047e-19)
|
||||
>>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
|
||||
(0.090483741803596, -1.75215800052168e-18)
|
||||
>>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
|
||||
(0.367879441171442, 1.2428864009344e-17)
|
||||
>>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
|
||||
(0.000453999297624849, 4.04513489306658e-20)
|
||||
|
||||
The methods also work for higher precision:
|
||||
|
||||
>>> mp.dps = 100; mp.pretty = True
|
||||
>>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
|
||||
('0.000999000499833375', '-4.96868310693356e-105')
|
||||
>>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
|
||||
('0.00990049833749168', '1.23032291513122e-104')
|
||||
|
||||
.. math ::
|
||||
|
||||
\bar{f}(p) = \frac{1}{p^2+1}
|
||||
|
||||
.. math ::
|
||||
|
||||
f(t) = \mathrm{J}_0(t)
|
||||
|
||||
>>> mp.dps = 15; mp.pretty = True
|
||||
>>> fp = lambda p: 1/sqrt(p*p + 1)
|
||||
>>> ft = lambda t: besselj(0,t)
|
||||
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
|
||||
(0.999999750000016, -6.09717765032273e-18)
|
||||
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
|
||||
(0.99997500015625, -5.61756281076169e-17)
|
||||
|
||||
.. math ::
|
||||
|
||||
\bar{f}(p) = \frac{\log p}{p}
|
||||
|
||||
.. math ::
|
||||
|
||||
f(t) = -\gamma -\log t
|
||||
|
||||
>>> mp.dps = 15; mp.pretty = True
|
||||
>>> fp = lambda p: log(p)/p
|
||||
>>> ft = lambda t: -euler-log(t)
|
||||
>>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
|
||||
(6.3305396140806, -1.92126634837863e-16)
|
||||
>>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
|
||||
(4.02795452108656, -4.81486093200704e-16)
|
||||
|
||||
**Options**
|
||||
|
||||
:func:`~mpmath.invertlaplace` recognizes the following optional
|
||||
keywords valid for all methods:
|
||||
|
||||
*method*
|
||||
Chooses numerical inverse Laplace transform algorithm
|
||||
(described below).
|
||||
*degree*
|
||||
Number of terms used in the approximation
|
||||
|
||||
**Algorithms**
|
||||
|
||||
Mpmath implements four numerical inverse Laplace transform
|
||||
algorithms, attributed to: Talbot, Stehfest, and de Hoog,
|
||||
Knight and Stokes. These can be selected by using
|
||||
*method='talbot'*, *method='stehfest'*, *method='dehoog'* or
|
||||
*method='cohen'* or by passing the classes *method=FixedTalbot*,
|
||||
*method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
|
||||
:func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
|
||||
:func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
|
||||
are also available as shortcuts.
|
||||
|
||||
All four algorithms implement a heuristic balance between the
|
||||
requested precision and the precision used internally for the
|
||||
calculations. This has been tuned for a typical exponentially
|
||||
decaying function and precision up to few hundred decimal
|
||||
digits.
|
||||
|
||||
The Laplace transform converts the variable time (i.e., along
|
||||
a line) into a parameter given by the right half of the
|
||||
complex `p`-plane. Singularities, poles, and branch cuts in
|
||||
the complex `p`-plane contain all the information regarding
|
||||
the time behavior of the corresponding function. Any numerical
|
||||
method must therefore sample `p`-plane "close enough" to the
|
||||
singularities to accurately characterize them, while not
|
||||
getting too close to have catastrophic cancellation, overflow,
|
||||
or underflow issues. Most significantly, if one or more of the
|
||||
singularities in the `p`-plane is not on the left side of the
|
||||
Bromwich contour, its effects will be left out of the computed
|
||||
solution, and the answer will be completely wrong.
|
||||
|
||||
*Talbot*
|
||||
|
||||
The fixed Talbot method is high accuracy and fast, but the
|
||||
method can catastrophically fail for certain classes of time-domain
|
||||
behavior, including a Heaviside step function for positive
|
||||
time (e.g., `H(t-2)`), or some oscillatory behaviors. The
|
||||
Talbot method usually has adjustable parameters, but the
|
||||
"fixed" variety implemented here does not. This method
|
||||
deforms the Bromwich integral contour in the shape of a
|
||||
parabola towards `-\infty`, which leads to problems
|
||||
when the solution has a decaying exponential in it (e.g., a
|
||||
Heaviside step function is equivalent to multiplying by a
|
||||
decaying exponential in Laplace space).
|
||||
|
||||
*Stehfest*
|
||||
|
||||
The Stehfest algorithm only uses abscissa along the real axis
|
||||
of the complex `p`-plane to estimate the time-domain
|
||||
function. Oscillatory time-domain functions have poles away
|
||||
from the real axis, so this method does not work well with
|
||||
oscillatory functions, especially high-frequency ones. This
|
||||
method also depends on summation of terms in a series that
|
||||
grows very large, and will have catastrophic cancellation
|
||||
during summation if the working precision is too low.
|
||||
|
||||
*de Hoog et al.*
|
||||
|
||||
The de Hoog, Knight, and Stokes method is essentially a
|
||||
Fourier-series quadrature-type approximation to the Bromwich
|
||||
contour integral, with non-linear series acceleration and an
|
||||
analytical expression for the remainder term. This method is
|
||||
typically one of the most robust. This method also involves the
|
||||
greatest amount of overhead, so it is typically the slowest of the
|
||||
four methods at high precision.
|
||||
|
||||
*Cohen*
|
||||
|
||||
The Cohen method is a trapezoidal rule approximation to the Bromwich
|
||||
contour integral, with linear acceleration for alternating
|
||||
series. This method is as robust as the de Hoog et al method and the
|
||||
fastest of the four methods at high precision, and is therefore the
|
||||
default method.
|
||||
|
||||
**Singularities**
|
||||
|
||||
All numerical inverse Laplace transform methods have problems
|
||||
at large time when the Laplace-space function has poles,
|
||||
singularities, or branch cuts to the right of the origin in
|
||||
the complex plane. For simple poles in `\bar{f}(p)` at the
|
||||
`p`-plane origin, the time function is constant in time (e.g.,
|
||||
`\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
|
||||
`p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
|
||||
decreasing function of time (e.g., `\mathcal{L}\left\lbrace
|
||||
e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
|
||||
a pole to the right of the origin leads to an increasing
|
||||
function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
|
||||
\right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
|
||||
singularities occur off the real `p` axis, the time-domain
|
||||
function is oscillatory. For example `\mathcal{L}\left\lbrace
|
||||
\mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
|
||||
starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
|
||||
function. This range of behaviors is illustrated in Duffy [3]
|
||||
Figure 4.10.4, p. 228.
|
||||
|
||||
In general as `p \rightarrow \infty` `t \rightarrow 0` and
|
||||
vice-versa. All numerical inverse Laplace transform methods
|
||||
require their abscissa to shift closer to the origin for
|
||||
larger times. If the abscissa shift left of the rightmost
|
||||
singularity in the Laplace domain, the answer will be
|
||||
completely wrong (the effect of singularities to the right of
|
||||
the Bromwich contour are not included in the results).
|
||||
|
||||
For example, the following exponentially growing function has
|
||||
a pole at `p=3`:
|
||||
|
||||
.. math ::
|
||||
|
||||
\bar{f}(p)=\frac{1}{p^2-9}
|
||||
|
||||
.. math ::
|
||||
|
||||
f(t)=\frac{1}{3}\sinh 3t
|
||||
|
||||
>>> mp.dps = 15; mp.pretty = True
|
||||
>>> fp = lambda p: 1/(p*p-9)
|
||||
>>> ft = lambda t: sinh(3*t)/3
|
||||
>>> tt = [0.01,0.1,1.0,10.0]
|
||||
>>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
|
||||
(0.0100015000675014, 0.0100015000675014)
|
||||
>>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
|
||||
(0.101506764482381, 0.101506764482381)
|
||||
>>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
|
||||
(3.33929164246997, 3.33929164246997)
|
||||
>>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
|
||||
(1781079096920.74, -1.61331069624091e-14)
|
||||
|
||||
**References**
|
||||
|
||||
1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
|
||||
2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
|
||||
Inversion, Springer.
|
||||
3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.
|
||||
|
||||
**Numerical Inverse Laplace Transform Reviews**
|
||||
|
||||
1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
|
||||
inversion of the Laplace transform: Applications to Biology,
|
||||
Economics, Engineering, and Physics*. Elsevier.
|
||||
2. Davies, B., B. Martin (1979). Numerical inversion of the
|
||||
Laplace transform: a survey and comparison of methods. *Journal
|
||||
of Computational Physics* 33:1-32,
|
||||
http://dx.doi.org/10.1016/0021-9991(79)90025-1
|
||||
3. Duffy, D.G. (1993). On the numerical inversion of Laplace
|
||||
transforms: Comparison of three new methods on characteristic
|
||||
problems from applications. *ACM Transactions on Mathematical
|
||||
Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
|
||||
4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
|
||||
Algorithms for Laplace-Space Numerical Approaches, *Numerical
|
||||
Algorithms*, 63(2):339-355.
|
||||
http://dx.doi.org/10.1007/s11075-012-9625-3
|
||||
|
||||
"""
|
||||
|
||||
rule = kwargs.get('method', 'cohen')
|
||||
if type(rule) is str:
|
||||
lrule = rule.lower()
|
||||
if lrule == 'talbot':
|
||||
rule = ctx._fixed_talbot
|
||||
elif lrule == 'stehfest':
|
||||
rule = ctx._stehfest
|
||||
elif lrule == 'dehoog':
|
||||
rule = ctx._de_hoog
|
||||
elif rule == 'cohen':
|
||||
rule = ctx._cohen
|
||||
else:
|
||||
raise ValueError("unknown invlap algorithm: %s" % rule)
|
||||
else:
|
||||
rule = rule(ctx)
|
||||
|
||||
# determine the vector of Laplace-space parameter
|
||||
# needed for the requested method and desired time
|
||||
rule.calc_laplace_parameter(t, **kwargs)
|
||||
|
||||
# compute the Laplace-space function evaluations
|
||||
# at the required abscissa.
|
||||
fp = [f(p) for p in rule.p]
|
||||
|
||||
# compute the time-domain solution from the
|
||||
# Laplace-space function evaluations
|
||||
return rule.calc_time_domain_solution(fp, t)
|
||||
|
||||
# shortcuts for the above function for specific methods
|
||||
def invlaptalbot(ctx, *args, **kwargs):
    """Shortcut for :func:`invertlaplace` with the fixed-Talbot method.

    Any ``method`` keyword supplied by the caller is overridden.
    """
    return ctx.invertlaplace(*args, **dict(kwargs, method='talbot'))
|
||||
|
||||
def invlapstehfest(ctx, *args, **kwargs):
    """Shortcut for :func:`invertlaplace` with the Stehfest method.

    Any ``method`` keyword supplied by the caller is overridden.
    """
    return ctx.invertlaplace(*args, **dict(kwargs, method='stehfest'))
|
||||
|
||||
def invlapdehoog(ctx, *args, **kwargs):
    """Shortcut for :func:`invertlaplace` with the de Hoog et al. method.

    Any ``method`` keyword supplied by the caller is overridden.
    """
    return ctx.invertlaplace(*args, **dict(kwargs, method='dehoog'))
|
||||
|
||||
def invlapcohen(ctx, *args, **kwargs):
    """Shortcut for :func:`invertlaplace` with the Cohen method.

    Any ``method`` keyword supplied by the caller is overridden.
    """
    return ctx.invertlaplace(*args, **dict(kwargs, method='cohen'))
|
||||
|
||||
|
||||
# ****************************************
|
||||
|
||||
if __name__ == '__main__':
    # When run as a script, execute the doctest examples embedded in
    # the docstrings of this module.
    import doctest
    doctest.testmod()
|
||||
@@ -0,0 +1,288 @@
|
||||
from bisect import bisect
|
||||
from ..libmp.backend import xrange
|
||||
|
||||
class ODEMethods(object):
    """Empty mix-in namespace for ODE solvers.

    Solver entry points are attached as attributes at the bottom of this
    module (``ODEMethods.odefun = odefun``).
    """
    pass
|
||||
|
||||
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
    """
    Compute a degree-*n* local Taylor expansion of the solution of the
    first-order ODE system ``y' = derivs(x, y)`` around ``x0``, starting
    from the initial vector ``y0``.

    Returns ``(ser, xb)`` where *ser* is a list (one entry per solution
    component) of Taylor coefficients ``[c_0, c_1, ..., c_n]`` about
    ``x0``, and *xb* is an estimated right endpoint up to which the
    expansion should deliver roughly *tol_prec* bits of accuracy.
    """
    # The Euler step size h doubles as the tolerance: 2**(-tol_prec)
    h = tol = ctx.ldexp(1, -tol_prec)
    dim = len(y0)
    xs = [x0]
    ys = [y0]
    x = x0
    y = y0
    orig = ctx.prec
    try:
        # Work at greatly increased precision: the finite differences
        # computed below suffer severe cancellation.
        ctx.prec = orig*(1+n)
        # Use n steps with Euler's method to get
        # evaluation points for derivatives
        for i in range(n):
            fxy = derivs(x, y)
            y = [y[i]+h*fxy[i] for i in xrange(len(y))]
            x += h
            xs.append(x)
            ys.append(y)
        # Compute derivatives via j-th forward differences of the
        # sampled values ys[0..n]
        ser = [[] for d in range(dim)]
        for j in range(n+1):
            s = [0]*dim
            # b runs through the alternating binomial weights
            # (-1)**(j-i) * C(j, i) of the j-th forward difference
            b = (-1) ** (j & 1)
            k = 1
            for i in range(j+1):
                for d in range(dim):
                    s[d] += b * ys[i][d]
                b = (b * (j-k+1)) // (-k)
                k += 1
            # j-th Taylor coefficient ~ (j-th difference) / (h**j * j!)
            scale = h**(-j) / ctx.fac(j)
            for d in range(dim):
                s[d] = s[d] * scale
                ser[d].append(s[d])
    finally:
        ctx.prec = orig
    # Estimate radius for which we can get full accuracy.
    # XXX: do this right for zeros
    radius = ctx.one
    for ts in ser:
        if ts[-1]:
            radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
    radius /= 2  # XXX
    return ser, x0+radius
|
||||
|
||||
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`~mpmath.odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that
    *F* must be possible to evaluate to very high precision
    for the generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`~mpmath.odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`,
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print((f(x), exp(x)))
        ...
        (1.0, 1.0)
        (2.71828182845905, 2.71828182845905)
        (12.1824939607035, 12.1824939607035)

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print((f(x), 2*atan(exp(mpf(x)**2/2))))
        ...
        (2.87255666284091, 2.87255666284091)
        (3.14158520028345, 3.14158520028345)
        (3.14159265358979, 3.14159265358979)

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`~mpmath.quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = -y_0'`
    whereby the original equation can be written as `y_1' - y_0 = 0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = -y_1 \\
        y_1' = y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print("---")
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    # NOTE(review): *method* is accepted but never consulted below;
    # only the Taylor solver is implemented so far (see TODO).
    # Bits of precision implied by the requested tolerance
    if tol:
        tol_prec = int(-ctx.log(tol, 2))+10
    else:
        tol_prec = ctx.prec+10
    degree = degree or (3 + int(3*ctx.dps/2.))
    workprec = ctx.prec + 40
    try:
        # Duck-type check: a scalar y0 has no len()
        len(y0)
        return_vector = True
    except TypeError:
        # Scalar problem: wrap F and y0 as 1-element vectors and
        # remember to unwrap the result on evaluation
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
    ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
    # Right endpoints of the valid intervals of the cached expansions
    series_boundaries = [x0, xb]
    # Parallel list of (coefficients, left endpoint, right endpoint)
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
        # Evaluate each component's Taylor polynomial at offset a
        return [ctx.polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        if x < x0:
            # Solution is only defined for x >= x0 (see docstring TODO)
            raise ValueError
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            # x falls inside an already-computed interval
            return series_data[n-1]
        # Otherwise extend the solution interval by interval, restarting
        # each new expansion from the previous interval's endpoint value
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print("Computing Taylor series for [%f, %f]" % (xa, xb))
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = ctx.convert(x)
        orig = ctx.prec
        try:
            # Evaluate with guard digits, then round the result back down
            ctx.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            ctx.prec = orig
        if return_vector:
            # Unary + rounds each component to the caller's precision
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
|
||||
|
||||
# Attach the solver to the (otherwise empty) ODEMethods mix-in;
# presumably the mix-in is combined into the context class elsewhere.
ODEMethods.odefun = odefun
|
||||
|
||||
if __name__ == "__main__":
    # When run as a script, execute the doctest examples embedded in
    # the docstrings of this module.
    import doctest
    doctest.testmod()
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,213 @@
|
||||
from ..libmp.backend import xrange
|
||||
from .calculus import defun
|
||||
|
||||
#----------------------------------------------------------------------------#
|
||||
# Polynomials #
|
||||
#----------------------------------------------------------------------------#
|
||||
|
||||
# XXX: extra precision
|
||||
@defun
|
||||
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Evaluate the polynomial with coefficient list
    `[c_n, \ldots, c_2, c_1, c_0]` at the point `x`:

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    When *derivative=True* is passed, the derivative `P'(x)` is
    accumulated alongside the value and the pair `(P(x), P'(x))`
    is returned instead.

    >>> from mpmath import *
    >>> mp.pretty = True
    >>> polyval([3, 0, 2], 0.5)
    2.75
    >>> polyval([3, 0, 2], 0.5, derivative=True)
    (2.75, 3.0)

    Both the coefficients and the evaluation point may be any mix of
    real and complex numbers.
    """
    if not coeffs:
        # An empty coefficient list denotes the zero polynomial.
        return ctx.zero
    # Horner's scheme, highest-order coefficient first. The derivative
    # recurrence (slope <- value + x*slope) is interleaved so that both
    # results come out of a single pass.
    value = ctx.convert(coeffs[0])
    slope = ctx.zero
    for coefficient in coeffs[1:]:
        if derivative:
            slope = value + x*slope
        value = coefficient + x*value
    if derivative:
        return value, slope
    return value
|
||||
|
||||
@defun
|
||||
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
    error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally uses
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both.

    The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    Provided there are no repeated roots, :func:`~mpmath.polyroots` can
    typically compute all roots of an arbitrary polynomial to high precision::

        >>> mp.dps = 60
        >>> for r in polyroots([1, 0, -10, 0, 1]):
        ...     print(r)
        ...
        -3.14626436994197234232913506571557044551247712918732870123249
        -0.317837245195782244725757617296174288373133378433432554879127
        0.317837245195782244725757617296174288373133378433432554879127
        3.14626436994197234232913506571557044551247712918732870123249
        >>>
        >>> sqrt(3) + sqrt(2)
        3.14626436994197234232913506571557044551247712918732870123249
        >>> sqrt(3) - sqrt(2)
        0.317837245195782244725757617296174288373133378433432554879127

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously.
    The Durand-Kerner method can be viewed as approximately performing
    simultaneous Newton iteration for all the roots. In particular,
    the convergence to simple roots is quadratic, just like Newton's
    method.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    # Convergence tolerance at the caller's (non-extended) precision
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard Durand-Kerner starting guesses: powers of 0.4+0.9j,
            # a complex number that is neither real nor of unit modulus
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Caller-supplied initial guesses, padded with the standard
            # guesses if too few were given
            roots = [None]*deg;
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                # Weierstrass correction: f(p) / prod_{j != i} (p - roots[j])
                for j in range(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            # Two iterates currently coincide; skip the factor
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (sorted by value), then complex roots ordered
        # so that conjugate pairs land next to each other
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        # Never report an error below 1 ulp at the original precision
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user