-
Notifications
You must be signed in to change notification settings - Fork 24
Expand file tree
/
Copy path_finite_difference.py
More file actions
124 lines (102 loc) · 7.07 KB
/
_finite_difference.py
File metadata and controls
124 lines (102 loc) · 7.07 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
"""This is handy for this module https://web.media.mit.edu/~crtaylor/calculator.html"""
import numpy as np
from pynumdiff.utils import utility
from warnings import warn
def finitediff(x, dt, num_iterations=1, order=2):
    """Perform iterated finite difference of a given order. This serves as the common backing function for
    all other methods in this module.

    :param np.array[float] x: data to differentiate (any array-like accepted)
    :param float dt: step size
    :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
        rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
    :param int order: 1, 2, or 4, controls which finite differencing scheme to employ
    :return: tuple[np.array, np.array] of\n
        - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
        - **dxdt_hat** -- estimated derivative of x
    :raises ValueError: if num_iterations < 1 or order not in {1, 2, 4}
    """
    if num_iterations < 1: raise ValueError("num_iterations must be >0")
    if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")
    x_hat = np.asarray(x) # allows for array-like. Preserve reference to x, for finding the final constant of integration
    # BUGFIX: previously np.zeros(x.shape), which raised AttributeError for list input despite the
    # asarray conversion above being there precisely to permit array-likes. Use the converted array.
    dxdt_hat = np.zeros(x_hat.shape) # preallocate reusable memory
    # For all but the last iteration, do the differentiate->integrate smoothing loop, being careful with endpoints
    for i in range(num_iterations-1):
        if order == 1:
            dxdt_hat[:-1] = np.diff(x_hat)
            dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
        elif order == 2:
            dxdt_hat[1:-1] = (x_hat[2:] - x_hat[:-2])/2 # second-order center-difference formula
            dxdt_hat[0] = x_hat[1] - x_hat[0]
            dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104
        elif order == 4:
            dxdt_hat[2:-2] = (8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4])/12 # fourth-order center-difference
            dxdt_hat[1] = (x_hat[2] - x_hat[0])/2
            dxdt_hat[-2] = (x_hat[-1] - x_hat[-3])/2 # use second-order formula for next-to-endpoints so as not to amplify noise
            dxdt_hat[0] = x_hat[1] - x_hat[0]
            dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104
        x_hat = utility.integrate_dxdt_hat(dxdt_hat, 1) # estimate new x_hat by integrating derivative
        # We can skip dividing by dt here and pass dt=1, because the integration multiplies dt back in.
        # No need to find integration constant until the very end, because we just differentiate again.
        # Note that I also tried integrating with Simpson's rule here, and it seems to do worse. See #104
    if order == 1:
        dxdt_hat[:-1] = np.diff(x_hat)
        dxdt_hat[-1] = dxdt_hat[-2] # using stencil -1,0 vs stencil 0,1 you get an expression for the same value
    elif order == 2:
        dxdt_hat[1:-1] = x_hat[2:] - x_hat[:-2] # second-order center-difference formula
        dxdt_hat[0] = -3 * x_hat[0] + 4 * x_hat[1] - x_hat[2] # second-order endpoint formulas
        dxdt_hat[-1] = 3 * x_hat[-1] - 4 * x_hat[-2] + x_hat[-3]
        dxdt_hat /= 2
    elif order == 4:
        dxdt_hat[2:-2] = 8*(x_hat[3:-1] - x_hat[1:-3]) - x_hat[4:] + x_hat[:-4] # fourth-order center-difference
        dxdt_hat[0] = -25*x_hat[0] + 48*x_hat[1] - 36*x_hat[2] + 16*x_hat[3] - 3*x_hat[4]
        dxdt_hat[1] = -3*x_hat[0] - 10*x_hat[1] + 18*x_hat[2] - 6*x_hat[3] + x_hat[4]
        dxdt_hat[-2] = 3*x_hat[-1] + 10*x_hat[-2] - 18*x_hat[-3] + 6*x_hat[-4] - x_hat[-5]
        dxdt_hat[-1] = 25*x_hat[-1] - 48*x_hat[-2] + 36*x_hat[-3] - 16*x_hat[-4] + 3*x_hat[-5]
        dxdt_hat /= 12
    dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time
    if num_iterations > 1: # We've lost a constant of integration in the above
        x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares
    return x_hat, dxdt_hat
def first_order(x, dt, params=None, options=None, num_iterations=1):
    """First-order difference method\n
    **Deprecated**, prefer :code:`finitediff` with order 1 instead.

    :param np.array[float] x: data to differentiate
    :param float dt: step size
    :param list[float] or float params: (**deprecated**, prefer :code:`num_iterations`)
    :param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary consisting of {'iterate': (bool)}
    :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
        rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
    :return: tuple[np.array, np.array] of\n
        - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
        - **dxdt_hat** -- estimated derivative of x
    """
    warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " +
         "approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " +
         "horizontal drift in the answer, especially when iterating.", DeprecationWarning)
    # BUGFIX: the deprecated path previously required BOTH `params` and `options['iterate']` to be
    # present (an `and`), so passing `params` alone -- the common legacy call pattern -- was silently
    # ignored. Either deprecated argument now triggers the warning, and num_iterations is only
    # overridden when params actually carries a value (avoids setting it to None).
    if params is not None or (options is not None and 'iterate' in options):
        warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning)
        if params is not None:
            num_iterations = params[0] if isinstance(params, list) else params
    warn("`first_order` is deprecated. Call `finitediff` with order 1 instead.", DeprecationWarning)
    return finitediff(x, dt, num_iterations, 1)
def second_order(x, dt, num_iterations=1):
    """Estimate the derivative via second-order centered differences, using special endpoint formulas.\n
    **Deprecated**, prefer :code:`finitediff` with order 2 instead.

    :param np.array[float] x: data to differentiate
    :param float dt: step size
    :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
        rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
    :return: tuple[np.array, np.array] of\n
        - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
        - **dxdt_hat** -- estimated derivative of x
    """
    # Thin deprecation shim: everything is delegated to the common backing function.
    deprecation_msg = "`second_order` is deprecated. Call `finitediff` with order 2 instead."
    warn(deprecation_msg, DeprecationWarning)
    return finitediff(x, dt, num_iterations=num_iterations, order=2)
def fourth_order(x, dt, num_iterations=1):
    """Estimate the derivative via fourth-order centered differences, using special endpoint formulas.\n
    **Deprecated**, prefer :code:`finitediff` with order 4 instead.

    :param np.array[float] x: data to differentiate
    :param float dt: step size
    :param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
        rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
    :return: tuple[np.array, np.array] of\n
        - **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
        - **dxdt_hat** -- estimated derivative of x
    """
    # Thin deprecation shim: everything is delegated to the common backing function.
    deprecation_msg = "`fourth_order` is deprecated. Call `finitediff` with order 4 instead."
    warn(deprecation_msg, DeprecationWarning)
    return finitediff(x, dt, num_iterations=num_iterations, order=4)