-
Notifications
You must be signed in to change notification settings - Fork 24
Expand file tree
/
Copy pathbasis_fit.py
More file actions
143 lines (116 loc) · 7.02 KB
/
basis_fit.py
File metadata and controls
143 lines (116 loc) · 7.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
"""Methods based on fitting basis functions to data"""
from warnings import warn
import numpy as np
from scipy import sparse
from pynumdiff.utils import utility
# --- Spectral (Fourier-domain) differentiation ---
def spectraldiff(x, dt, axis=0, params=None, options=None, high_freq_cutoff=None,
                even_extension=True, pad_to_zero_dxdt=True):
    """Take a derivative in the Fourier domain, with high frequency attentuation.

    :param np.array[float] x: data to differentiate
    :param float dt: step size
    :param int axis: axis of :code:`x` along which to differentiate (the "time" axis)
    :param list[float] or float params: (**deprecated**, prefer :code:`high_freq_cutoff`)
    :param dict options: (**deprecated**, prefer :code:`even_extension`
        and :code:`pad_to_zero_dxdt`) a dictionary consisting of {'even_extension': (bool), 'pad_to_zero_dxdt': (bool)}
    :param float high_freq_cutoff: The high frequency cutoff as a multiple of the Nyquist frequency: Should be between 0
        and 1. Frequencies below this threshold will be kept, and above will be zeroed.
    :param bool even_extension: if True, extend the data with an even extension so signal starts and ends at the same value.
    :param bool pad_to_zero_dxdt: if True, extend the data with extra regions that smoothly force the derivative to
        zero before taking FFT.

    :return: - **x_hat** (np.array) -- estimated (smoothed) x
             - **dxdt_hat** (np.array) -- estimated derivative of x
    :raises ValueError: if neither :code:`high_freq_cutoff` nor the deprecated :code:`params` is given
    """
    if params is not None:
        warn("`params` and `options` parameters will be removed in a future version. Use `high_freq_cutoff`, " +
            "`even_extension`, and `pad_to_zero_dxdt` instead.", DeprecationWarning)
        high_freq_cutoff = params[0] if isinstance(params, list) else params
        if options is not None:
            if 'even_extension' in options: even_extension = options['even_extension']
            if 'pad_to_zero_dxdt' in options: pad_to_zero_dxdt = options['pad_to_zero_dxdt']
    elif high_freq_cutoff is None:
        raise ValueError("`high_freq_cutoff` must be given.")

    x = np.asarray(x)
    x0 = np.moveaxis(x, axis, 0) # move time axis to the front of the array
    L = x0.shape[0] # number of samples along the differentiation axis

    # Make derivative go to zero at the ends (optional): pad with 100 copies of the first
    # and last samples, then smooth only the padded regions so the interior data is untouched.
    if pad_to_zero_dxdt:
        padding = 100
        first = x0[0:1]
        last = x0[-1:]
        pre = np.repeat(first, padding, axis=0)
        post = np.repeat(last, padding, axis=0)
        xpad = np.concatenate((pre, x0, post), axis=0) # concatenate handles the leading axis regardless of ndim
        kernel = utility.mean_kernel(padding//2)
        x_hat0 = utility.convolutional_smoother(xpad, kernel, axis=0)
        x_hat0[padding:-padding] = xpad[padding:-padding] # keep original data; only pads are smoothed
        x0 = x_hat0
    else:
        padding = 0

    # Even extension (optional), so the periodic FFT sees a signal that starts and ends at the same value
    if even_extension:
        x0 = np.concatenate((x0, x0[::-1, ...]), axis=0)

    # Form wavenumbers in numpy FFT ordering: [0, 1, ..., N//2, -(N-1)//2, ..., -1].
    # The negative half uses (N-1)//2 so the total length is N for both even and odd N
    # (the old form -N//2 + 1 dropped one entry when N was odd).
    N = x0.shape[0]
    k = np.concatenate((np.arange(N//2 + 1), np.arange(-((N - 1)//2), 0)))
    if N % 2 == 0: k[N//2] = 0 # odd derivatives get the Nyquist element zeroed out

    # Low-pass filter: keep wavenumbers below the cutoff (a fraction of Nyquist at N/2), zero the rest
    discrete_cutoff = int(high_freq_cutoff * N / 2)
    filt = np.ones(N)
    filt[discrete_cutoff:N - discrete_cutoff] = 0
    bcast_shape = (N,) + (1,)*(x0.ndim - 1) # broadcast filter/wavenumbers over any trailing signal axes
    filt = filt.reshape(bcast_shape)

    # Smoothed signal: inverse FFT of the filtered spectrum, trimmed back to the original extent
    X = np.fft.fft(x0, axis=0)
    x_hat0 = np.real(np.fft.ifft(filt * X, axis=0))
    x_hat0 = x_hat0[padding:L + padding]

    # Derivative = multiplication by i*k*omega in the Fourier domain (90 degree phase shift)
    omega = 2*np.pi/(dt*N)
    k0 = k.reshape(bcast_shape)
    dxdt0 = np.real(np.fft.ifft(1j * k0 * omega * filt * X, axis=0))
    dxdt0 = dxdt0[padding:L + padding]

    # Move the time axis back to its original position
    x_hat = np.moveaxis(x_hat0, 0, axis)
    dxdt_hat = np.moveaxis(dxdt0, 0, axis)
    return x_hat, dxdt_hat
def rbfdiff(x, dt_or_t, sigma=1, lmbd=0.01):
    """Smooth noisy data and estimate its derivative by a regularized radial-basis-function fit.

    Conceptually this solves :code:`(K + lmbd*I) alpha = x` where :code:`K[i,j] = exp(-(t_i-t_j)^2 / (2 sigma^2))`,
    then reconstructs :code:`x_hat = K alpha` and :code:`dxdt_hat = (dK/dt) alpha`. Because the Gaussian kernel
    decays rapidly, entries farther than a cutoff radius are dropped, so the kernel matrices are sparse and the
    whole procedure runs in roughly O(N sigma^2) rather than the O(N^3) dense solve. The :code:`lmbd` ridge on the
    diagonal gives each basis "hill" a small "tower" to reach the noisy samples; the returned reconstruction uses
    only the smooth hills.

    :param np.array[float] x: data to differentiate
    :param float or array[float] dt_or_t: This function supports variable step size. This parameter is either the constant
        :math:`\\Delta t` if given as a single float, or data locations if given as an array of same length as :code:`x`.
    :param float sigma: controls width of radial basis functions
    :param float lmbd: controls smoothness

    :return: - **x_hat** (np.array) -- estimated (smoothed) x
             - **dxdt_hat** (np.array) -- estimated derivative of x
    :raises ValueError: if :code:`dt_or_t` is array-like with a length different from :code:`x`
    """
    if np.isscalar(dt_or_t):
        t = np.arange(len(x)) * dt_or_t # uniform sample locations
    else: # variable step size: caller supplied the sample locations directly
        if len(x) != len(dt_or_t): raise ValueError("If `dt_or_t` is given as array-like, must have same length as `x`.")
        t = dt_or_t

    n_pts = len(t)
    # Distance at which the Gaussian falls below ~1e-4; entries beyond this are treated as zero
    reach = np.sqrt(-2 * sigma**2 * np.log(1e-4))
    rows, cols = [], []
    vals, dvals = [], []
    for i in range(n_pts):
        # Binary-search the sorted locations for the window of points within the cutoff, O(log N) each
        lo = np.searchsorted(t, t[i] - reach)
        hi = np.searchsorted(t, t[i] + reach)
        for j in range(lo, hi): # window width scales with sigma; [lo, hi) is the correct half-open range
            radius = t[i] - t[j]
            g = np.exp(-radius**2 / (2 * sigma**2)) # Gaussian kernel value
            rows.append(i)
            cols.append(j)
            vals.append(g)
            dvals.append(-radius / sigma**2 * g) # d/dt of the kernel, since d/dt coef*f(t) = coef*df/dt
    shape = (n_pts, n_pts)
    kernel = sparse.csr_matrix((vals, (rows, cols)), shape=shape) # O(N sigma) stored entries
    dkernel = sparse.csr_matrix((dvals, (rows, cols)), shape=shape)
    # Ridge-regularized sparse solve against the noisy samples
    regularized = kernel + lmbd*sparse.eye(n_pts, format="csr")
    coef = sparse.linalg.spsolve(regularized, x)
    # Reconstruct with the un-regularized (smooth) bases only
    x_hat = kernel @ coef
    dxdt_hat = dkernel @ coef
    return x_hat, dxdt_hat