Skip to content

Commit 89f3109

Browse files
authored
Merge pull request #123 from florisvb/higher-order-tvr-fd
Investigated #116 and added some comments to help clarify the code
2 parents e5c4713 + d43be08 commit 89f3109

3 files changed

Lines changed: 11 additions & 16 deletions

File tree

pynumdiff/kalman_smooth/_kalman_smooth.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def _kalman_forward_filter(xhat0, P0, y, A, C, Q, R, u=None, B=None):
3636
xhat_pre.append(xhat_) # the [n]th index holds _{n|n-1} info
3737
P_pre.append(P_)
3838

39-
xhat = xhat_.copy()
39+
xhat = xhat_.copy() # copies very important here. See #122
4040
P = P_.copy()
4141
if not np.isnan(y[n]): # handle missing data
4242
K = P_ @ C.T @ np.linalg.inv(C @ P_ @ C.T + R)

pynumdiff/tests/test_diff_methods.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -185,10 +185,10 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
185185
[(0, 0), (1, 0), (0, -1), (1, 0)],
186186
[(1, 1), (2, 2), (1, 1), (2, 2)],
187187
[(1, 1), (3, 3), (1, 1), (3, 3)]],
188-
jerk_sliding: [[(-15, -15), (-16, -17), (0, -1), (1, 0)],
188+
jerk_sliding: [[(-15, -15), (-16, -16), (0, -1), (1, 0)],
189189
[(-14, -14), (-14, -14), (0, -1), (0, 0)],
190190
[(-14, -14), (-14, -14), (0, -1), (0, 0)],
191-
[(-1, -1), (0, 0), (0, -1), (0, 0)],
191+
[(-1, -1), (0, 0), (0, -1), (1, 0)],
192192
[(0, 0), (2, 2), (0, 0), (2, 2)],
193193
[(1, 1), (3, 3), (1, 1), (3, 3)]]
194194
}

pynumdiff/total_variation_regularization/_total_variation_regularization.py

Lines changed: 8 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
6969
- **x_hat** -- estimated (smoothed) x
7070
- **dxdt_hat** -- estimated derivative of x
7171
"""
72-
# Normalize
72+
# Normalize for numerical consistency with convex solver
7373
mean = np.mean(x)
7474
std = np.std(x)
7575
if std == 0: std = 1 # safety guard
@@ -79,15 +79,16 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
7979
deriv_values = cvxpy.Variable(len(x)) # values of the order^th derivative, in which we're penalizing variation
8080
integration_constants = cvxpy.Variable(order) # constants of integration that help get us back to x
8181

82-
# Recursively integrate the highest order derivative to get back to the position
82+
# Recursively integrate the highest order derivative to get back to the position. This is a first-
83+
# order scheme, but it's very fast and tends to not do markedly worse than 2nd order. See #116
8384
y = deriv_values
8485
for i in range(order):
8586
y = cvxpy.cumsum(y) + integration_constants[i]
8687

8788
# Set up and solve the optimization problem
8889
prob = cvxpy.Problem(cvxpy.Minimize(
8990
# Compare the recursively integrated position to the noisy position, and add TVR penalty
90-
cvxpy.sum_squares(y - x) + cvxpy.sum(gamma*cvxpy.tv(deriv_values)) ))
91+
cvxpy.sum_squares(y - x) + gamma*cvxpy.sum(cvxpy.tv(deriv_values)) ))
9192
prob.solve(solver=solver)
9293

9394
# Recursively integrate the final derivative values to get back to the function and derivative values
@@ -97,14 +98,10 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
9798
dxdt_hat = y/dt # y only holds the dx values; to get deriv scale by dt
9899
x_hat = np.cumsum(y) + integration_constants.value[order-1] # smoothed data
99100

100-
dxdt_hat = (dxdt_hat[:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch
101-
ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2]
102-
dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f # What is this doing? Could we use a 2nd order FD above natively?
103-
dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f))
104-
105-
# fix first point
106-
d = dxdt_hat[2] - dxdt_hat[1]
107-
dxdt_hat[0] = dxdt_hat[1] - d
101+
# Due to the first-order nature of the derivative, it has a slight lag. Average together every two values
102+
# to better center the answer. But this leaves us one-short, so devise a good last value.
103+
dxdt_hat = (dxdt_hat[:-1] + dxdt_hat[1:])/2
104+
dxdt_hat = np.hstack((dxdt_hat, 2*dxdt_hat[-1] - dxdt_hat[-2])) # last value = penultimate value [-1] + diff between [-1] and [-2]
108105

109106
return x_hat*std+mean, dxdt_hat*std # derivative is linear, so scale derivative by std
110107

@@ -262,5 +259,3 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None, wind
262259
ramp = window_size//5
263260
kernel = np.hstack((np.arange(1, ramp+1)/ramp, np.ones(window_size - 2*ramp), (np.arange(1, ramp+1)/ramp)[::-1]))
264261
return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=ramp, solver=solver)
265-
266-

0 commit comments

Comments (0)