Skip to content

Commit 1d5972b

Browse files
committed
Realized the symbols in the code were exactly backwards from the math, and made a small modification to bring them into better agreement.
1 parent 00ccd92 commit 1d5972b

1 file changed

Lines changed: 12 additions & 12 deletions

File tree

pynumdiff/total_variation_regularization/_total_variation_regularization.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -75,34 +75,34 @@ def tvrdiff(x, dt, order, gamma, huberM=float('inf'), solver=None):
7575
mu = np.mean(x)
7676
sigma = median_abs_deviation(x, scale='normal') # robust alternative to std()
7777
if sigma == 0: sigma = 1 # safety guard
78-
x = (x-mu)/sigma
78+
y = (x-mu)/sigma
7979

8080
# Define the variables for the highest order derivative and the integration constants
81-
deriv_values = cvxpy.Variable(len(x)) # values of the order^th derivative, in which we're penalizing variation
81+
deriv_values = cvxpy.Variable(len(y)) # values of the order^th derivative, in which we're penalizing variation
8282
integration_constants = cvxpy.Variable(order) # constants of integration that help get us back to x
8383

8484
# Recursively integrate the highest order derivative to get back to the position. This is a first-
8585
# order scheme, but it's very fast and tends to do not markedly worse than 2nd order. See #116
86-
# I also tried a trapezoidal integration rule here, and it works no better. See #116 too
87-
y = deriv_values
86+
# I also tried a trapezoidal integration rule here, and it works no better. See #116 too.
87+
hx = deriv_values # variables are integrated to produce the signal estimate variables, \hat{x} in the math
8888
for i in range(order):
89-
y = cvxpy.cumsum(y) + integration_constants[i]
89+
hx = cvxpy.cumsum(hx) + integration_constants[i] # cumsum is like integration assuming dt = 1
9090

9191
# Compare the recursively integrated position to the noisy position. \ell_2 doesn't get scaled by 1/2 here,
9292
# so cvxpy's doubled Huber is already the right scale, and \ell_1 should be scaled by 2\sqrt{2} to match.
93-
fidelity_cost = cvxpy.sum_squares(y - x) if huberM == float('inf') \
94-
else np.sqrt(8)*cvxpy.norm(y - x, 1) if huberM == 0 \
95-
else utility.huber_const(huberM)*cvxpy.sum(cvxpy.huber(y - x, huberM)) # data is already scaled, so M rather than M*sigma
93+
fidelity_cost = cvxpy.sum_squares(y - hx) if huberM == float('inf') \
94+
else np.sqrt(8)*cvxpy.norm(y - hx, 1) if huberM == 0 \
95+
else utility.huber_const(huberM)*cvxpy.sum(cvxpy.huber(y - hx, huberM)) # data is already scaled, so M rather than M*sigma
9696
# Set up and solve the optimization problem
9797
prob = cvxpy.Problem(cvxpy.Minimize(fidelity_cost + gamma*cvxpy.sum(cvxpy.tv(deriv_values)) ))
9898
prob.solve(solver=solver)
9999

100100
# Recursively integrate the final derivative values to get back to the function and derivative values
101-
y = deriv_values.value
101+
v = deriv_values.value
102102
for i in range(order-1): # stop one short to get the first derivative
103-
y = np.cumsum(y) + integration_constants.value[i]
104-
dxdt_hat = y/dt # y only holds the dx values; to get deriv scale by dt
105-
x_hat = np.cumsum(y) + integration_constants.value[order-1] # smoothed data
103+
v = np.cumsum(v) + integration_constants.value[i]
104+
dxdt_hat = v/dt # v only holds the dx values; to get deriv scale by dt
105+
x_hat = np.cumsum(v) + integration_constants.value[order-1] # smoothed data
106106

107107
# Due to the first-order nature of the derivative, it has a slight lag. Average together every two values
108108
# to better center the answer. But this leaves us one-short, so devise a good last value.

0 commit comments

Comments (0)