6 changes: 3 additions & 3 deletions pynumdiff/linear_model/_linear_model.py
@@ -105,7 +105,7 @@ def _polydiff(x, dt, polynomial_order, weights=None):
return _polydiff(x, dt, polynomial_order)

kernel = {'gaussian':utility.gaussian_kernel, 'friedrichs':utility.friedrichs_kernel}[kernel](window_size)
- return utility.slide_function(_polydiff, x, dt, kernel, polynomial_order, step_size=step_size, pass_weights=True)
+ return utility.slide_function(_polydiff, x, dt, kernel, polynomial_order, stride=step_size, pass_weights=True)


#############
@@ -312,8 +312,8 @@ def _lineardiff(x, dt, order, gamma, solver=None):

kernel = {'gaussian':utility.gaussian_kernel, 'friedrichs':utility.friedrichs_kernel}[kernel](window_size)

- x_hat_forward, _ = utility.slide_function(_lineardiff, x, dt, kernel, order, gamma, step_size=step_size, solver=solver)
- x_hat_backward, _ = utility.slide_function(_lineardiff, x[::-1], dt, kernel, order, gamma, step_size=step_size, solver=solver)
+ x_hat_forward, _ = utility.slide_function(_lineardiff, x, dt, kernel, order, gamma, stride=step_size, solver=solver)
+ x_hat_backward, _ = utility.slide_function(_lineardiff, x[::-1], dt, kernel, order, gamma, stride=step_size, solver=solver)

# weights
w = np.arange(1, len(x_hat_forward)+1,1)[::-1]
32 changes: 23 additions & 9 deletions pynumdiff/tests/test_diff_methods.py
@@ -4,18 +4,19 @@
from warnings import warn

from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff
- from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration
+ from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration, jerk_sliding
from ..kalman_smooth import constant_velocity, constant_acceleration, constant_jerk
from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff
from ..finite_difference import first_order, second_order
# Function aliases for testing cases where parameters change the behavior in a big way
def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)

Collaborator Author:

I briefly had some other sample vectors up here, but switching between them in the tests is annoying, so I've kept everything on this single t and dt.
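A quick sketch of why linspace is the safer way to hit the endpoint here (float-step arange can gain or lose the final sample to rounding; t_arange and t_linspace are illustrative names):

```python
import numpy as np

dt = 0.1
t_arange = np.arange(0, 3 + dt, dt)  # float step: endpoint inclusion depends on rounding
t_linspace = np.linspace(0, 3, 31)   # exact endpoints, spacing of 0.1 by construction

print(t_arange.size, t_linspace.size)     # 31 31 here, but arange's count is fragile
print(np.allclose(t_arange, t_linspace))  # True
print(t_linspace[1] - t_linspace[0])      # 0.1, recoverable as t[1] - t[0]
```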

- dt = 0.1
- t = np.arange(0, 3+dt, dt) # sample locations, including the endpoint
+ t = np.linspace(0, 3, 31) # sample locations, including the endpoint
tt = np.linspace(0, 3) # full domain, for visualizing denser plots
+ ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires more points
np.random.seed(7) # for repeatability of the test, so we don't get random failures
noise = 0.05*np.random.randn(*t.shape)
+ long_noise = 0.05*np.random.randn(*ttt.shape)
Collaborator Author:

I define noise up here, before anything runs, for consistency. Otherwise I'd get different answers when running a test on its own versus alongside the others.
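A minimal illustration of that order-dependence (the shapes here are hypothetical, just to show the effect):

```python
import numpy as np

np.random.seed(7)
noise_a = 0.05 * np.random.randn(31)  # first thing drawn after seeding

np.random.seed(7)
_ = np.random.randn(10)               # another test consuming randomness first...
noise_b = 0.05 * np.random.randn(31)  # ...shifts every value in this draw

print(np.allclose(noise_a, noise_b))  # False: same seed, different draw order
```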

Collaborator Author:

Ended up adding window_size to jerk_sliding so that I could test it with the same t and noise as everyone else.


# Analytic (function, derivative) pairs on which to test differentiation methods.
test_funcs_and_derivs = [
@@ -51,8 +52,8 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
(acceleration, {'gamma':1}), (acceleration, [1]),
(jerk, {'gamma':10}), (jerk, [10]),
(iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]),
- (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5])
- # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk
+ (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]),
+ (jerk_sliding, {'gamma':1e2, 'solver':'CLARABEL'}), (jerk_sliding, [1e2], {'solver':'CLARABEL'})
Collaborator Author:

The OSQP solver was giving me trouble in the CI again.
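For context, a toy cvxpy problem (hypothetical, not the PR's actual program) showing how the solver choice is swapped; CLARABEL is an interior-point solver (shipped via the clarabel package) that tends to behave more reproducibly in CI than the first-order OSQP:

```python
import cvxpy as cp
import numpy as np

# A small total-variation-flavored problem, just to exercise the solver choice.
x = cp.Variable(5)
target = np.arange(5.0)
objective = cp.Minimize(cp.sum_squares(x - target) + 10 * cp.norm1(cp.diff(x)))
prob = cp.Problem(objective)
prob.solve(solver='CLARABEL')  # requires the clarabel package to be installed
print(x.value)
```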

]

# All the testing methodology follows the exact same pattern; the only thing that changes is the
@@ -184,7 +185,13 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs)
[(-2, -2), (-1, -1), (-1, -1), (0, -1)],
[(0, 0), (1, 0), (0, -1), (1, 0)],
[(1, 1), (2, 2), (1, 1), (2, 2)],
- [(1, 1), (3, 3), (1, 1), (3, 3)]]
+ [(1, 1), (3, 3), (1, 1), (3, 3)]],
+ jerk_sliding: [[(-13, -14), (-12, -13), (0, -1), (1, 0)],
+                [(-12, -13), (-12, -12), (0, -1), (0, 0)],
+                [(-13, -14), (-12, -13), (0, -1), (0, 0)],
+                [(-1, -2), (0, 0), (0, -1), (1, 0)],
+                [(0, 0), (2, 1), (0, 0), (2, 1)],
+                [(1, 1), (3, 3), (1, 1), (3, 3)]]
}

# Essentially run the cartesian product of [diff methods] x [test functions] through this one test
@@ -203,9 +210,16 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return

# sample the true function and make noisy samples, and sample true derivative
- x = f(t)
- x_noisy = x + noise
- dxdt = df(t)
+ if diff_method != jerk_sliding:
+     x = f(t)
+     x_noisy = x + noise
+     dxdt = df(t)
+     dt = t[1] - t[0]
+ else: # different density for jerk_sliding
+     x = f(ttt)
+     x_noisy = x + long_noise
+     dxdt = df(ttt)
+     dt = ttt[1] - ttt[0]

# differentiate without and with noise, accounting for new and old styles of calling functions
x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) \
2 changes: 1 addition & 1 deletion pynumdiff/tests/test_utils.py
@@ -73,7 +73,7 @@ def identity(x, dt): return x, 0 # should come back the same
x = np.arange(100)
kernel = utility.gaussian_kernel(9)

- x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, step_size=2)
+ x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, stride=2)
Collaborator Author:

I decided to rename this parameter. I like stride better, because dt is a step size. Less ambiguous.


assert np.allclose(x, x_hat)

@@ -97,7 +97,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
dxdt_hat = y/dt # y only holds the dx values; to get deriv scale by dt
x_hat = np.cumsum(y) + integration_constants.value[order-1] # smoothed data

- dxdt_hat = (dxdt_hat[0:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch
Collaborator Author:

No need for that little 0.

+ dxdt_hat = (dxdt_hat[:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch
ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2]
dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f # What is this doing? Could we use a 2nd order FD above natively?
dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f))
@@ -106,7 +106,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None):
d = dxdt_hat[2] - dxdt_hat[1]
dxdt_hat[0] = dxdt_hat[1] - d

- return x_hat*std+mean, dxdt_hat*std
+ return x_hat*std+mean, dxdt_hat*std # derivative is linear, so scale derivative by std
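A quick numeric check of that claim (a sketch with made-up data): if the solver saw z = (x - mean)/std, then x = std*z + mean, so dx/dt = std*dz/dt and the mean drops out of the derivative:

```python
import numpy as np

t = np.linspace(0, 1, 101); dt = t[1] - t[0]
x = 3*t**2 + 5                  # made-up signal
z = (x - x.mean())/x.std()      # standardized copy, as the solver sees it
print(np.allclose(x.std()*np.gradient(z, dt), np.gradient(x, dt)))  # True
```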


def velocity(x, dt, params=None, options=None, gamma=None, solver=None):
@@ -253,65 +253,9 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None):
elif gamma == None:
raise ValueError("`gamma` must be given.")

- window_size = 1000
- stride = 200
Collaborator Author:

Hard-coding these makes this method less usable.


- if len(x) < window_size:
+ if len(x) <= 100:
warn("len(x) <= 1000, calling standard jerk() without sliding")
return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver)

- # slide the jerk
- final_xsmooth = []
- final_xdot_hat = []
- first_idx = 0
- final_idx = first_idx + window_size
- last_loop = False
-
- final_weighting = []
-
- try:
-     while not last_loop:
-         xsmooth, xdot_hat = _total_variation_regularized_derivative(x[first_idx:final_idx], dt, 3,
-                                                                     gamma, solver=solver)
-
-         xsmooth = np.hstack(([0]*first_idx, xsmooth, [0]*(len(x)-final_idx)))
-         final_xsmooth.append(xsmooth)
-
-         xdot_hat = np.hstack(([0]*first_idx, xdot_hat, [0]*(len(x)-final_idx)))
-         final_xdot_hat.append(xdot_hat)
-
-         # blending
-         w = np.hstack(([0]*first_idx,
-                        np.arange(1, 201)/200,
-                        [1]*(final_idx-first_idx-400),
-                        (np.arange(1, 201)/200)[::-1],
-                        [0]*(len(x)-final_idx)))
-         final_weighting.append(w)
-
-         if final_idx >= len(x):
-             last_loop = True
-         else:
-             first_idx += stride
-             final_idx += stride
-             if final_idx > len(x):
-                 final_idx = len(x)
-                 if final_idx - first_idx < 200:
-                     first_idx -= (200 - (final_idx - first_idx))
-
-     # normalize columns
-     weights = np.vstack(final_weighting)
-     for c in range(weights.shape[1]):
-         weights[:, c] /= np.sum(weights[:, c])
-
-     # weighted sums
-     xsmooth = np.vstack(final_xsmooth)
-     xsmooth = np.sum(xsmooth*weights, axis=0)
-
-     xdot_hat = np.vstack(final_xdot_hat)
-     xdot_hat = np.sum(xdot_hat*weights, axis=0)
-
-     return xsmooth, xdot_hat
-
- except ValueError:
-     warn('Solver failed, returning finite difference instead')
-     from pynumdiff.finite_difference import second_order
-     return second_order(x, dt)
+ kernel = np.hstack((np.arange(1, 21)/20, np.ones(60), (np.arange(0, 21)/20)[::-1]))
+ return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20, solver=solver)
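A hedged usage sketch of the refactored method, mirroring the new test's setup (the sine input is illustrative; the kernel above ramps up over 20 samples, holds flat for 60, and ramps down over 21, giving length 101, odd as slide_function requires):

```python
import numpy as np
from pynumdiff.total_variation_regularization import jerk_sliding

t = np.linspace(0, 3, 201)                            # jerk_sliding wants a denser grid
x = np.sin(2*np.pi*t) + 0.05*np.random.randn(t.size)  # noisy illustrative signal
x_hat, dxdt_hat = jerk_sliding(x, t[1] - t[0], gamma=1e2, solver='CLARABEL')
```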
8 changes: 4 additions & 4 deletions pynumdiff/utils/utility.py
@@ -176,15 +176,15 @@ def convolutional_smoother(x, kernel, iterations=1):
return x_hat[len(x):len(x)*2]


- def slide_function(func, x, dt, kernel, *args, step_size=1, pass_weights=False, **kwargs):
+ def slide_function(func, x, dt, kernel, *args, stride=1, pass_weights=False, **kwargs):
"""Slide a smoothing derivative function across a timeseries with specified window size.

:param callable func: name of the function to slide
:param np.array[float] x: data to differentiate
:param float dt: step size
:param np.array[float] kernel: values to weight the sliding window
:param list args: passed to func
- :param int step_size: step size for slide (e.g. 1 means slide by 1 step)
+ :param int stride: step size for slide (e.g. 1 means slide by 1 step)
:param bool pass_weights: whether weights should be passed to func via update to kwargs
:param dict kwargs: passed to func

@@ -195,11 +195,11 @@ def slide_function(func, x, dt, kernel, *args, step_size=1, pass_weights=False,
if len(kernel) % 2 == 0: raise ValueError("Kernel window size should be odd.")
half_window_size = (len(kernel) - 1)//2 # int because len(kernel) is always odd

- weights = np.zeros((int(np.ceil(len(x)/step_size)), len(x)))
+ weights = np.zeros((int(np.ceil(len(x)/stride)), len(x)))
x_hats = np.zeros(weights.shape)
dxdt_hats = np.zeros(weights.shape)

- for i,midpoint in enumerate(range(0, len(x), step_size)): # iterate window midpoints
+ for i,midpoint in enumerate(range(0, len(x), stride)): # iterate window midpoints
# find where to index data and kernel, taking care at edges
window = slice(max(0, midpoint - half_window_size),
min(len(x), midpoint + half_window_size + 1)) # +1 because slicing works [,)
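As a quick sanity check of the edge-aware slicing (a sketch assuming a length-9 kernel, so half_window_size is 4, over 100 samples):

```python
half_window_size = 4  # (9 - 1)//2
n = 100
for midpoint in (0, 50, 98):
    window = slice(max(0, midpoint - half_window_size),
                   min(n, midpoint + half_window_size + 1))
    print(midpoint, window)
# 0  -> slice(0, 5)     left-truncated window
# 50 -> slice(46, 55)   full window
# 98 -> slice(94, 100)  right-truncated window
```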