-
Notifications
You must be signed in to change notification settings - Fork 24
Jerk sliding slide #117
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Jerk sliding slide #117
Changes from 8 commits
217867e
94727bc
58f6677
7ec39c5
a176c07
fca5afe
6d39f81
9bb86a6
62d680b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4,18 +4,19 @@ | |
| from warnings import warn | ||
|
|
||
| from ..linear_model import lineardiff, polydiff, savgoldiff, spectraldiff | ||
| from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration | ||
| from ..total_variation_regularization import velocity, acceleration, jerk, iterative_velocity, smooth_acceleration, jerk_sliding | ||
| from ..kalman_smooth import constant_velocity, constant_acceleration, constant_jerk | ||
| from ..smooth_finite_difference import mediandiff, meandiff, gaussiandiff, friedrichsdiff, butterdiff, splinediff | ||
| from ..finite_difference import first_order, second_order | ||
| # Function aliases for testing cases where parameters change the behavior in a big way | ||
| def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) | ||
|
|
||
| dt = 0.1 | ||
| t = np.arange(0, 3+dt, dt) # sample locations, including the endpoint | ||
| t = np.linspace(0, 3, 31) # sample locations, including the endpoint | ||
| tt = np.linspace(0, 3) # full domain, for visualizing denser plots | ||
| ttt = np.linspace(0, 3, 201) # for testing jerk_sliding, which requires more points | ||
| np.random.seed(7) # for repeatability of the test, so we don't get random failures | ||
| noise = 0.05*np.random.randn(*t.shape) | ||
| long_noise = 0.05*np.random.randn(*ttt.shape) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I define noise up here before anything runs for consistency. Otherwise I was finding I'd get different answers when running a test on its own versus with everyone else.
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ended up adding |
||
|
|
||
| # Analytic (function, derivative) pairs on which to test differentiation methods. | ||
| test_funcs_and_derivs = [ | ||
|
|
@@ -51,8 +52,8 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) | |
| (acceleration, {'gamma':1}), (acceleration, [1]), | ||
| (jerk, {'gamma':10}), (jerk, [10]), | ||
| (iterative_velocity, {'num_iterations':5, 'gamma':0.05}), (iterative_velocity, [5, 0.05]), | ||
| (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]) | ||
| # TODO (jerk_sliding), because with the test cases here (len < 1000) it would just be a duplicate of jerk | ||
| (smooth_acceleration, {'gamma':2, 'window_size':5}), (smooth_acceleration, [2, 5]), | ||
| (jerk_sliding, {'gamma':1e2, 'solver':'CLARABEL'}), (jerk_sliding, [1e2], {'solver':'CLARABEL'}) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The OSQP solver was giving me trouble in the CI again. |
||
| ] | ||
|
|
||
| # All the testing methodology follows the exact same pattern; the only thing that changes is the | ||
|
|
@@ -184,7 +185,13 @@ def iterated_first_order(*args, **kwargs): return first_order(*args, **kwargs) | |
| [(-2, -2), (-1, -1), (-1, -1), (0, -1)], | ||
| [(0, 0), (1, 0), (0, -1), (1, 0)], | ||
| [(1, 1), (2, 2), (1, 1), (2, 2)], | ||
| [(1, 1), (3, 3), (1, 1), (3, 3)]] | ||
| [(1, 1), (3, 3), (1, 1), (3, 3)]], | ||
| jerk_sliding: [[(-13, -14), (-12, -13), (0, -1), (1, 0)], | ||
| [(-12, -13), (-12, -12), (0, -1), (0, 0)], | ||
| [(-13, -14), (-12, -13), (0, -1), (0, 0)], | ||
| [(-1, -2), (0, 0), (0, -1), (1, 0)], | ||
| [(0, 0), (2, 1), (0, 0), (2, 1)], | ||
| [(1, 1), (3, 3), (1, 1), (3, 3)]] | ||
| } | ||
|
|
||
| # Essentially run the cartesian product of [diff methods] x [test functions] through this one test | ||
|
|
@@ -203,9 +210,16 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re | |
| except: warn(f"Cannot import cvxpy, skipping {diff_method} test."); return | ||
|
|
||
| # sample the true function and make noisy samples, and sample true derivative | ||
| x = f(t) | ||
| x_noisy = x + noise | ||
| dxdt = df(t) | ||
| if diff_method != jerk_sliding: | ||
| x = f(t) | ||
| x_noisy = x + noise | ||
| dxdt = df(t) | ||
| dt = t[1] - t[0] | ||
| else: # different density for jerk_sliding | ||
| x = f(ttt) | ||
| x_noisy = x + long_noise | ||
| dxdt = df(ttt) | ||
| dt = ttt[1] - ttt[0] | ||
|
|
||
| # differentiate without and with noise, accounting for new and old styles of calling functions | ||
| x_hat, dxdt_hat = diff_method(x, dt, **params) if isinstance(params, dict) \ | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -73,7 +73,7 @@ def identity(x, dt): return x, 0 # should come back the same | |
| x = np.arange(100) | ||
| kernel = utility.gaussian_kernel(9) | ||
|
|
||
| x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, step_size=2) | ||
| x_hat, dxdt_hat = utility.slide_function(identity, x, 0.1, kernel, stride=2) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I decided to rename this parameter. I like |
||
|
|
||
| assert np.allclose(x, x_hat) | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,7 +97,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None): | |
| dxdt_hat = y/dt # y only holds the dx values; to get deriv scale by dt | ||
| x_hat = np.cumsum(y) + integration_constants.value[order-1] # smoothed data | ||
|
|
||
| dxdt_hat = (dxdt_hat[0:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. No need for that little 0. |
||
| dxdt_hat = (dxdt_hat[:-1] + dxdt_hat[1:])/2 # take first order FD to smooth a touch | ||
| ddxdt_hat_f = dxdt_hat[-1] - dxdt_hat[-2] | ||
| dxdt_hat_f = dxdt_hat[-1] + ddxdt_hat_f # What is this doing? Could we use a 2nd order FD above natively? | ||
| dxdt_hat = np.hstack((dxdt_hat, dxdt_hat_f)) | ||
|
|
@@ -106,7 +106,7 @@ def _total_variation_regularized_derivative(x, dt, order, gamma, solver=None): | |
| d = dxdt_hat[2] - dxdt_hat[1] | ||
| dxdt_hat[0] = dxdt_hat[1] - d | ||
|
|
||
| return x_hat*std+mean, dxdt_hat*std | ||
| return x_hat*std+mean, dxdt_hat*std # derivative is linear, so scale derivative by std | ||
|
|
||
|
|
||
| def velocity(x, dt, params=None, options=None, gamma=None, solver=None): | ||
|
|
@@ -253,65 +253,9 @@ def jerk_sliding(x, dt, params=None, options=None, gamma=None, solver=None): | |
| elif gamma == None: | ||
| raise ValueError("`gamma` must be given.") | ||
|
|
||
| window_size = 1000 | ||
| stride = 200 | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Hard-coding these makes this method less usable. |
||
|
|
||
| if len(x) < window_size: | ||
| if len(x) <= 100: | ||
| warn("len(x) <= 1000, calling standard jerk() without sliding") | ||
| return _total_variation_regularized_derivative(x, dt, 3, gamma, solver=solver) | ||
|
|
||
| # slide the jerk | ||
| final_xsmooth = [] | ||
| final_xdot_hat = [] | ||
| first_idx = 0 | ||
| final_idx = first_idx + window_size | ||
| last_loop = False | ||
|
|
||
| final_weighting = [] | ||
|
|
||
| try: | ||
| while not last_loop: | ||
| xsmooth, xdot_hat = _total_variation_regularized_derivative(x[first_idx:final_idx], dt, 3, | ||
| gamma, solver=solver) | ||
|
|
||
| xsmooth = np.hstack(([0]*first_idx, xsmooth, [0]*(len(x)-final_idx))) | ||
| final_xsmooth.append(xsmooth) | ||
|
|
||
| xdot_hat = np.hstack(([0]*first_idx, xdot_hat, [0]*(len(x)-final_idx))) | ||
| final_xdot_hat.append(xdot_hat) | ||
|
|
||
| # blending | ||
| w = np.hstack(([0]*first_idx, | ||
| np.arange(1, 201)/200, | ||
| [1]*(final_idx-first_idx-400), | ||
| (np.arange(1, 201)/200)[::-1], | ||
| [0]*(len(x)-final_idx))) | ||
| final_weighting.append(w) | ||
|
|
||
| if final_idx >= len(x): | ||
| last_loop = True | ||
| else: | ||
| first_idx += stride | ||
| final_idx += stride | ||
| if final_idx > len(x): | ||
| final_idx = len(x) | ||
| if final_idx - first_idx < 200: | ||
| first_idx -= (200 - (final_idx - first_idx)) | ||
|
|
||
| # normalize columns | ||
| weights = np.vstack(final_weighting) | ||
| for c in range(weights.shape[1]): | ||
| weights[:, c] /= np.sum(weights[:, c]) | ||
|
|
||
| # weighted sums | ||
| xsmooth = np.vstack(final_xsmooth) | ||
| xsmooth = np.sum(xsmooth*weights, axis=0) | ||
|
|
||
| xdot_hat = np.vstack(final_xdot_hat) | ||
| xdot_hat = np.sum(xdot_hat*weights, axis=0) | ||
|
|
||
| return xsmooth, xdot_hat | ||
|
|
||
| except ValueError: | ||
| warn('Solver failed, returning finite difference instead') | ||
| from pynumdiff.finite_difference import second_order | ||
| return second_order(x, dt) | ||
| kernel = np.hstack((np.arange(1, 21)/20, np.ones(60), (np.arange(0, 21)/20)[::-1])) | ||
| return utility.slide_function(_total_variation_regularized_derivative, x, dt, kernel, 3, gamma, stride=20, solver=solver) | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I briefly had some other vectors up here, but switching between them in the test is annoying, so I've tried to keep everyone on this one
t and dt.