Skip to content

Commit bdf674b

Browse files
authored
Merge pull request #89 from florisvb/fix-first-value
addressing #88
2 parents d0f6cd4 + 01cf531 commit bdf674b

File tree

4 files changed

+35
-28
lines changed

4 files changed

+35
-28
lines changed

pynumdiff/tests/test_optimize.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ def test_butterdiff():
8080
np.testing.assert_array_less( np.abs(params_1[0] - 9), 1.001, err_msg=get_err_msg(params_1, [9, 0.157]))
8181
np.testing.assert_array_less( np.abs(params_1[1] - 0.157), 0.01, err_msg=get_err_msg(params_1, [9, 0.157]))
8282
#np.testing.assert_almost_equal(params_1, [9, 0.157], decimal=3, err_msg=get_err_msg(params_1, [9, 0.157]))
83-
np.testing.assert_almost_equal(params_2, [2, 0.99], decimal=3, err_msg=get_err_msg(params_2, [2, 0.99]))
83+
np.testing.assert_almost_equal(params_2, [3, 0.99], decimal=3, err_msg=get_err_msg(params_2, [3, 0.99]))
8484

8585
def test_splinediff():
8686
params_1, val_1 = splinediff(x, dt, params=None, options={'iterate': True},

pynumdiff/tests/test_total_variation_regularization.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ def test_smooth_acceleration():
6868

6969
params = [5, 30]
7070
x_hat, dxdt_hat = smooth_acceleration(x, dt, params, options={'solver': 'CVXOPT'})
71-
x_smooth = np.array([4.16480747, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
71+
x_smooth = np.array([4.2, 5.52913444, 6.77037146, 7.87267273, 8.79483088,
7272
9.5044844, 9.97926076, 10.20730827, 10.18728338, 9.92792114,
7373
9.44728533, 8.77174094, 7.93472066, 6.97538656, 5.93725369])
7474
dxdt = np.array([136.43269721, 129.9388182, 118.30858578, 102.15166804,

pynumdiff/tests/test_utils.py

Lines changed: 21 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,27 @@
77
from pynumdiff.utils import utility, simulate, evaluate
88

99

10-
def test_utility():
11-
return# TODO. There are a lot of basic integration functionalities, etc. that deserve tests.
10+
def test_integrate_dxdt_hat():
11+
dt = 0.1
12+
for dxdt,expected in [(np.ones(10), np.arange(0, 1, 0.1)), # constant derivative
13+
(np.linspace(0, 1, 11), [0, 0.005, 0.02, 0.045, 0.08, 0.125, 0.18, 0.245, 0.32, 0.405, 0.5]), # linear derivative
14+
(np.array([1.0]), [0])]: # edge case of just one entry
15+
x_hat = utility.integrate_dxdt_hat(dxdt, dt)
16+
np.testing.assert_allclose(x_hat, expected)
17+
assert len(x_hat) == len(dxdt)
18+
19+
def test_estimate_initial_condition():
20+
for x,x_hat,c in [([1.0, 2.0, 3.0, 4.0, 5.0], [0.0, 1.0, 2.0, 3.0, 4.0], 1), # Perfect alignment case, xhat shifted by 1
21+
(np.ones(5)*10, np.ones(5)*5, 5),
22+
([0], [1], -1)]:
23+
x0 = utility.estimate_initial_condition(x, x_hat)
24+
np.testing.assert_allclose(x0, float(c), rtol=1e-3)
25+
26+
np.random.seed(42) # Noisy case. Seed for reproducibility
27+
x0 = utility.estimate_initial_condition([1.0, 2.0, 3.0, 4.0, 5.0],
28+
np.array([0.0, 1.0, 2.0, 3.0, 4.0]) + np.random.normal(0, 0.1, 5))
29+
assert 0.9 < x0 < 1.1 # The result should be close to 1.0, but not exactly due to noise
30+
1231

1332
def test_simulate():
1433
return

pynumdiff/utils/utility.py

Lines changed: 12 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -135,52 +135,40 @@ def integrate_dxdt_hat(dxdt_hat, dt):
135135
136136
:return: **x_hat** (np.array[float]) -- integral of dxdt_hat
137137
"""
138-
x = scipy.integrate.cumulative_trapezoid(dxdt_hat)
139-
first_value = x[0] - dxdt_hat[0]
140-
return np.hstack((first_value, x))*dt
138+
return np.hstack((0, scipy.integrate.cumulative_trapezoid(dxdt_hat)))*dt
141139

142140

143141
# Optimization routine to estimate the integration constant.
144142
def estimate_initial_condition(x, x_hat):
145-
"""
146-
Integration leaves an unknown integration constant. This function finds a best fit integration constant given x, and x_hat (the integral of dxdt_hat)
147-
148-
:param x: timeseries of measurements
149-
:type x: np.array
143+
"""Integration leaves an unknown integration constant. This function finds a best fit integration constant given x and
144+
x_hat (the integral of dxdt_hat) by optimizing :math:`\\min_c ||x - (\\hat{x} + c)||_2`.
150145
151-
:param x_hat: smoothed estimate of x, for the purpose of this function this should have been determined by integrate_dxdt_hat
152-
:type x_hat: np.array
146+
:param np.array[float] x: timeseries of measurements
147+
:param np.array[float] x_hat: smoothed estimate of x, for the purpose of this function this should have been determined
148+
by integrate_dxdt_hat
153149
154-
:return: integration constant (i.e. initial condition) that best aligns x_hat with x
155-
:rtype: float
150+
:return: **integration constant** (float) -- initial condition that best aligns x_hat with x
156151
"""
157-
def f(x0, *args):
158-
x, x_hat = args[0]
159-
error = np.linalg.norm(x - (x_hat+x0))
160-
return error
161-
result = scipy.optimize.minimize(f, [0], args=[x, x_hat], method='SLSQP')
162-
return result.x
152+
return scipy.optimize.minimize(lambda x0, x, xhat: np.linalg.norm(x - (x_hat+x0)), # fn to minimize in 1st argument
153+
0, args=(x, x_hat), method='SLSQP').x[0] # result is a vector, even if initial guess is just a scalar
163154

164155

165156
# kernels
166157
def _mean_kernel(window_size):
167-
"""A uniform boxcar of total integral 1
168-
"""
158+
"""A uniform boxcar of total integral 1"""
169159
return np.ones(window_size)/window_size
170160

171161

172162
def _gaussian_kernel(window_size):
173-
"""A truncated gaussian
174-
"""
163+
"""A truncated gaussian"""
175164
sigma = window_size / 6.
176165
t = np.linspace(-2.7*sigma, 2.7*sigma, window_size)
177166
ker = 1/np.sqrt(2*np.pi*sigma**2) * np.exp(-(t**2)/(2*sigma**2)) # gaussian function itself
178167
return ker / np.sum(ker)
179168

180169

181170
def _friedrichs_kernel(window_size):
182-
"""A bump function
183-
"""
171+
"""A bump function"""
184172
x = np.linspace(-0.999, 0.999, window_size)
185173
ker = np.exp(-1/(1-x**2))
186174
return ker / np.sum(ker)

0 commit comments

Comments
 (0)