Skip to content

Commit f36959a

Browse files
authored
Merge pull request #176 from florisvb/improve-test-coverage
improving coverage with some tricks and judicious choices
2 parents 5b17f19 + 98a5027 commit f36959a

7 files changed

Lines changed: 84 additions & 139 deletions

File tree

.coveragerc

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
[run]
2+
omit =
3+
pynumdiff/_version.py
4+
pynumdiff/tests/*
5+
pynumdiff/utils/old_pi_cruise_control.py
6+
7+
[report]
8+
# uses regex to match lines; lines that head a block or function exclude that whole thing
9+
exclude_lines =
10+
pragma: no cover
11+
if __name__ == .__main__.:
12+
if diagflag:
13+
if options != None:
14+
^def plot
15+
^def plot_comparison
16+
^def suggest_method
17+
def _lorenz_xyz_odeint
18+
^except ImportError:
19+

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ jobs:
1717
- name: tests and coverage
1818
run: | # some extra stuff here to make sure coverage works across multiprocessing
1919
pip install -e .[advanced,dev] coveralls
20-
coverage run --source=pynumdiff --omit="pynumdiff/_version.py,pynumdiff/tests/*,pynumdiff/utils/old_pi_cruise_control.py" -m pytest -s
20+
coverage run --source=pynumdiff -m pytest -s
2121
coverage xml
2222
- uses: coverallsapp/github-action@v2
2323
with:

pynumdiff/linear_model/_linear_model.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,17 +12,17 @@
1212
except ImportError: pass
1313

1414

15-
def savgoldiff(*args, **kwargs):
15+
def savgoldiff(*args, **kwargs): # pragma: no cover
1616
warn("`savgoldiff` has moved to `polynomial_fit.savgoldiff` and will be removed from "
1717
+ "`linear_model` in a future release.", DeprecationWarning)
1818
return _savgoldiff(*args, **kwargs)
1919

20-
def polydiff(*args, **kwargs):
20+
def polydiff(*args, **kwargs): # pragma: no cover
2121
warn("`polydiff` has moved to `polynomial_fit.polydiff` and will be removed from "
2222
+ "`linear_model` in a future release.", DeprecationWarning)
2323
return _polydiff(*args, **kwargs)
2424

25-
def spectraldiff(*args, **kwargs):
25+
def spectraldiff(*args, **kwargs): # pragma: no cover
2626
warn("`spectraldiff` has moved to `basis_fit.spectraldiff` and will be removed from "
2727
+ "`linear_model` in a future release.", DeprecationWarning)
2828
return _spectraldiff(*args, **kwargs)

pynumdiff/polynomial_fit/_polynomial_fit.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,15 +5,15 @@
55
from pynumdiff.utils import utility
66

77

8-
def splinediff(x, dt_or_t, params=None, options={}, degree=3, s=None, num_iterations=1):
8+
def splinediff(x, dt_or_t, params=None, options=None, degree=3, s=None, num_iterations=1):
99
"""Find smoothed data and derivative estimates by fitting a smoothing spline to the data with
1010
scipy.interpolate.UnivariateSpline. Variable step size is supported with equal ease as uniform step size.
1111
1212
:param np.array[float] x: data to differentiate
1313
:param float or array[float] dt_or_t: This function supports variable step size. This parameter is either the constant
1414
:math:`\\Delta t` if given as a single float, or data locations if given as an array of same length as :code:`x`.
1515
:param list params: (**deprecated**, prefer :code:`degree`, :code:`cutoff_freq`, and :code:`num_iterations`)
16-
:param dict options: (**deprecated**, prefer :code:`num_iterations`) an empty dictionary or {'iterate': (bool)}
16+
:param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary of {'iterate': (bool)}
1717
:param int degree: polynomial degree of the spline. A kth degree spline can be differentiated k times.
1818
:param float s: positive smoothing factor used to choose the number of knots. Number of knots will be increased
1919
until the smoothing condition is satisfied: :math:`\\sum_t (x[t] - \\text{spline}[t])^2 \\leq s`
@@ -26,8 +26,8 @@ def splinediff(x, dt_or_t, params=None, options={}, degree=3, s=None, num_iterat
2626
warn("`params` and `options` parameters will be removed in a future version. Use `order`, `s`, and " +
2727
"`num_iterations` instead.", DeprecationWarning)
2828
degree, s = params[0:2]
29-
if 'iterate' in options and options['iterate']:
30-
num_iterations = params[2]
29+
if options != None:
30+
if 'iterate' in options and options['iterate']: num_iterations = params[2]
3131

3232
if np.isscalar(dt_or_t):
3333
t = np.arange(len(x))*dt_or_t

pynumdiff/smooth_finite_difference/_smooth_finite_difference.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5,
173173
return finite_difference(x_hat, dt)
174174

175175

176-
def splinediff(*args, **kwargs):
176+
def splinediff(*args, **kwargs): # pragma: no cover
177177
warn("`splinediff` has moved to `polynomial_fit.splinediff` and will be removed from "
178178
+ "`smooth_finite_difference` in a future release.", DeprecationWarning)
179179
return _splinediff(*args, **kwargs)
Lines changed: 54 additions & 128 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
#!/usr/bin/env python
21
# pylint: skip-file
32

43
# u = TVRegDiff( data, iter, alph, u0, scale, ep, dx, plotflag, diagflag );
@@ -14,13 +13,13 @@
1413
#
1514
# iter Number of iterations to run the main loop. A stopping
1615
# condition based on the norm of the gradient vector g
17-
# below would be an easy modification. No default value.
16+
# below would be an easy modification. No default value.
1817
#
19-
# alph Regularization parameter. This is the main parameter
20-
# to fiddle with. Start by varying by orders of
18+
# alph Regularization parameter. This is the main parameter
19+
# to fiddle with. Start by varying by orders of
2120
# magnitude until reasonable results are obtained. A
2221
# value to the nearest power of 10 is usually adequate.
23-
# No default value. Higher values increase
22+
# No default value. Higher values increase
2423
# regularization strength and improve conditioning.
2524
#
2625
# u0 Initialization of the iteration. Default value is the
@@ -31,29 +30,29 @@
3130
# conditioning issues when the linear system is solved.
3231
#
3332
# scale 'large' or 'small' (case insensitive). Default is
34-
# 'small'. 'small' has somewhat better boundary
33+
# 'small'. 'small' has somewhat better boundary
3534
# behavior, but becomes unwieldy for data larger than
36-
# 1000 entries or so. 'large' has simpler numerics but
35+
# 1000 entries or so. 'large' has simpler numerics but
3736
# is more efficient for large-scale problems. 'large' is
3837
# more readily modified for higher-order derivatives,
3938
# since the implicit differentiation matrix is square.
4039
#
4140
# ep Parameter for avoiding division by zero. Default value
42-
# is 1e-6. Results should not be very sensitive to the
43-
# value. Larger values improve conditioning and
41+
# is 1e-6. Results should not be very sensitive to the
42+
# value. Larger values improve conditioning and
4443
# therefore speed, while smaller values give more
4544
# accurate results with sharper jumps.
4645
#
4746
# dx Grid spacing, used in the definition of the derivative
48-
# operators. Default is the reciprocal of the data size.
47+
# operators. Default is the reciprocal of the data size.
4948
#
5049
# plotflag Flag whether to display plot at each iteration.
51-
# Default is 1 (yes). Useful, but adds significant
50+
# Default is 1 (yes). Useful, but adds significant
5251
# running time.
5352
#
5453
# diagflag Flag whether to display diagnostics at each
55-
# iteration. Default is 1 (yes). Useful for diagnosing
56-
# preconditioning problems. When tolerance is not met,
54+
# iteration. Default is False. Useful for diagnosing
55+
# preconditioning problems. When tolerance is not met,
5756
# an early iterate being best is more worrying than a
5857
# large relative residual.
5958
#
@@ -145,84 +144,50 @@
145144
#########################################################
146145

147146
import sys
148-
149-
try:
150-
import numpy as np
151-
import scipy as sp
152-
from scipy import sparse
153-
from scipy.sparse import linalg as splin
154-
except ImportError:
155-
sys.exit("Numpy and Scipy must be installed for TVRegDiag to work - "
156-
"aborting")
157-
158-
_has_matplotlib = True
159-
160-
try:
161-
import matplotlib.pyplot as plt
162-
except ImportError:
163-
_has_matplotlib = False
164-
print("Matplotlib is not installed - plotting functionality disabled")
165-
166-
# Utility function.
167-
168-
169-
def chop(v):
170-
return v[1:]
147+
import matplotlib.pyplot as plt
148+
import numpy as np
149+
import scipy as sp
150+
from scipy import sparse
151+
from scipy.sparse import linalg as splin
171152

172153

173154
def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None,
174-
plotflag=_has_matplotlib, diagflag=1, maxit=None):
175-
if maxit is None:
176-
maxit = len(data)
177-
178-
# code starts here
179-
# Make sure we have a column vector
155+
plotflag=True, diagflag=False, maxit=None):
156+
# Input checking
180157
data = np.array(data)
181-
if (len(data.shape) != 1):
182-
print("Error - data is not a column vector")
183-
return
184-
# Get the data size.
158+
if (len(data.shape) != 1): raise ValueError("Data must be a column vector")
159+
if maxit is None: maxit = len(data)
185160
n = len(data)
186-
187-
# Default checking. (u0 is done separately within each method.)
188-
if dx is None:
189-
dx = 1.0 / n
161+
if dx is None: dx = 1.0 / n
190162

191163
# Different methods for small- and large-scale problems.
192-
if (scale.lower() == 'small'):
193-
164+
if scale.lower() == 'small':
194165
# Construct differentiation matrix.
195166
c = np.ones(n + 1) / dx
196167
D = sparse.spdiags([-c, c], [0, 1], n, n + 1)
197168

198-
DT = D.transpose()
199-
200169
# Construct antidifferentiation operator and its adjoint.
201-
def A(x): return chop(np.cumsum(x) - 0.5 * (x + x[0])) * dx
170+
def A(x): return (np.cumsum(x) - 0.5 * (x + x[0]))[1:] * dx
202171

203172
def AT(w): return (sum(w) * np.ones(n + 1) -
204173
np.transpose(np.concatenate(([sum(w) / 2.0],
205174
np.cumsum(w) -
206175
w / 2.0)))) * dx
207176

208177
# Default initialization is naive derivative
209-
210-
if u0 is None:
211-
u0 = np.concatenate(([0], np.diff(data), [0]))
212-
178+
if u0 is None: u0 = np.concatenate(([0], np.diff(data), [0]))
213179
u = u0
214180
# Since Au( 0 ) = 0, we need to adjust.
215181
ofst = data[0]
216182
# Precompute.
217-
ATb = AT(ofst - data) # input: size n
183+
ATb = AT(ofst - data) # input: size n
218184

219185
# Main loop.
220186
for ii in range(1, itern+1):
221187
# Diagonal matrix of weights, for linearizing E-L equation.
222188
Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n)
223189
# Linearized diffusion matrix, also approximation of Hessian.
224-
L = dx * DT * Q * D
225-
190+
L = dx * D.T * Q * D
226191

227192
# Gradient of functional.
228193
g = AT(A(u)) + ATb + alph * L * u
@@ -235,34 +200,20 @@ def AT(w): return (sum(w) * np.ones(n + 1) -
235200
def linop(v): return (alph * L * v + AT(A(v)))
236201
linop = splin.LinearOperator((n + 1, n + 1), linop)
237202

203+
[s, info_i] = sparse.linalg.cg(linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None, M=P, atol=0)
238204
if diagflag:
239-
[s, info_i] = sparse.linalg.cg(
240-
linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
241-
M=P, atol=0)
242-
#print('iteration {0:4d}: relative change = {1:.3e}, '
243-
# 'gradient norm = {2:.3e}\n'.format(ii,
244-
# np.linalg.norm(
245-
# s[0]) /
246-
# np.linalg.norm(u),
247-
# np.linalg.norm(g)))
248-
#if (info_i > 0):
249-
# print("WARNING - convergence to tolerance not achieved!")
250-
#elif (info_i < 0):
251-
# print("WARNING - illegal input or breakdown")
252-
else:
253-
[s, info_i] = sparse.linalg.cg(
254-
linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
255-
M=P, atol=0)
205+
print('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n'.format(
206+
ii, np.linalg.norm(s[0])/np.linalg.norm(u), np.linalg.norm(g)))
207+
if (info_i > 0): print("WARNING - convergence to tolerance not achieved!")
208+
elif (info_i < 0): print("WARNING - illegal input or breakdown")
209+
256210
# Update solution.
257211
u = u - s
258212
# Display plot.
259-
if plotflag:
260-
plt.plot(u)
261-
plt.show()
262-
u = u[0:-1]
263-
264-
elif (scale.lower() == 'large'):
213+
if plotflag: plt.plot(u); plt.show()
214+
u = u[:-1]
265215

216+
elif scale.lower() == 'large':
266217
# Construct antidifferentiation operator and its adjoint.
267218
def A(v): return np.cumsum(v)
268219

@@ -275,12 +226,10 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
275226
mask = np.ones((n, n))
276227
mask[-1, -1] = 0.0
277228
D = sparse.dia_matrix(D.multiply(mask))
278-
DT = D.transpose()
279229
# Since Au( 0 ) = 0, we need to adjust.
280230
data = data - data[0]
281231
# Default initialization is naive derivative.
282-
if u0 is None:
283-
u0 = np.concatenate(([0], np.diff(data)))
232+
if u0 is None: u0 = np.concatenate(([0], np.diff(data)))
284233
u = u0
285234
# Precompute.
286235
ATd = AT(data)
@@ -290,7 +239,7 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
290239
# Diagonal matrix of weights, for linearizing E-L equation.
291240
Q = sparse.spdiags(1. / np.sqrt((D*u)**2.0 + ep), 0, n, n)
292241
# Linearized diffusion matrix, also approximation of Hessian.
293-
L = DT*Q*D
242+
L = D.T*Q*D
294243
# Gradient of functional.
295244
g = AT(A(u)) - ATd
296245
g = g + alph * L * u
@@ -305,57 +254,34 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
305254
def linop(v): return (alph * L * v + AT(A(v)))
306255
linop = splin.LinearOperator((n, n), linop)
307256

308-
print(maxit)
257+
[s, info_i] = sparse.linalg.cg(linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None, M=(R.T @ R), atol=0)
309258
if diagflag:
310-
[s, info_i] = sparse.linalg.cg(
311-
linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
312-
M=np.dot(R.transpose(), R), atol=0)
313-
print('iteration {0:4d}: relative change = {1:.3e}, '
314-
'gradient norm = {2:.3e}\n'.format(ii,
315-
np.linalg.norm(s[0]) /
316-
np.linalg.norm(u),
317-
np.linalg.norm(g)))
318-
if (info_i > 0):
319-
print("WARNING - convergence to tolerance not achieved!")
320-
elif (info_i < 0):
321-
print("WARNING - illegal input or breakdown")
259+
print('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n'.format(
260+
ii, np.linalg.norm(s[0])/np.linalg.norm(u), np.linalg.norm(g)))
261+
if (info_i > 0): print("WARNING - convergence to tolerance not achieved!")
262+
elif (info_i < 0): print("WARNING - illegal input or breakdown")
322263

323-
else:
324-
[s, info_i] = sparse.linalg.cg(
325-
linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None,
326-
M=np.dot(R.transpose(), R), atol=0)
327264
# Update current solution
328265
u = u + s
329266
# Display plot.
330-
if plotflag:
331-
plt.plot(u/dx)
332-
plt.show()
333-
267+
if plotflag: plt.plot(u/dx); plt.show()
334268
u = u/dx
335269

336270
return u
337271

338-
# Small testing script
339-
340-
341-
if __name__ == "__main__":
342-
272+
if __name__ == "__main__": # Small testing script
343273
dx = 0.05
344274
x0 = np.arange(0, 2.0*np.pi, dx)
345275

346276
testf = np.sin(x0)
277+
testf += np.random.normal(0.0, 0.04, x0.shape)
347278

348-
testf = testf + np.random.normal(0.0, 0.04, x0.shape)
349-
350-
deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx,
351-
ep=1e-1, scale='small', plotflag=0)
352-
deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx,
353-
ep=1e-2, scale='large', plotflag=0)
279+
deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx, ep=1e-1, scale='small', plotflag=False, diagflag=True)
280+
deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx, ep=1e-2, scale='large', plotflag=False, diagflag=True)
354281

355-
if (_has_matplotlib):
356-
plt.plot(np.cos(x0), label='Analytical', c=(0,0,0))
357-
plt.plot(np.gradient(testf, dx), label='numpy.gradient')
358-
plt.plot(deriv_sm, label='TVRegDiff (small)')
359-
plt.plot(deriv_lrg, label='TVRegDiff (large)')
360-
plt.legend()
361-
plt.show()
282+
plt.plot(np.cos(x0), label='Analytical', c=(0,0,0))
283+
plt.plot(np.gradient(testf, dx), label='numpy.gradient')
284+
plt.plot(deriv_sm, label='TVRegDiff (small)')
285+
plt.plot(deriv_lrg, label='TVRegDiff (large)')
286+
plt.legend()
287+
plt.show()

0 commit comments

Comments
 (0)