1- #!/usr/bin/env python
21# pylint: skip-file
32
43# u = TVRegDiff( data, iter, alph, u0, scale, ep, dx, plotflag, diagflag );
1413#
1514# iter Number of iterations to run the main loop. A stopping
1615# condition based on the norm of the gradient vector g
17- # below would be an easy modification. No default value.
16+ # below would be an easy modification. No default value.
1817#
19- # alph Regularization parameter. This is the main parameter
20- # to fiddle with. Start by varying by orders of
18+ # alph Regularization parameter. This is the main parameter
19+ # to fiddle with. Start by varying by orders of
2120# magnitude until reasonable results are obtained. A
2221# value to the nearest power of 10 is usually adequate.
23- # No default value. Higher values increase
22+ # No default value. Higher values increase
2423# regularization strength and improve conditioning.
2524#
2625# u0 Initialization of the iteration. Default value is the
3130# conditioning issues when the linear system is solved.
3231#
3332# scale 'large' or 'small' (case insensitive). Default is
34- # 'small'. 'small' has somewhat better boundary
33+ # 'small'. 'small' has somewhat better boundary
3534# behavior, but becomes unwieldy for data larger than
36- # 1000 entries or so. 'large' has simpler numerics but
35+ # 1000 entries or so. 'large' has simpler numerics but
3736# is more efficient for large-scale problems. 'large' is
3837# more readily modified for higher-order derivatives,
3938# since the implicit differentiation matrix is square.
4039#
4140# ep Parameter for avoiding division by zero. Default value
42- # is 1e-6. Results should not be very sensitive to the
43- # value. Larger values improve conditioning and
41+ # is 1e-6. Results should not be very sensitive to the
42+ # value. Larger values improve conditioning and
4443# therefore speed, while smaller values give more
4544# accurate results with sharper jumps.
4645#
4746# dx Grid spacing, used in the definition of the derivative
48- # operators. Default is the reciprocal of the data size.
47+ # operators. Default is the reciprocal of the data size.
4948#
5049# plotflag Flag whether to display plot at each iteration.
51- # Default is 1 (yes). Useful, but adds significant
50+ # Default is 1 (yes). Useful, but adds significant
5251# running time.
5352#
5453# diagflag Flag whether to display diagnostics at each
55- # iteration. Default is 1 (yes). Useful for diagnosing
56- # preconditioning problems. When tolerance is not met,
54+ # iteration. Default is False. Useful for diagnosing
55+ # preconditioning problems. When tolerance is not met,
5756# an early iterate being best is more worrying than a
5857# large relative residual.
5958#
145144#########################################################
146145
147146import sys
148-
149- try :
150- import numpy as np
151- import scipy as sp
152- from scipy import sparse
153- from scipy .sparse import linalg as splin
154- except ImportError :
155- sys .exit ("Numpy and Scipy must be installed for TVRegDiag to work - "
156- "aborting" )
157-
158- _has_matplotlib = True
159-
160- try :
161- import matplotlib .pyplot as plt
162- except ImportError :
163- _has_matplotlib = False
164- print ("Matplotlib is not installed - plotting functionality disabled" )
165-
166- # Utility function.
167-
168-
169- def chop (v ):
170- return v [1 :]
147+ import matplotlib .pyplot as plt
148+ import numpy as np
149+ import scipy as sp
150+ from scipy import sparse
151+ from scipy .sparse import linalg as splin
171152
172153
173154def TVRegDiff (data , itern , alph , u0 = None , scale = 'small' , ep = 1e-6 , dx = None ,
174- plotflag = _has_matplotlib , diagflag = 1 , maxit = None ):
175- if maxit is None :
176- maxit = len (data )
177-
178- # code starts here
179- # Make sure we have a column vector
155+ plotflag = True , diagflag = False , maxit = None ):
156+ # Input checking
180157 data = np .array (data )
181- if (len (data .shape ) != 1 ):
182- print ("Error - data is not a column vector" )
183- return
184- # Get the data size.
158+ if (len (data .shape ) != 1 ): raise ValueError ("Data is must be a column vector" )
159+ if maxit is None : maxit = len (data )
185160 n = len (data )
186-
187- # Default checking. (u0 is done separately within each method.)
188- if dx is None :
189- dx = 1.0 / n
161+ if dx is None : dx = 1.0 / n
190162
191163 # Different methods for small- and large-scale problems.
192- if (scale .lower () == 'small' ):
193-
164+ if scale .lower () == 'small' :
194165 # Construct differentiation matrix.
195166 c = np .ones (n + 1 ) / dx
196167 D = sparse .spdiags ([- c , c ], [0 , 1 ], n , n + 1 )
197168
198- DT = D .transpose ()
199-
200169 # Construct antidifferentiation operator and its adjoint.
201- def A (x ): return chop (np .cumsum (x ) - 0.5 * (x + x [0 ])) * dx
170+ def A (x ): return (np .cumsum (x ) - 0.5 * (x + x [0 ]))[ 1 :] * dx
202171
203172 def AT (w ): return (sum (w ) * np .ones (n + 1 ) -
204173 np .transpose (np .concatenate (([sum (w ) / 2.0 ],
205174 np .cumsum (w ) -
206175 w / 2.0 )))) * dx
207176
208177 # Default initialization is naive derivative
209-
210- if u0 is None :
211- u0 = np .concatenate (([0 ], np .diff (data ), [0 ]))
212-
178+ if u0 is None : u0 = np .concatenate (([0 ], np .diff (data ), [0 ]))
213179 u = u0
214180 # Since Au( 0 ) = 0, we need to adjust.
215181 ofst = data [0 ]
216182 # Precompute.
217- ATb = AT (ofst - data ) # input: size n
183+ ATb = AT (ofst - data ) # input: size n
218184
219185 # Main loop.
220186 for ii in range (1 , itern + 1 ):
221187 # Diagonal matrix of weights, for linearizing E-L equation.
222188 Q = sparse .spdiags (1. / (np .sqrt ((D * u )** 2 + ep )), 0 , n , n )
223189 # Linearized diffusion matrix, also approximation of Hessian.
224- L = dx * DT * Q * D
225-
190+ L = dx * D .T * Q * D
226191
227192 # Gradient of functional.
228193 g = AT (A (u )) + ATb + alph * L * u
@@ -235,34 +200,20 @@ def AT(w): return (sum(w) * np.ones(n + 1) -
235200 def linop (v ): return (alph * L * v + AT (A (v )))
236201 linop = splin .LinearOperator ((n + 1 , n + 1 ), linop )
237202
203+ [s , info_i ] = sparse .linalg .cg (linop , g , x0 = None , rtol = rtol , maxiter = maxit , callback = None , M = P , atol = 0 )
238204 if diagflag :
239- [s , info_i ] = sparse .linalg .cg (
240- linop , g , x0 = None , rtol = rtol , maxiter = maxit , callback = None ,
241- M = P , atol = 0 )
242- #print('iteration {0:4d}: relative change = {1:.3e}, '
243- # 'gradient norm = {2:.3e}\n'.format(ii,
244- # np.linalg.norm(
245- # s[0]) /
246- # np.linalg.norm(u),
247- # np.linalg.norm(g)))
248- #if (info_i > 0):
249- # print("WARNING - convergence to tolerance not achieved!")
250- #elif (info_i < 0):
251- # print("WARNING - illegal input or breakdown")
252- else :
253- [s , info_i ] = sparse .linalg .cg (
254- linop , g , x0 = None , rtol = rtol , maxiter = maxit , callback = None ,
255- M = P , atol = 0 )
205+ print ('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n ' .format (
206+ ii , np .linalg .norm (s [0 ])/ np .linalg .norm (u ), np .linalg .norm (g )))
207+ if (info_i > 0 ): print ("WARNING - convergence to tolerance not achieved!" )
208+ elif (info_i < 0 ): print ("WARNING - illegal input or breakdown" )
209+
256210 # Update solution.
257211 u = u - s
258212 # Display plot.
259- if plotflag :
260- plt .plot (u )
261- plt .show ()
262- u = u [0 :- 1 ]
263-
264- elif (scale .lower () == 'large' ):
213+ if plotflag : plt .plot (u ); plt .show ()
214+ u = u [:- 1 ]
265215
216+ elif scale .lower () == 'large' :
266217 # Construct antidifferentiation operator and its adjoint.
267218 def A (v ): return np .cumsum (v )
268219
@@ -275,12 +226,10 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
275226 mask = np .ones ((n , n ))
276227 mask [- 1 , - 1 ] = 0.0
277228 D = sparse .dia_matrix (D .multiply (mask ))
278- DT = D .transpose ()
279229 # Since Au( 0 ) = 0, we need to adjust.
280230 data = data - data [0 ]
281231 # Default initialization is naive derivative.
282- if u0 is None :
283- u0 = np .concatenate (([0 ], np .diff (data )))
232+ if u0 is None : u0 = np .concatenate (([0 ], np .diff (data )))
284233 u = u0
285234 # Precompute.
286235 ATd = AT (data )
@@ -290,7 +239,7 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
290239 # Diagonal matrix of weights, for linearizing E-L equation.
291240 Q = sparse .spdiags (1. / np .sqrt ((D * u )** 2.0 + ep ), 0 , n , n )
292241 # Linearized diffusion matrix, also approximation of Hessian.
293- L = DT * Q * D
242+ L = D . T * Q * D
294243 # Gradient of functional.
295244 g = AT (A (u )) - ATd
296245 g = g + alph * L * u
@@ -305,57 +254,34 @@ def AT(w): return (sum(w) * np.ones(len(w)) -
305254 def linop (v ): return (alph * L * v + AT (A (v )))
306255 linop = splin .LinearOperator ((n , n ), linop )
307256
308- print ( maxit )
257+ [ s , info_i ] = sparse . linalg . cg ( linop , - g , x0 = None , rtol = rtol , maxiter = maxit , callback = None , M = ( R . T @ R ), atol = 0 )
309258 if diagflag :
310- [s , info_i ] = sparse .linalg .cg (
311- linop , - g , x0 = None , rtol = rtol , maxiter = maxit , callback = None ,
312- M = np .dot (R .transpose (), R ), atol = 0 )
313- print ('iteration {0:4d}: relative change = {1:.3e}, '
314- 'gradient norm = {2:.3e}\n ' .format (ii ,
315- np .linalg .norm (s [0 ]) /
316- np .linalg .norm (u ),
317- np .linalg .norm (g )))
318- if (info_i > 0 ):
319- print ("WARNING - convergence to tolerance not achieved!" )
320- elif (info_i < 0 ):
321- print ("WARNING - illegal input or breakdown" )
259+ print ('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n ' .format (
260+ ii , np .linalg .norm (s [0 ])/ np .linalg .norm (u ), np .linalg .norm (g )))
261+ if (info_i > 0 ): print ("WARNING - convergence to tolerance not achieved!" )
262+ elif (info_i < 0 ): print ("WARNING - illegal input or breakdown" )
322263
323- else :
324- [s , info_i ] = sparse .linalg .cg (
325- linop , - g , x0 = None , rtol = rtol , maxiter = maxit , callback = None ,
326- M = np .dot (R .transpose (), R ), atol = 0 )
327264 # Update current solution
328265 u = u + s
329266 # Display plot.
330- if plotflag :
331- plt .plot (u / dx )
332- plt .show ()
333-
267+ if plotflag : plt .plot (u / dx ); plt .show ()
334268 u = u / dx
335269
336270 return u
337271
if __name__ == "__main__":  # Small testing script
    # Demo: sample sin(x) on [0, 2*pi), add Gaussian noise, then compare the
    # TV-regularized derivative estimates against the analytical derivative
    # cos(x) and the naive finite-difference estimate from numpy.gradient.
    dx = 0.05
    x0 = np.arange(0, 2.0 * np.pi, dx)

    testf = np.sin(x0)
    # NOTE(review): unseeded noise makes the demo non-reproducible run-to-run;
    # seed np.random (or use default_rng) if deterministic output is wanted.
    testf += np.random.normal(0.0, 0.04, x0.shape)

    # 'small' scale: better boundary behavior; 'large' scale: cheaper for big
    # inputs. Plotting inside TVRegDiff is disabled; diagnostics are printed.
    deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx, ep=1e-1, scale='small',
                         plotflag=False, diagflag=True)
    deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx, ep=1e-2, scale='large',
                          plotflag=False, diagflag=True)

    plt.plot(np.cos(x0), label='Analytical', c=(0, 0, 0))
    plt.plot(np.gradient(testf, dx), label='numpy.gradient')
    plt.plot(deriv_sm, label='TVRegDiff (small)')
    plt.plot(deriv_lrg, label='TVRegDiff (large)')
    plt.legend()
    plt.show()
0 commit comments