-
Notifications
You must be signed in to change notification settings - Fork 22
improving coverage with some tricks and judicious choices #176
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
d12266d
a9473bb
cc684a5
98a5027
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,19 @@ | ||
| [run] | ||
| omit = | ||
| pynumdiff/_version.py | ||
| pynumdiff/tests/* | ||
| pynumdiff/utils/old_pi_cruise_control.py | ||
|
|
||
| [report] | ||
| # uses regex to match lines; lines that head a block or function exclude that whole thing | ||
| exclude_lines = | ||
| pragma: no cover | ||
| if __name__ == .__main__.: | ||
| if diagflag: | ||
| if options != None: | ||
| ^def plot | ||
| ^def plot_comparison | ||
| ^def suggest_method | ||
| def _lorenz_xyz_odeint | ||
| ^except ImportError: | ||
|
|
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -5,15 +5,15 @@ | |
| from pynumdiff.utils import utility | ||
|
|
||
|
|
||
| def splinediff(x, dt_or_t, params=None, options={}, degree=3, s=None, num_iterations=1): | ||
| def splinediff(x, dt_or_t, params=None, options=None, degree=3, s=None, num_iterations=1): | ||
| """Find smoothed data and derivative estimates by fitting a smoothing spline to the data with | ||
| scipy.interpolate.UnivariateSpline. Variable step size is supported with equal ease as uniform step size. | ||
|
|
||
| :param np.array[float] x: data to differentiate | ||
| :param float or array[float] dt_or_t: This function supports variable step size. This parameter is either the constant | ||
| :math:`\\Delta t` if given as a single float, or data locations if given as an array of same length as :code:`x`. | ||
| :param list params: (**deprecated**, prefer :code:`degree`, :code:`cutoff_freq`, and :code:`num_iterations`) | ||
| :param dict options: (**deprecated**, prefer :code:`num_iterations`) an empty dictionary or {'iterate': (bool)} | ||
| :param dict options: (**deprecated**, prefer :code:`num_iterations`) a dictionary of {'iterate': (bool)} | ||
| :param int degree: polynomial degree of the spline. A kth degree spline can be differentiated k times. | ||
| :param float s: positive smoothing factor used to choose the number of knots. Number of knots will be increased | ||
| until the smoothing condition is satisfied: :math:`\\sum_t (x[t] - \\text{spline}[t])^2 \\leq s` | ||
|
|
@@ -26,8 +26,8 @@ def splinediff(x, dt_or_t, params=None, options={}, degree=3, s=None, num_iterat | |
| warn("`params` and `options` parameters will be removed in a future version. Use `order`, `s`, and " + | ||
| "`num_iterations` instead.", DeprecationWarning) | ||
| degree, s = params[0:2] | ||
| if 'iterate' in options and options['iterate']: | ||
| num_iterations = params[2] | ||
| if options != None: | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This way these lines get skipped by coverage, because they're uninteresting and will be removed eventually anyway. |
||
| if 'iterate' in options and options['iterate']: num_iterations = params[2] | ||
|
|
||
| if np.isscalar(dt_or_t): | ||
| t = np.arange(len(x))*dt_or_t | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,4 +1,3 @@ | ||
| #!/usr/bin/env python | ||
| # pylint: skip-file | ||
|
|
||
| # u = TVRegDiff( data, iter, alph, u0, scale, ep, dx, plotflag, diagflag ); | ||
|
|
@@ -14,13 +13,13 @@ | |
| # | ||
| # iter Number of iterations to run the main loop. A stopping | ||
| # condition based on the norm of the gradient vector g | ||
| # below would be an easy modification. No default value. | ||
| # below would be an easy modification. No default value. | ||
| # | ||
| # alph Regularization parameter. This is the main parameter | ||
| # to fiddle with. Start by varying by orders of | ||
| # alph Regularization parameter. This is the main parameter | ||
| # to fiddle with. Start by varying by orders of | ||
| # magnitude until reasonable results are obtained. A | ||
| # value to the nearest power of 10 is usally adequate. | ||
| # No default value. Higher values increase | ||
| # No default value. Higher values increase | ||
| # regularization strenght and improve conditioning. | ||
| # | ||
| # u0 Initialization of the iteration. Default value is the | ||
|
|
@@ -31,29 +30,29 @@ | |
| # conditioning issues when the linear system is solved. | ||
| # | ||
| # scale 'large' or 'small' (case insensitive). Default is | ||
| # 'small'. 'small' has somewhat better boundary | ||
| # 'small'. 'small' has somewhat better boundary | ||
| # behavior, but becomes unwieldly for data larger than | ||
| # 1000 entries or so. 'large' has simpler numerics but | ||
| # 1000 entries or so. 'large' has simpler numerics but | ||
| # is more efficient for large-scale problems. 'large' is | ||
| # more readily modified for higher-order derivatives, | ||
| # since the implicit differentiation matrix is square. | ||
| # | ||
| # ep Parameter for avoiding division by zero. Default value | ||
| # is 1e-6. Results should not be very sensitive to the | ||
| # value. Larger values improve conditioning and | ||
| # is 1e-6. Results should not be very sensitive to the | ||
| # value. Larger values improve conditioning and | ||
| # therefore speed, while smaller values give more | ||
| # accurate results with sharper jumps. | ||
| # | ||
| # dx Grid spacing, used in the definition of the derivative | ||
| # operators. Default is the reciprocal of the data size. | ||
| # operators. Default is the reciprocal of the data size. | ||
| # | ||
| # plotflag Flag whether to display plot at each iteration. | ||
| # Default is 1 (yes). Useful, but adds significant | ||
| # Default is 1 (yes). Useful, but adds significant | ||
| # running time. | ||
| # | ||
| # diagflag Flag whether to display diagnostics at each | ||
| # iteration. Default is 1 (yes). Useful for diagnosing | ||
| # preconditioning problems. When tolerance is not met, | ||
| # iteration. Default is False. Useful for diagnosing | ||
| # preconditioning problems. When tolerance is not met, | ||
| # an early iterate being best is more worrying than a | ||
| # large relative residual. | ||
| # | ||
|
|
@@ -145,84 +144,50 @@ | |
| ######################################################### | ||
|
|
||
| import sys | ||
|
|
||
| try: | ||
| import numpy as np | ||
| import scipy as sp | ||
| from scipy import sparse | ||
| from scipy.sparse import linalg as splin | ||
| except ImportError: | ||
| sys.exit("Numpy and Scipy must be installed for TVRegDiag to work - " | ||
| "aborting") | ||
|
|
||
| _has_matplotlib = True | ||
|
|
||
| try: | ||
| import matplotlib.pyplot as plt | ||
| except ImportError: | ||
| _has_matplotlib = False | ||
| print("Matplotlib is not installed - plotting functionality disabled") | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I don't think it's worth having all this up here. If you try to import something but don't have it, python will give you a message about it already. |
||
|
|
||
| # Utility function. | ||
|
|
||
|
|
||
| def chop(v): | ||
| return v[1:] | ||
| import matplotlib.pyplot as plt | ||
| import numpy as np | ||
| import scipy as sp | ||
| from scipy import sparse | ||
| from scipy.sparse import linalg as splin | ||
|
|
||
|
|
||
| def TVRegDiff(data, itern, alph, u0=None, scale='small', ep=1e-6, dx=None, | ||
| plotflag=_has_matplotlib, diagflag=1, maxit=None): | ||
| if maxit is None: | ||
| maxit = len(data) | ||
|
|
||
| # code starts here | ||
| # Make sure we have a column vector | ||
| plotflag=True, diagflag=False, maxit=None): | ||
| # Input checking | ||
| data = np.array(data) | ||
| if (len(data.shape) != 1): | ||
| print("Error - data is not a column vector") | ||
| return | ||
| # Get the data size. | ||
| if (len(data.shape) != 1): raise ValueError("Data is must be a column vector") | ||
| if maxit is None: maxit = len(data) | ||
| n = len(data) | ||
|
|
||
| # Default checking. (u0 is done separately within each method.) | ||
| if dx is None: | ||
| dx = 1.0 / n | ||
| if dx is None: dx = 1.0 / n | ||
|
|
||
| # Different methods for small- and large-scale problems. | ||
| if (scale.lower() == 'small'): | ||
|
|
||
| if scale.lower() == 'small': | ||
| # Construct differentiation matrix. | ||
| c = np.ones(n + 1) / dx | ||
| D = sparse.spdiags([-c, c], [0, 1], n, n + 1) | ||
|
|
||
| DT = D.transpose() | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why call .transpose() when .T exists? |
||
|
|
||
| # Construct antidifferentiation operator and its adjoint. | ||
| def A(x): return chop(np.cumsum(x) - 0.5 * (x + x[0])) * dx | ||
| def A(x): return (np.cumsum(x) - 0.5 * (x + x[0]))[1:] * dx | ||
|
|
||
| def AT(w): return (sum(w) * np.ones(n + 1) - | ||
| np.transpose(np.concatenate(([sum(w) / 2.0], | ||
| np.cumsum(w) - | ||
| w / 2.0)))) * dx | ||
|
|
||
| # Default initialization is naive derivative | ||
|
|
||
| if u0 is None: | ||
| u0 = np.concatenate(([0], np.diff(data), [0])) | ||
|
|
||
| if u0 is None: u0 = np.concatenate(([0], np.diff(data), [0])) | ||
| u = u0 | ||
| # Since Au( 0 ) = 0, we need to adjust. | ||
| ofst = data[0] | ||
| # Precompute. | ||
| ATb = AT(ofst - data) # input: size n | ||
| ATb = AT(ofst - data) # input: size n | ||
|
|
||
| # Main loop. | ||
| for ii in range(1, itern+1): | ||
| # Diagonal matrix of weights, for linearizing E-L equation. | ||
| Q = sparse.spdiags(1. / (np.sqrt((D * u)**2 + ep)), 0, n, n) | ||
| # Linearized diffusion matrix, also approximation of Hessian. | ||
| L = dx * DT * Q * D | ||
|
|
||
| L = dx * D.T * Q * D | ||
|
|
||
| # Gradient of functional. | ||
| g = AT(A(u)) + ATb + alph * L * u | ||
|
|
@@ -235,34 +200,20 @@ def AT(w): return (sum(w) * np.ones(n + 1) - | |
| def linop(v): return (alph * L * v + AT(A(v))) | ||
| linop = splin.LinearOperator((n + 1, n + 1), linop) | ||
|
|
||
| [s, info_i] = sparse.linalg.cg(linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None, M=P, atol=0) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This identical line was happening inside the |
||
| if diagflag: | ||
| [s, info_i] = sparse.linalg.cg( | ||
| linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None, | ||
| M=P, atol=0) | ||
| #print('iteration {0:4d}: relative change = {1:.3e}, ' | ||
| # 'gradient norm = {2:.3e}\n'.format(ii, | ||
| # np.linalg.norm( | ||
| # s[0]) / | ||
| # np.linalg.norm(u), | ||
| # np.linalg.norm(g))) | ||
| #if (info_i > 0): | ||
| # print("WARNING - convergence to tolerance not achieved!") | ||
| #elif (info_i < 0): | ||
| # print("WARNING - illegal input or breakdown") | ||
| else: | ||
| [s, info_i] = sparse.linalg.cg( | ||
| linop, g, x0=None, rtol=rtol, maxiter=maxit, callback=None, | ||
| M=P, atol=0) | ||
| print('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n'.format( | ||
| ii, np.linalg.norm(s[0])/np.linalg.norm(u), np.linalg.norm(g))) | ||
| if (info_i > 0): print("WARNING - convergence to tolerance not achieved!") | ||
| elif (info_i < 0): print("WARNING - illegal input or breakdown") | ||
|
|
||
| # Update solution. | ||
| u = u - s | ||
| # Display plot. | ||
| if plotflag: | ||
| plt.plot(u) | ||
| plt.show() | ||
| u = u[0:-1] | ||
|
|
||
| elif (scale.lower() == 'large'): | ||
| if plotflag: plt.plot(u); plt.show() | ||
| u = u[:-1] | ||
|
|
||
| elif scale.lower() == 'large': | ||
| # Construct antidifferentiation operator and its adjoint. | ||
| def A(v): return np.cumsum(v) | ||
|
|
||
|
|
@@ -275,12 +226,10 @@ def AT(w): return (sum(w) * np.ones(len(w)) - | |
| mask = np.ones((n, n)) | ||
| mask[-1, -1] = 0.0 | ||
| D = sparse.dia_matrix(D.multiply(mask)) | ||
| DT = D.transpose() | ||
| # Since Au( 0 ) = 0, we need to adjust. | ||
| data = data - data[0] | ||
| # Default initialization is naive derivative. | ||
| if u0 is None: | ||
| u0 = np.concatenate(([0], np.diff(data))) | ||
| if u0 is None: u0 = np.concatenate(([0], np.diff(data))) | ||
| u = u0 | ||
| # Precompute. | ||
| ATd = AT(data) | ||
|
|
@@ -290,7 +239,7 @@ def AT(w): return (sum(w) * np.ones(len(w)) - | |
| # Diagonal matrix of weights, for linearizing E-L equation. | ||
| Q = sparse.spdiags(1. / np.sqrt((D*u)**2.0 + ep), 0, n, n) | ||
| # Linearized diffusion matrix, also approximation of Hessian. | ||
| L = DT*Q*D | ||
| L = D.T*Q*D | ||
| # Gradient of functional. | ||
| g = AT(A(u)) - ATd | ||
| g = g + alph * L * u | ||
|
|
@@ -305,57 +254,34 @@ def AT(w): return (sum(w) * np.ones(len(w)) - | |
| def linop(v): return (alph * L * v + AT(A(v))) | ||
| linop = splin.LinearOperator((n, n), linop) | ||
|
|
||
| print(maxit) | ||
| [s, info_i] = sparse.linalg.cg(linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None, M=(R.T @ R), atol=0) | ||
| if diagflag: | ||
| [s, info_i] = sparse.linalg.cg( | ||
| linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None, | ||
| M=np.dot(R.transpose(), R), atol=0) | ||
| print('iteration {0:4d}: relative change = {1:.3e}, ' | ||
| 'gradient norm = {2:.3e}\n'.format(ii, | ||
| np.linalg.norm(s[0]) / | ||
| np.linalg.norm(u), | ||
| np.linalg.norm(g))) | ||
| if (info_i > 0): | ||
| print("WARNING - convergence to tolerance not achieved!") | ||
| elif (info_i < 0): | ||
| print("WARNING - illegal input or breakdown") | ||
| print('iteration {0:4d}: relative change = {1:.3e}, gradient norm = {2:.3e}\n'.format( | ||
| ii, np.linalg.norm(s[0])/np.linalg.norm(u), np.linalg.norm(g))) | ||
| if (info_i > 0): print("WARNING - convergence to tolerance not achieved!") | ||
| elif (info_i < 0): print("WARNING - illegal input or breakdown") | ||
|
|
||
| else: | ||
| [s, info_i] = sparse.linalg.cg( | ||
| linop, -g, x0=None, rtol=rtol, maxiter=maxit, callback=None, | ||
| M=np.dot(R.transpose(), R), atol=0) | ||
| # Update current solution | ||
| u = u + s | ||
| # Display plot. | ||
| if plotflag: | ||
| plt.plot(u/dx) | ||
| plt.show() | ||
|
|
||
| if plotflag: plt.plot(u/dx); plt.show() | ||
| u = u/dx | ||
|
|
||
| return u | ||
|
|
||
| # Small testing script | ||
|
|
||
|
|
||
| if __name__ == "__main__": | ||
|
|
||
| if __name__ == "__main__": # Small testing script | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This script is now getting ignored by coverage. |
||
| dx = 0.05 | ||
| x0 = np.arange(0, 2.0*np.pi, dx) | ||
|
|
||
| testf = np.sin(x0) | ||
| testf += np.random.normal(0.0, 0.04, x0.shape) | ||
|
|
||
| testf = testf + np.random.normal(0.0, 0.04, x0.shape) | ||
|
|
||
| deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx, | ||
| ep=1e-1, scale='small', plotflag=0) | ||
| deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx, | ||
| ep=1e-2, scale='large', plotflag=0) | ||
| deriv_sm = TVRegDiff(testf, 1, 5e-2, dx=dx, ep=1e-1, scale='small', plotflag=False, diagflag=True) | ||
| deriv_lrg = TVRegDiff(testf, 100, 1e-1, dx=dx, ep=1e-2, scale='large', plotflag=False, diagflag=True) | ||
|
|
||
| if (_has_matplotlib): | ||
| plt.plot(np.cos(x0), label='Analytical', c=(0,0,0)) | ||
| plt.plot(np.gradient(testf, dx), label='numpy.gradient') | ||
| plt.plot(deriv_sm, label='TVRegDiff (small)') | ||
| plt.plot(deriv_lrg, label='TVRegDiff (large)') | ||
| plt.legend() | ||
| plt.show() | ||
| plt.plot(np.cos(x0), label='Analytical', c=(0,0,0)) | ||
| plt.plot(np.gradient(testf, dx), label='numpy.gradient') | ||
| plt.plot(deriv_sm, label='TVRegDiff (small)') | ||
| plt.plot(deriv_lrg, label='TVRegDiff (large)') | ||
| plt.legend() | ||
| plt.show() | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The omits can live in .coveragerc, since I need one now anyway, and they're complicated enough that they probably should live there.