diff --git a/.coveragerc b/.coveragerc index a2e193a..9272d34 100644 --- a/.coveragerc +++ b/.coveragerc @@ -10,7 +10,7 @@ exclude_lines = pragma: no cover if __name__ == .__main__.: if diagflag: - if options != None: + if options is not None: ^def plot ^def plot_comparison ^def suggest_method diff --git a/.pylintrc b/.pylintrc index 10dd496..1f7636e 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,30 +1,15 @@ [MASTER] -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-allow-list= - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. (This is an alternative name to extension-pkg-allow-list -# for backward compatibility.) -extension-pkg-whitelist= - # Specify a score threshold to be exceeded before program exits with error. -fail-under=10.0 +fail-under=8.5 # Files or directories to be skipped. They should be base names, not paths. -ignore=CVS +ignore=_version.py, _chartrand_tvregdiff.py # Files or directories matching the regex patterns are skipped. The regex # matches against base names, not paths. ignore-patterns= -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the # number of processors available to use. jobs=1 @@ -34,10 +19,6 @@ jobs=1 # complex, nested conditions. limit-inference-results=100 -# List of plugins (as comma separated values of python module names) to load, -# usually to register additional checkers. -load-plugins= - # Pickle collected data for later comparisons. persistent=yes @@ -45,11 +26,6 @@ persistent=yes # user-friendly hints instead of false-positive error messages. suggestion-mode=yes -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. 
Leave empty to show @@ -75,85 +51,17 @@ disable=dangerous-default-value, duplicate-code, invalid-name, invalid-unary-operand-type, - print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, - raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - deprecated-sys-function, - exception-escape, - comprehension-escape + raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + multiple-statements, + multiple-imports, + too-many-positional-arguments # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option @@ -617,5 +525,5 @@ min-public-methods=2 # Exceptions that will emit a warning when being caught. Defaults to # "BaseException, Exception". -overgeneral-exceptions=BaseException, - Exception +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66968b6..99ff350 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,331 +2,70 @@ Thank you for your interest in contributing to PyNumDiff! This document provides guidelines and instructions for contributing to the project. -## Table of Contents +## Project Structure -- [How Can I Contribute?](#how-can-i-contribute) -- [Development Setup](#development-setup) -- [Code Style Guidelines](#code-style-guidelines) -- [Testing Guidelines](#testing-guidelines) -- [Pull Request Process](#pull-request-process) -- [Reporting Bugs](#reporting-bugs) -- [Proposing Features](#proposing-features) - -## How Can I Contribute? - -### Contributing Code - -1. Look for issues labeled `good first issue` if you're new to the project -2. Fork the repository -3. Create a branch for your changes -4. Make your changes following our guidelines -5. Submit a pull request - -## Development Setup - -### Prerequisites - -- Python 3.11 or higher -- Git -- (Optional) A virtual environment manager (venv, conda, etc.) 
- -### Setting Up the Development Environment - -1. **Fork the repository** on GitHub - -2. **Clone your fork** locally: - ```bash - git clone https://github.com/YOUR_USERNAME/PyNumDiff.git - cd PyNumDiff - ``` - -3. **Add the upstream repository**: - ```bash - git remote add upstream https://github.com/florisvb/PyNumDiff.git - ``` - -4. **Create a virtual environment** (recommended): - ```bash - # Using venv - python -m venv venv - - # Activate on Windows - venv\Scripts\activate - - # Activate on macOS/Linux - source venv/bin/activate - ``` - -5. **Install the package in development mode**: - ```bash - pip install -e . - ``` - -6. **Install development dependencies**: - ```bash - pip install pytest pylint - ``` - -7. **Verify the installation**: - ```bash - pytest -s - ``` - -### Project Structure - -- `pynumdiff/` - Main source code -- `examples/` - Jupyter notebook examples +- `pynumdiff/` - Main source code, tests in subfolder +- `notebooks/` - Jupyter notebook examples and experiments - `docs/` - Documentation source files - `.github/workflows/` - GitHub Actions CI configuration -- `tests/` - Test files (if applicable) - -## Code Style Guidelines - -### Python Style Guide - -There's no strict coding style enforced. The main guideline is to match the existing code style in the project. When contributing: - -- Match existing method signatures and docstring formats -- Follow the naming conventions used in the existing codebase -- Use 4 spaces for indentation (no tabs) - -### Code Quality - -The project uses `pylint` for code quality checks. While linting hasn't been strictly enforced recently, it will be important for the planned JOSS (Journal of Open Source Software) submission, which has stricter requirements. - -To run linting checks: - -1. **Run pylint** on your changes: - ```bash - pylint pynumdiff/ - ``` - -2. **Or use the project's linting script**: - ```bash - python linting.py - ``` - -### Editor Configuration - -The project includes an `.editorconfig` file that ensures consistent formatting. Most modern editors support EditorConfig automatically. - -## Testing Guidelines - -### Running Tests - -PyNumDiff uses `pytest` for testing. To run tests: - -```bash -# Run all tests -pytest -s - -# Run tests with plots (to visualize method results) -pytest -s --plot - -# Run tests with bounds (to print log error bounds) -pytest -s --bounds -``` - -### Writing Tests - -- Write tests for new features and bug fixes -- Follow the existing test structure -- Ensure all tests pass before submitting a PR -- Tests should be deterministic and not depend on external resources - -The test suite is organized into several test files: -- `test_diff_methods`: Broadly tests for correctness and ability to actually differentiate -- `test_utils`: Contains tests of miscellaneous functionality like simulations and evaluation metrics -- `test_optimize`: Tests the hyperparameter optimization code -### Continuous Integration +## Opening Issues -The project uses GitHub Actions for continuous integration. All pull requests are automatically tested. Make sure your changes pass all CI checks before requesting a review. +If you discover a bug or have an improvement idea or question, the place to start is the [Issues page](https://github.com/florisvb/PyNumDiff/issues), which is really the beating heart of any project (even if you're just here to give us kudos). Take a look through the history to get a sense of what has been done and which ideas have been considered before. 
A lot of hard-won knowledge and tough decisions have been explored and documented in the Issues. Feel free to open new issues if we haven't covered something. -## Pull Request Process +### Reporting Bugs -### Before Submitting +If reporting bugs, make sure you're on the latest version and that we haven't already taken care of something. Please include some or all of: -1. **Update your fork** with the latest changes from upstream: - ```bash - git fetch upstream - git checkout master - git merge upstream/master - ``` - -2. **Create a feature branch**: - ```bash - git checkout -b feature/your-feature-name - # or - git checkout -b fix/your-bug-fix-name - ``` - -3. **Make your changes** following the code style guidelines - -4. **Write or update tests** as needed - -5. **Run tests** to ensure everything passes: - ```bash - pytest -s - ``` - -6. **Run linting** to check code quality: - ```bash - python linting.py - ``` - -7. **Commit your changes** with clear, descriptive commit messages (see [Commit Messages](#commit-messages)) - -### Submitting a Pull Request - -1. **Push your branch** to your fork: - ```bash - git push origin feature/your-feature-name - ``` - -2. **Open a Pull Request** on GitHub: - - Go to the [PyNumDiff repository](https://github.com/florisvb/PyNumDiff) - - Click "New Pull Request" - - Select your fork and branch - - Fill out the PR template with: - - A clear title and description - - Reference to the related issue (e.g., "Fixes #169") - - Description of changes - - Any breaking changes - -3. **Wait for CI** to run and ensure all checks pass - -4. **Respond to feedback** from maintainers and reviewers - -5. **Keep your PR up to date** by rebasing on master if needed: - ```bash - git fetch upstream - git rebase upstream/master - git push --force-with-lease origin feature/your-feature-name - ``` - -### PR Guidelines - -- Smaller, focused PRs are generally easier to review -- Ensure all CI checks pass -- Request review from maintainers when ready -- Be responsive to feedback - -## Reporting Bugs - -### Before Submitting a Bug Report - -1. **Check existing issues** to see if the bug has already been reported -2. **Test with the latest version** to ensure the bug still exists -3. **Check the documentation** to ensure you're using the library correctly - -### How to Report a Bug - -When reporting a bug, please include: - -1. **Clear and descriptive title** -2. **Steps to reproduce**: +1. **Descriptive title** +2. **What happened**: - What you were trying to do - What you expected to happen - What actually happened 3. **Minimal code example** that reproduces the issue -4. **Environment information**: - - Python version - - PyNumDiff version - - Operating system -5. **Error messages** or stack traces (if applicable) +4. **Environment information**: Python and library versions (`pynumdiff`, `numpy`, `scipy`, anything salient) +5. **Error messages** or stack traces 6. **Additional context** (screenshots, data files, etc.) -### Bug Report Template - -```markdown -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. ... -2. ... - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Code example** -```python -# Minimal code that reproduces the issue -``` - -**Environment** -- Python version: -- PyNumDiff version: -- OS: - -**Additional context** -Add any other context about the problem here. -``` - -## Proposing Features - -### Before Proposing a Feature - -1. 
**Check existing issues** to see if the feature has been discussed -2. **Consider the scope** - is it within the project's goals? -3. **Think about implementation** - is it feasible? +### Proposing Features -### How to Propose a Feature +If we've got an ongoing or old discussion on a topic, and you can manage to find it, tack onto the discussion there. If your idea is otherwise within the scope of the project, start a new discussion. Let us know why you think something is necessary, and please feel free to suggest what would need to change to make it happen. The more investigation and thinking you do to show the feasibility and practicality of something, the more load that takes off other maintainers. -When proposing a feature, please include: +Here are some things you might include: -1. **Clear and descriptive title** +1. **Descriptive title** 2. **Problem statement**: What problem does this feature solve? 3. **Proposed solution**: How would you implement it? 4. **Alternatives considered**: What other approaches did you consider? 5. **Additional context**: Examples, use cases, etc. -### Feature Request Template +## Addressing Issues -```markdown -**Is your feature request related to a problem?** -A clear and concise description of what the problem is. +Look for issues labeled `good first issue` if you're new to the project. Talk to us, and we can suggest things that need to be done, of varying levels of code and research difficulty. -**Describe the solution you'd like** -A clear and concise description of what you want to happen. +### Research -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. +Some issues will require going and digging into alternative methods of differentiation so they can be added to our collection, or comparing a new or modified method to other methods. This kind of work requires some mathematical chops, but if you're down, we're happy about it. -**Additional context** -Add any other context or examples about the feature request here. -``` - -## Commit Messages - -We encourage descriptive commit messages that explain what changed and why. -Long, detailed commit messages are appreciated as they help others understand -the project's history. - -### Types - -- `feat`: A new feature -- `fix`: A bug fix -- `docs`: Documentation only changes -- `style`: Code style changes (formatting, missing semicolons, etc.) -- `refactor`: Code refactoring without changing functionality - -## Questions? - -If you have questions about contributing: +### Contributing Code -1. Check the [documentation](https://pynumdiff.readthedocs.io/) -2. Look through [existing issues](https://github.com/florisvb/PyNumDiff/issues) -3. Open a new issue with the `question` label +1. Fork the repository (button on the main repo page) 2. Clone down your version (`git clone https://github.com/YOUR_USERNAME/PyNumDiff.git`) 3. Set its upstream to point to this version so you can easily pull our changes (`git remote add upstream https://github.com/florisvb/PyNumDiff.git`) 4. Install the package in development mode (`pip install -e .`) as well as dependencies like `numpy`, `pytest`, `cvxpy`, etc. 5. Create a branch for your changes (`cd PyNumDiff`; `git checkout -b your-feature`) 6. Make your changes and commit (`git add file`; `git commit -m "descriptive commit message"`) 7. Update your fork with the latest changes from upstream (`git fetch upstream`; `git checkout master`; `git merge upstream/master`) 8.
Run the tests to make sure they pass (`pytest -s`, with helpful `--plot` or `--bounds` flags for debugging), possibly adding new ones +9. Push your code up to the cloud (`git push`) +10. Submit a pull request ("PR") (via the pull requests page on the website) +11. We'll review, leave comments, kick around further change ideas, and merge. -## Additional Resources +No strict coding style is enforced, although we consider docstrings to be very important. The project uses `pylint` for code quality checks (`pylint pynumdiff`), because we're trying to meet a high bar so the JOSS (Journal of Open Source Software) likes us. -- [PyNumDiff Documentation](https://pynumdiff.readthedocs.io/) -- [Project README](README.md) -- [GitHub Issues](https://github.com/florisvb/PyNumDiff/issues) -- [PEP 8 Style Guide](https://www.python.org/dev/peps/pep-0008/) -- [pytest Documentation](https://docs.pytest.org/) +Bear in mind that smaller, focused PRs are generally easier to review. We encourage descriptive commit messages that explain what changed and why. Long, detailed commit messages are appreciated as they help others understand the project's history. -Thank you for contributing to PyNumDiff! 🎉 \ No newline at end of file +Once you push, GitHub Actions will kick off our continuous integration job, which runs the tests, including: +- `test_diff_methods`: Broadly tests for correctness and ability to actually differentiate +- `test_utils`: Contains tests of supporting and miscellaneous functionality like simulations and evaluation metrics +- `test_optimize`: Tests the hyperparameter optimization code diff --git a/README.md b/README.md index af06416..1cf8918 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,6 @@ See the README in the `notebooks/` folder for a full guide to all demos and expe - `CITATION.cff` is citation information for the Journal of Open-Source Software (JOSS) paper associated with this project. - `LICENSE.txt` allows free usage of this project. - `README.md` is the text you're reading, hello. -- `linting.py` is a script to run `pylint`. - `pyproject.toml` governs how this package is set up and installed, including dependencies. ## Citation diff --git a/linting.py b/linting.py deleted file mode 100644 index fc71cfe..0000000 --- a/linting.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- -import re, sys, argparse -from pylint import lint - -# Call `echo $?` to see the exit code after a run. If the score is over this, the exit -# code will be 0 (success), and if not will be nonzero (failure). -THRESHOLD = 8.5 - -if len(sys.argv) < 2: - raise argparse.ArgumentError("Module to evaluate needs to be the first argument") - -sys.argv[1] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[1]) -run = lint.Run([sys.argv[1], f"--fail-under={THRESHOLD}"]) diff --git a/notebooks/README.md b/notebooks/README.md index f46ce70..425a372 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -5,6 +5,6 @@ | [Basic Tutorial](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/1_basic_tutorial.ipynb) | Demo to show invocations of all the major methods in this library on 1D data. | | [Optimizing Hyperparameters](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/2_optimizing_hyperparameters.ipynb) | All methods' answers are affected by choices of hyperparameters, which complicates differentiation if the true derivative is not known. Here we briefly cover metrics we'd like to optimize and show how to use our `optimize` function to find good hyperparameter choices. 
| | [Automatic Method Suggestion](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/3_automatic_method_suggestion.ipynb) | A short demo of how to allow `pynumdiff` to choose a differentiation method for your data. | -| [Performance Analysis](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/4_performance_analyssi.ipynb) | Experiments to compare methods' accuracy and bias across simulations. | +| [Performance Analysis](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/4_performance_analysis.ipynb) | Experiments to compare methods' accuracy and bias across simulations. | | [Robustness to Outliers Demo](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/5_robust_outliers_demo.ipynb) | This notebook shows a head-to-head of `RTSDiff`'s and `RobustDiff`'s minimum-RMSE performances on simulations with outliers, to illustrate the value of using a Huber loss in the Kalman MAP problem. | | [Multidimensionality Demo](https://github.com/florisvb/PyNumDiff/blob/master/notebooks/6_multidimensionality_demo.ipynb) | Demonstration of differentating multidimensional data along particular axes. | \ No newline at end of file diff --git a/pynumdiff/basis_fit.py b/pynumdiff/basis_fit.py index 439212a..2f74500 100644 --- a/pynumdiff/basis_fit.py +++ b/pynumdiff/basis_fit.py @@ -1,6 +1,6 @@ """Methods based on fitting basis functions to data""" -import numpy as np from warnings import warn +import numpy as np from scipy import sparse from pynumdiff.utils import utility @@ -23,14 +23,14 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `high_freq_cutoff`, " + "`even_extension`, and `pad_to_zero_dxdt` instead.", DeprecationWarning) high_freq_cutoff = params[0] if isinstance(params, list) else params - if options != None: + if options is not None: if 'even_extension' in options: even_extension = options['even_extension'] if 'pad_to_zero_dxdt' in options: pad_to_zero_dxdt = options['pad_to_zero_dxdt'] - elif high_freq_cutoff == None: + elif high_freq_cutoff is None: raise ValueError("`high_freq_cutoff` must be given.") L = len(x) @@ -105,7 +105,7 @@ def rbfdiff(x, dt_or_t, sigma=1, lmbd=0.01): cutoff = np.sqrt(-2 * sigma**2 * np.log(1e-4)) rows, cols, vals, dvals = [], [], [], [] - for n in range(len(t)): + for n in range(len(t)): # pylint: disable=consider-using-enumerate # Only consider points within a cutoff. Gaussian drops below eps at distance ~ sqrt(-2*sigma^2 log eps) l = np.searchsorted(t, t[n] - cutoff) # O(log N) to find indices of points within cutoff r = np.searchsorted(t, t[n] + cutoff) # finds index where new value should be inserted diff --git a/pynumdiff/finite_difference.py b/pynumdiff/finite_difference.py index ea88568..6ac6884 100644 --- a/pynumdiff/finite_difference.py +++ b/pynumdiff/finite_difference.py @@ -1,8 +1,8 @@ """This module implements some common finite difference schemes. 
This is handy for this module https://web.media.mit.edu/~crtaylor/calculator.html""" +from warnings import warn import numpy as np from pynumdiff.utils import utility -from warnings import warn def finitediff(x, dt, num_iterations=1, order=2, axis=0): @@ -87,7 +87,7 @@ def first_order(x, dt, params=None, options={}, num_iterations=1): warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " + "approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " + "horizontal drift in the answer, especially when iterating.", DeprecationWarning) - if params != None and 'iterate' in options: + if params is not None and 'iterate' in options: warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations` instead.", DeprecationWarning) num_iterations = params[0] if isinstance(params, list) else params diff --git a/pynumdiff/kalman_smooth.py b/pynumdiff/kalman_smooth.py index 4cfa139..bc003cf 100644 --- a/pynumdiff/kalman_smooth.py +++ b/pynumdiff/kalman_smooth.py @@ -1,8 +1,7 @@ """This module implements constant-derivative model-based smoothers based on Kalman filtering and its generalization.""" -import numpy as np from warnings import warn +import numpy as np from scipy.linalg import expm, sqrtm -from time import time try: import cvxpy except ImportError: pass @@ -47,7 +46,7 @@ def kalman_filter(y, dt_or_t, xhat0, P0, A, Q, C, R, B=None, u=None, save_P=True else: M = np.block([[A, Q],[np.zeros(A.shape), -A.T]]) # If variable dt, we'll exponentiate this a bunch if control: Mc = np.block([[A, B],[np.zeros((A.shape[0], 2*A.shape[1]))]]) - + for n in range(N): if n == 0: # first iteration is a special case, involving less work xhat_ = xhat0 @@ -61,10 +60,10 @@ def kalman_filter(y, dt_or_t, xhat0, P0, A, Q, C, R, B=None, u=None, save_P=True if dt < 0: Qn = np.abs(Qn) # eigenvalues go negative if reverse time, but noise shouldn't shrink if control: eM = expm(Mc * dt) - Bn = eM[:m,m:] # upper right block + Bn = eM[:m,m:] # upper right block xhat_ = An @ xhat + Bn @ u[n] if control else An @ xhat # ending underscores denote an a priori prediction P_ = An @ P @ An.T + Qn # the dense matrix multiplies here are the most expensive step - + xhat = xhat_.copy() # copies, lest modifications to these variables change the a priori estimates. 
See #122 P = P_.copy() if not np.isnan(y[n]): # handle missing data @@ -96,12 +95,12 @@ def rts_smooth(dt_or_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth= """ xhat_smooth = np.empty(xhat_post.shape); xhat_smooth[-1] = xhat_post[-1] # preallocate arrays if compute_P_smooth: P_smooth = np.empty(P_post.shape); P_smooth[-1] = P_post[-1] - + equispaced = np.isscalar(dt_or_t) # to avoid calling this in the loop if equispaced: An = A # in this case only assign once, outside the loop for n in range(xhat_pre.shape[0]-2, -1, -1): if not equispaced: An = expm(A * (dt_or_t[n+1] - dt_or_t[n])) # state transition from n to n+1 - C_RTS = P_post[n] @ An.T @ np.linalg.inv(P_pre[n+1]) # the [n+1]th index holds _{n+1|n} info + C_RTS = P_post[n] @ An.T @ np.linalg.inv(P_pre[n+1]) # the [n+1]th index holds _{n+1|n} info xhat_smooth[n] = xhat_post[n] + C_RTS @ (xhat_smooth[n+1] - xhat_pre[n+1]) # The original authors use C, not to be confused if compute_P_smooth: P_smooth[n] = P_post[n] + C_RTS @ (P_smooth[n+1] - P_pre[n+1]) @ C_RTS.T # with the measurement matrix @@ -145,7 +144,7 @@ def rtsdiff(x, dt_or_t, order, log_qr_ratio, forwardbackward): Q = eM[:order+1,order+1:] @ A.T xhat_pre, xhat_post, P_pre, P_post = kalman_filter(x, dt_or_t, xhat0, P0, A, Q, C, R) # noisy x are the "y" in Kalman-land - xhat_smooth = rts_smooth(dt_or_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=False) + xhat_smooth = rts_smooth(dt_or_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=False) x_hat_forward = xhat_smooth[:, 0] # first dimension is time, so slice first element at all times dxdt_hat_forward = xhat_smooth[:, 1] @@ -156,7 +155,7 @@ def rtsdiff(x, dt_or_t, order, log_qr_ratio, forwardbackward): if np.isscalar(dt_or_t): A = np.linalg.inv(A) # discrete time dynamics are just the inverse else: dt_or_t = dt_or_t[::-1] # in continuous time, reverse the time vector so dts go negative - + xhat_pre, xhat_post, P_pre, P_post = kalman_filter(x[::-1], dt_or_t, xhat0, P0, A, Q, C, R) xhat_smooth = rts_smooth(dt_or_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=False) x_hat_backward = xhat_smooth[:, 0][::-1] # the result is backwards still, so reverse it @@ -186,13 +185,13 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # boilerplate backwards compatibility code + if params is not None: # boilerplate backwards compatibility code warn("`params` and `options` parameters will be removed in a future version. Use `r`, " + "`q`, and `forwardbackward` instead.", DeprecationWarning) r, q = params - if options != None: + if options is not None: if 'forwardbackward' in options: forwardbackward = options['forwardbackward'] - elif r == None or q == None: + elif r is None or q is None: raise ValueError("`q` and `r` must be given.") warn("`constant_velocity` is deprecated. Call `rtsdiff` with order 1 instead.", DeprecationWarning) @@ -216,13 +215,13 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # boilerplate backwards compatibility code + if params is not None: # boilerplate backwards compatibility code warn("`params` and `options` parameters will be removed in a future version. 
Use `r`, " + "`q`, and `forwardbackward` instead.", DeprecationWarning) r, q = params - if options != None: + if options is not None: if 'forwardbackward' in options: forwardbackward = options['forwardbackward'] - elif r == None or q == None: + elif r is None or q is None: raise ValueError("`q` and `r` must be given.") warn("`constant_acceleration` is deprecated. Call `rtsdiff` with order 2 instead.", DeprecationWarning) @@ -246,13 +245,13 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # boilerplate backwards compatibility code + if params is not None: # boilerplate backwards compatibility code warn("`params` and `options` parameters will be removed in a future version. Use `r`, " + "`q`, and `forwardbackward` instead.", DeprecationWarning) r, q = params - if options != None: + if options is not None: if 'forwardbackward' in options: forwardbackward = options['forwardbackward'] - elif r == None or q == None: + elif r is None or q is None: raise ValueError("`q` and `r` must be given.") warn("`constant_jerk` is deprecated. Call `rtsdiff` with order 3 instead.", DeprecationWarning) @@ -299,7 +298,7 @@ def robustdiff(x, dt, order, log_q, log_r, proc_huberM=6, meas_huberM=0): Q_c = np.zeros(A_c.shape); Q_c[-1,-1] = 10**log_q # continuous-time uncertainty around the last derivative C = np.zeros((1, order+1)); C[0,0] = 1 # we measure only y = noisy x R = np.array([[10**log_r]]) # 1 observed state, so this is 1x1 - + # convert to discrete time using matrix exponential eM = expm(np.block([[A_c, Q_c], [np.zeros(A_c.shape), -A_c.T]]) * dt) # Note this could handle variable dt, similar to rtsdiff A_d = eM[:order+1, :order+1] @@ -347,6 +346,6 @@ def convex_smooth(y, A, Q, C, R, B=None, u=None, proc_huberM=6, meas_huberM=0): problem = cvxpy.Problem(cvxpy.Minimize(objective)) try: problem.solve(solver=cvxpy.CLARABEL) except cvxpy.error.SolverError: pass # Could try another solver here, like SCS, but slows things down - + if x_states.value is None: return np.full((N, A.shape[0]), np.nan) # There can be solver failure, even without error return x_states.value.T diff --git a/pynumdiff/linear_model.py b/pynumdiff/linear_model.py index 93e08c5..faf8a95 100644 --- a/pynumdiff/linear_model.py +++ b/pynumdiff/linear_model.py @@ -1,6 +1,7 @@ +"""Fit a linear system model in a sliding window""" +from warnings import warn import math, scipy import numpy as np -from warnings import warn from pynumdiff.finite_difference import second_order as finite_difference from pynumdiff.polynomial_fit import savgoldiff as _savgoldiff # patch through @@ -12,17 +13,17 @@ except ImportError: pass -def savgoldiff(*args, **kwargs): # pragma: no cover +def savgoldiff(*args, **kwargs): # pragma: no cover pylint: disable=missing-function-docstring warn("`savgoldiff` has moved to `polynomial_fit.savgoldiff` and will be removed from " + "`linear_model` in a future release.", DeprecationWarning) return _savgoldiff(*args, **kwargs) -def polydiff(*args, **kwargs): # pragma: no cover +def polydiff(*args, **kwargs): # pragma: no cover pylint: disable=missing-function-docstring warn("`polydiff` has moved to `polynomial_fit.polydiff` and will be removed from " + "`linear_model` in a future release.", DeprecationWarning) return _polydiff(*args, **kwargs) -def spectraldiff(*args, **kwargs): # pragma: no cover +def spectraldiff(*args, **kwargs): # pragma: no cover pylint: 
disable=missing-function-docstring warn("`spectraldiff` has moved to `basis_fit.spectraldiff` and will be removed from " + "`linear_model` in a future release.", DeprecationWarning) return _spectraldiff(*args, **kwargs) @@ -86,17 +87,17 @@ def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_ :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: + if params is not None: warn("`params` and `options` parameters will be removed in a future version. Use `order`, " + "`gamma`, and `window_size` instead.", DeprecationWarning) order, gamma = params[:2] - if len(params) > 2: window_size = params[2] - if options != None: + if len(params) > 2: window_size = params[2] + if options is not None: if 'sliding' in options and not options['sliding']: window_size = None if 'step_size' in options: step_size = options['step_size'] if 'kernel_name' in options: kernel = options['kernel_name'] if 'solver' in options: solver = options['solver'] - elif order == None or gamma == None or window_size == None: + elif order is None or gamma is None or window_size is None: raise ValueError("`order`, `gamma`, and `window_size` must be given.") def _lineardiff(x, dt, order, gamma, solver=None): @@ -117,17 +118,16 @@ def _lineardiff(x, dt, order, gamma, solver=None): # Add the integration constants Csum = 0 - t = np.arange(0, X.shape[1])*dt - for n in range(0, order - 1): + t = np.arange(X.shape[1])*dt + for n in range(order - 1): C_subscript = n t_exponent = order - n - 2 den = math.factorial(t_exponent) Cn = np.vstack([1/den*C[i, C_subscript]*t**t_exponent for i in range(X.shape[0])]) Csum = Csum + Cn - Csum = np.array(Csum) # Use A and C to calculate the derivative - Xdot_reconstructed = (A@X + Csum) + Xdot_reconstructed = A@X + Csum dxdt_hat = np.ravel(Xdot_reconstructed[-1, :]) x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt) @@ -137,7 +137,7 @@ def _lineardiff(x, dt, order, gamma, solver=None): if not window_size: return _lineardiff(x, dt, order, gamma, solver) - elif window_size % 2 == 0: + if window_size % 2 == 0: window_size += 1 warn("Kernel window size should be odd. 
Added 1 to length.") @@ -147,7 +147,7 @@ def _lineardiff(x, dt, order, gamma, solver=None): x_hat_backward, _ = utility.slide_function(_lineardiff, x[::-1], dt, kernel, order, gamma, stride=step_size, solver=solver) # weights - w = np.arange(1, len(x_hat_forward)+1,1)[::-1] + w = np.arange(1, len(x_hat_forward)+1)[::-1] w = np.pad(w, [0, len(x)-len(w)], mode='constant') wfb = np.vstack((w, w[::-1])) norm = np.sum(wfb, axis=0) @@ -158,6 +158,4 @@ def _lineardiff(x, dt, order, gamma, solver=None): # merge x_hat = x_hat_forward*w/norm + x_hat_backward*w[::-1]/norm - x_hat, dxdt_hat = finite_difference(x_hat, dt) - - return x_hat, dxdt_hat + return finite_difference(x_hat, dt) diff --git a/pynumdiff/optimize.py b/pynumdiff/optimize.py index 8a766ab..e8c88dc 100644 --- a/pynumdiff/optimize.py +++ b/pynumdiff/optimize.py @@ -1,10 +1,11 @@ -import scipy.optimize -import numpy as np +"""Optimization functionality""" +from hashlib import sha1 from itertools import product from functools import partial from warnings import filterwarnings, warn from multiprocessing import Pool, Manager -from hashlib import sha1 +import scipy.optimize +import numpy as np from tqdm import tqdm from .utils import evaluate, utility @@ -149,7 +150,7 @@ def _objective_function(point, func, x, dt, singleton_params, categorical_params if metric == 'rmse': # minimize ||dxdt_hat - dxdt_truth||_2 rmse_dxdt = evaluate.rmse(dxdt_truth, dxdt_hat, padding=padding) cache[key] = rmse_dxdt; return rmse_dxdt - elif metric == 'error_correlation': + if metric == 'error_correlation': ec = evaluate.error_correlation(dxdt_truth, dxdt_hat, padding=padding) cache[key] = ec; return ec else: # then minimize L(Phi) = (RMSE(trapz(dxdt_hat) + c - x) || sqrt{2*Mean(Huber((trapz(dxdt_hat) + c - x)/sigma, M))}*sigma) + gamma*TV(dxdt_hat) @@ -230,7 +231,7 @@ def optimize(func, x, dt, dxdt_truth=None, tvgamma=1e-2, search_space_updates={} _minimize = partial(scipy.optimize.minimize, _obj_fun, method=opt_method, bounds=bounds, options={'maxiter':maxiter}) results += pool.map(_minimize, starting_points) # returns a bunch of OptimizeResult objects else: # For experiments, where I want to parallelize optimization calls and am not allowed to have each spawn further processes - cache = dict() + cache = {} # dict for categorical_combo in categorical_combos: _obj_fun = partial(_objective_function, func=func, x=x, dt=dt, singleton_params=singleton_params, categorical_params=categorical_combo, search_space_types=search_space_types, dxdt_truth=dxdt_truth, @@ -241,7 +242,7 @@ def optimize(func, x, dt, dxdt_truth=None, tvgamma=1e-2, search_space_updates={} opt_idx = np.nanargmin([r.fun for r in results]) opt_point = results[opt_idx].x # results are going to be floats, but that may not be allowed, so convert back to a dict - opt_params = {k:(v if search_space_types[k] == float else + opt_params = {k:(v if search_space_types[k] == float else int(np.round(v)) if search_space_types[k] == int else v > 0.5) for k,v in zip(search_space_types, opt_point)} opt_params.update(singleton_params) diff --git a/pynumdiff/polynomial_fit.py b/pynumdiff/polynomial_fit.py index 6c0a52a..74ac4bd 100644 --- a/pynumdiff/polynomial_fit.py +++ b/pynumdiff/polynomial_fit.py @@ -1,7 +1,7 @@ """Methods based on fitting data with polynomials""" +from warnings import warn import numpy as np import scipy -from warnings import warn from pynumdiff.utils import utility @@ -23,11 +23,11 @@ def splinediff(x, dt_or_t, params=None, options=None, degree=3, s=None, num_iter :return: - **x_hat** 
(np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `order`, `s`, and " + "`num_iterations` instead.", DeprecationWarning) degree, s = params[0:2] - if options != None: + if options is not None: if 'iterate' in options and options['iterate']: num_iterations = params[2] if np.isscalar(dt_or_t): @@ -64,16 +64,16 @@ def polydiff(x, dt, params=None, options=None, degree=None, window_size=None, st :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: + if params is not None: warn("`params` and `options` parameters will be removed in a future version. Use `degree` " + "and `window_size` instead.", DeprecationWarning) degree = params[0] if len(params) > 1: window_size = params[1] - if options != None: + if options is not None: if 'sliding' in options and not options['sliding']: window_size = None if 'step_size' in options: step_size = options['step_size'] if 'kernel_name' in options: kernel = options['kernel_name'] - elif degree == None or window_size == None: + elif degree is None or window_size is None: raise ValueError("`degree` and `window_size` must be given.") if window_size < degree*3: @@ -117,11 +117,11 @@ def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None, :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `degree`, " + "`window_size`, and `smoothing_win` instead.", DeprecationWarning) degree, window_size, smoothing_win = params - elif degree == None or window_size == None or smoothing_win == None: + elif degree is None or window_size is None or smoothing_win is None: raise ValueError("`degree`, `window_size`, and `smoothing_win` must be given.") window_size = np.clip(window_size, degree + 1, len(x)-1) diff --git a/pynumdiff/smooth_finite_difference.py b/pynumdiff/smooth_finite_difference.py index 66c4fed..2812064 100644 --- a/pynumdiff/smooth_finite_difference.py +++ b/pynumdiff/smooth_finite_difference.py @@ -1,7 +1,6 @@ """Apply smoothing method before finite difference.""" -import numpy as np -import scipy.signal from warnings import warn +import scipy.signal # included code from pynumdiff.finite_difference import second_order as finite_difference @@ -53,7 +52,7 @@ def meandiff(x, dt, params=None, options={}, window_size=5, num_iterations=1): :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. 
Use `window_size` " + "and `num_iterations` instead.", DeprecationWarning) window_size = params[0] if isinstance(params, list) else params @@ -78,7 +77,7 @@ def mediandiff(x, dt, params=None, options={}, window_size=5, num_iterations=1): :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `window_size` " + "and `num_iterations` instead.", DeprecationWarning) window_size = params[0] if isinstance(params, list) else params @@ -104,7 +103,7 @@ def gaussiandiff(x, dt, params=None, options={}, window_size=5, num_iterations=1 :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `window_size` " + "and `num_iterations` instead.", DeprecationWarning) window_size = params[0] if isinstance(params, list) else params @@ -130,7 +129,7 @@ def friedrichsdiff(x, dt, params=None, options={}, window_size=5, num_iterations :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `window_size` " + "and `num_iterations` instead.", DeprecationWarning) window_size = params[0] if isinstance(params, list) else params @@ -157,7 +156,7 @@ def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. 
Use `filter_order`, " + "`cutoff_freq`, and `num_iterations` instead.", DeprecationWarning) filter_order, cutoff_freq = params[0:2] @@ -174,7 +173,7 @@ def butterdiff(x, dt, params=None, options={}, filter_order=2, cutoff_freq=0.5, return finite_difference(x_hat, dt) -def splinediff(*args, **kwargs): # pragma: no cover +def splinediff(*args, **kwargs): # pragma: no cover pylint: disable=missing-function-docstring warn("`splindiff` has moved to `polynomial_fit.splinediff` and will be removed from " + "`smooth_finite_difference` in a future release.", DeprecationWarning) return _splinediff(*args, **kwargs) diff --git a/pynumdiff/tests/conftest.py b/pynumdiff/tests/conftest.py index 6a8e473..8554a6c 100644 --- a/pynumdiff/tests/conftest.py +++ b/pynumdiff/tests/conftest.py @@ -1,17 +1,20 @@ """Pytest configuration for pynumdiff tests. This is what enables the --plot and --bounds flags to work.""" -import pytest -from matplotlib import pyplot from collections import defaultdict +from matplotlib import pyplot +import pytest def pytest_addoption(parser): + """Make some flags available to tests that take the :code:`request` argument""" parser.addoption("--plot", action="store_true", default=False) # whether to show plots parser.addoption("--bounds", action="store_true", default=False) # whether to print error bounds @pytest.fixture(scope="session", autouse=True) def store_plots(request): + """This makes these plots available to tests that take the :code:`request` argument""" request.config.plots = defaultdict(lambda: pyplot.subplots(6, 2, figsize=(12,7))) # 6 is len(test_funcs_and_derivs) def pytest_sessionfinish(session, exitstatus): + """This is done at the end of the session, when tests are done running""" if not hasattr(session.config, 'plots'): return for method,(fig,axes) in session.config.plots.items(): fig.legend(*axes[-1, -1].get_legend_handles_labels(), loc='lower left', ncol=2) diff --git a/pynumdiff/tests/test_diff_methods.py b/pynumdiff/tests/test_diff_methods.py index ac9af13..b3a7c40 100644 --- a/pynumdiff/tests/test_diff_methods.py +++ b/pynumdiff/tests/test_diff_methods.py @@ -1,6 +1,6 @@ +"""Unit tests of core differentiation functionality""" import numpy as np from pytest import mark -from warnings import warn from ..finite_difference import finitediff, first_order, second_order, fourth_order from ..linear_model import lineardiff @@ -309,7 +309,7 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re # Similar to the error_bounds table, index by method first. But then we test against only one 2D function, # and only in the absence of noise, since the other test covers that. Instead, because multidimensional # derivatives can be combined in interesting fashions, we find d^2 / dt_1 dt_2 and the Laplacian, -# d^2/dt_1^2 + d^2/dt_2^2. Tuples are again (L2,Linf) distances. +# d^2/dt_1^2 + d^2/dt_2^2. Tuples are again (L2,Linf) distances. 
multidim_error_bounds = { finitediff: [(0, -1), (1, -1)] } @@ -332,13 +332,15 @@ def test_multidimensionality(multidim_method_and_params, request): computed_laplacian = diff_method(dxdt1, dt2, **params, axis=0)[1] + diff_method(dxdt2, dt2, **params, axis=1)[1] l2_error_lap = np.linalg.norm(analytic_laplacian - computed_laplacian) linf_error_lap = np.max(np.abs(analytic_laplacian - computed_laplacian)) - + if request.config.getoption("--bounds"): print([(int(np.ceil(np.log10(l2_error_d2))), int(np.ceil(np.log10(linf_error_d2)))), (int(np.ceil(np.log10(l2_error_lap))), int(np.ceil(np.log10(linf_error_lap))))]) else: (log_l2_bound_d2, log_linf_bound_d2), (log_l2_bound_lap, log_linf_bound_lap) = multidim_error_bounds[diff_method] assert l2_error_d2 < 10**log_l2_bound_d2 assert linf_error_d2 < 10**log_linf_bound_d2 + assert l2_error_lap < 10**log_l2_bound_lap + assert linf_error_lap < 10**log_linf_bound_lap if request.config.getoption("--plot"): from matplotlib import pyplot @@ -354,7 +356,7 @@ def test_multidimensionality(multidim_method_and_params, request): ax2.set_xlabel(r'$t_1$') ax2.set_ylabel(r'$t_2$') ax3 = fig.add_subplot(1, 3, 3, projection='3d') - surf = ax3.plot_surface(T1, T2, analytic_laplacian, cmap='viridis', alpha=0.5, label='analytic') + ax3.plot_surface(T1, T2, analytic_laplacian, cmap='viridis', alpha=0.5, label='analytic') ax3.set_title(r'$\frac{\partial^2}{\partial t_1^2} + \frac{\partial^2}{\partial t_2^2}$') ax3.set_xlabel(r'$t_1$') ax3.set_ylabel(r'$t_2$') diff --git a/pynumdiff/tests/test_optimize.py b/pynumdiff/tests/test_optimize.py index fd2d509..eedcdf5 100644 --- a/pynumdiff/tests/test_optimize.py +++ b/pynumdiff/tests/test_optimize.py @@ -1,5 +1,5 @@ +"""Unit tests of optimizer""" import numpy as np -from pytest import skip from ..smooth_finite_difference import butterdiff from ..polynomial_fit import splinediff @@ -28,7 +28,7 @@ def test_targeting_rmse_vs_tvgamma_loss(): """Ensure optimization properly targets different metrics""" params_rmse, val_rmse = optimize(splinediff, x, dt, dxdt_truth=dxdt_truth, parallel=False) # so coverage picks it up, because multiprocessing coverage is broken params_loss, val_loss = optimize(splinediff, x, dt, tvgamma=tvgamma) - + x_hat, dxdt_hat = splinediff(x, dt, **params_loss) loss_rmse = rmse(dxdt_truth, dxdt_hat) diff --git a/pynumdiff/tests/test_utils.py b/pynumdiff/tests/test_utils.py index 6011bf0..4c12941 100644 --- a/pynumdiff/tests/test_utils.py +++ b/pynumdiff/tests/test_utils.py @@ -1,6 +1,7 @@ """Unit tests for utility functions""" -# pylint: skip-file import numpy as np +from matplotlib import pyplot + from pynumdiff.utils import utility, evaluate from pynumdiff.utils.simulate import sine, triangle, pop_dyn, linear_autonomous, pi_cruise_control, lorenz_x np.random.seed(42) # The answer to life, the universe, and everything @@ -59,7 +60,6 @@ def test_peakdet(request): maxtab, mintab = utility.peakdet(x, 0.5, t) if request.config.getoption("--plot"): - from matplotlib import pyplot pyplot.plot(t, x) pyplot.plot(mintab[:,0], mintab[:,1], 'g*') pyplot.plot(maxtab[:,0], maxtab[:,1], 'r*') @@ -95,7 +95,6 @@ def identity(x, dt): return x, 0 # should come back the same def test_simulations(request): """Just sprint through running them all to make sure they go. 
Optionally plot with flag.""" if request.config.getoption("--plot"): - from matplotlib import pyplot axes = [pyplot.subplots(2, 3, figsize=(18,7), constrained_layout=True)[1] for i in range(3)] for j,dt in enumerate([0.005, 0.01, 0.02]): diff --git a/pynumdiff/total_variation_regularization.py b/pynumdiff/total_variation_regularization.py index 100ac1b..a3f406e 100644 --- a/pynumdiff/total_variation_regularization.py +++ b/pynumdiff/total_variation_regularization.py @@ -1,6 +1,6 @@ """This module implements some common total variation regularization methods.""" -import numpy as np from warnings import warn +import numpy as np from scipy.stats import median_abs_deviation try: import cvxpy except ImportError: pass @@ -33,14 +33,14 @@ def iterative_velocity(x, dt, params=None, options=None, num_iterations=None, ga :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `num_iterations`, " + "`gamma`, `cg_maxiter`, and `scale` instead.", DeprecationWarning) num_iterations, gamma = params - if options != None: + if options is not None: if 'cg_maxiter' in options: cg_maxiter = options['cg_maxiter'] if 'scale' in options: scale = options['scale'] - elif num_iterations == None or gamma == None: + elif num_iterations is None or gamma is None: raise ValueError("`num_iterations` and `gamma` must be given.") dxdt_hat = _chartrand_tvregdiff.TVRegDiff(x, num_iterations, gamma, dx=dt, @@ -126,13 +126,13 @@ def velocity(x, dt, params=None, options=None, gamma=None, solver=None): :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `gamma` " + "and `solver` instead.", DeprecationWarning) gamma = params[0] if isinstance(params, list) else params - if options != None: + if options is not None: if 'solver' in options: solver = options['solver'] - elif gamma == None: + elif gamma is None: raise ValueError("`gamma` must be given.") warn("`velocity` is deprecated. Call `tvrdiff` with order 1 instead.", DeprecationWarning) @@ -154,13 +154,13 @@ def acceleration(x, dt, params=None, options=None, gamma=None, solver=None): :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. 
Use `gamma` " + "and `solver` instead.", DeprecationWarning) gamma = params[0] if isinstance(params, list) else params - if options != None: + if options is not None: if 'solver' in options: solver = options['solver'] - elif gamma == None: + elif gamma is None: raise ValueError("`gamma` must be given.") warn("`acceleration` is deprecated. Call `tvrdiff` with order 2 instead.", DeprecationWarning) @@ -182,13 +182,13 @@ def jerk(x, dt, params=None, options=None, gamma=None, solver=None): :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `gamma` " + "and `solver` instead.", DeprecationWarning) gamma = params[0] if isinstance(params, list) else params - if options != None: + if options is not None: if 'solver' in options: solver = options['solver'] - elif gamma == None: + elif gamma is None: raise ValueError("`gamma` must be given.") warn("`jerk` is deprecated. Call `tvrdiff` with order 3 instead.", DeprecationWarning) @@ -212,13 +212,13 @@ def smooth_acceleration(x, dt, params=None, options=None, gamma=None, window_siz :return: - **x_hat** (np.array) -- estimated (smoothed) x - **dxdt_hat** (np.array) -- estimated derivative of x """ - if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release. + if params is not None: # Warning to support old interface for a while. Remove these lines along with params in a future release. warn("`params` and `options` parameters will be removed in a future version. Use `gamma` " + "and `solver` instead.", DeprecationWarning) gamma, window_size = params - if options != None: + if options is not None: if 'solver' in options: solver = options['solver'] - elif gamma == None or window_size == None: + elif gamma is None or window_size is None: raise ValueError("`gamma` and `window_size` must be given.") _, dxdt_hat = tvrdiff(x, dt, 2, gamma, solver=solver) diff --git a/pynumdiff/utils/_chartrand_tvregdiff.py b/pynumdiff/utils/_chartrand_tvregdiff.py index f69bb0a..5d33568 100644 --- a/pynumdiff/utils/_chartrand_tvregdiff.py +++ b/pynumdiff/utils/_chartrand_tvregdiff.py @@ -1,5 +1,3 @@ -# pylint: skip-file - # u = TVRegDiff( data, iter, alph, u0, scale, ep, dx, plotflag, diagflag ); # # Rick Chartrand (rickc@lanl.gov), Apr. 
10, 2011 diff --git a/pynumdiff/utils/evaluate.py b/pynumdiff/utils/evaluate.py index b755a4b..2a29219 100644 --- a/pynumdiff/utils/evaluate.py +++ b/pynumdiff/utils/evaluate.py @@ -27,7 +27,7 @@ def plot(x, dt, x_hat, dxdt_hat, x_truth, dxdt_truth, xlim=None, show_error=True xlim = [t[0], t[-1]] fig, axes = plt.subplots(1, 2, figsize=(18, 6), constrained_layout=True) - + axes[0].plot(t, x_truth, '--', color='black', linewidth=3, label=r"true $x$") axes[0].plot(t, x, '.', color='blue', zorder=-100, markersize=markersize, label=r"noisy data") axes[0].plot(t, x_hat, color='red', label=r"estimated $\hat{x}$") @@ -52,7 +52,7 @@ def plot(x, dt, x_hat, dxdt_hat, x_truth, dxdt_truth, xlim=None, show_error=True R_sqr = error_correlation(dxdt_truth, dxdt_hat) axes[1].text(0.05, 0.95, f"RMSE = {rmse_dxdt:.2f}\n$R^2$ = {R_sqr:.2g}", transform=axes[1].transAxes, fontsize=15, verticalalignment='top') - + return fig, axes @@ -128,7 +128,7 @@ def error_correlation(u, v, padding=0): """ if padding == 'auto': padding = max(1, int(0.025*len(u))) s = slice(padding, len(u)-padding) # slice out data we want to measure - + return stats.linregress(u[s], v[s] - u[s]).rvalue**2 @@ -143,5 +143,5 @@ def total_variation(x, padding=0): """ if padding == 'auto': padding = max(1, int(0.025*len(x))) x = x[padding:len(x)-padding] - + return np.linalg.norm(x[1:]-x[:-1], 1)/len(x) # normalized version of what cvxpy.tv does diff --git a/pynumdiff/utils/simulate.py b/pynumdiff/utils/simulate.py index d8931c2..0d1261d 100644 --- a/pynumdiff/utils/simulate.py +++ b/pynumdiff/utils/simulate.py @@ -144,7 +144,7 @@ def pop_dyn(duration=4, noise_type='normal', noise_parameters=(0, 0.5), outliers for _ in t[1:]: pos.append(pos[-1] + simdt*vel[-1]) vel.append(r*pos[-1]*(1-pos[-1]/K)) # logistic growth - + idx = slice(0, len(t), int(dt/simdt)) # downsample so things are dt apart pos = np.array(pos[idx]) vel = np.array(vel[idx]) @@ -252,7 +252,7 @@ def pi_cruise_control(duration=4, noise_type='normal', noise_parameters=(0, 0.5) pos = states[0, idx] vel = states[1, idx] noisy_pos = _add_noise(pos, random_seed, noise_type, noise_parameters, outliers) - + return noisy_pos, pos, vel diff --git a/pynumdiff/utils/utility.py b/pynumdiff/utils/utility.py index e8b66e6..6c027aa 100644 --- a/pynumdiff/utils/utility.py +++ b/pynumdiff/utils/utility.py @@ -1,4 +1,4 @@ -import os, sys, copy +"""Simple, short, reusable helper functions""" import numpy as np from scipy.integrate import cumulative_trapezoid from scipy.optimize import minimize