pynumdiff/finite_difference/_finite_difference.py (2 changes: 1 addition & 1 deletion)

@@ -36,7 +36,7 @@ def finitediff(x, dt, num_iterations, order):
     dxdt_hat[0] = x_hat[1] - x_hat[0]
     dxdt_hat[-1] = x_hat[-1] - x_hat[-2] # use first-order endpoint formulas so as not to amplify noise. See #104

-    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt=1) # estimate new x_hat by integrating derivative
Comment (Collaborator Author): Changed the name of this parameter, so the kwarg breaks.

+    x_hat = utility.integrate_dxdt_hat(dxdt_hat, 1) # estimate new x_hat by integrating derivative
     # We can skip dividing by dt here and pass dt=1, because the integration multiplies dt back in.
     # No need to find integration constant until the very end, because we just differentiate again.
     # Note that I also tried integrating with Simpson's rule here, and it seems to do worse. See #104
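Editorial aside: the dt=1 trick in the comments above can be sanity-checked in isolation. A minimal sketch, assuming nothing from pynumdiff (np.gradient stands in for the package's finite-difference step): differences taken without dividing by dt, then integrated with unit step, recover the signal up to an integration constant.

    import numpy as np
    from scipy.integrate import cumulative_trapezoid

    dt = 0.01
    x = np.sin(np.arange(100) * dt)                           # smooth test signal
    dxdt_hat = np.gradient(x)                                 # central differences, NOT divided by dt
    x_back = cumulative_trapezoid(dxdt_hat, dx=1, initial=0)  # integrate with step 1, so the dt factors cancel
    x_back += x[0] - x_back[0]                                # fix the integration constant at the very end
    print(np.max(np.abs(x_back - x)))                         # tiny; only discretization error remains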
pynumdiff/linear_model/_linear_model.py (2 changes: 1 addition & 1 deletion)

@@ -252,7 +252,7 @@ def rbfdiff(x, _t, sigma=1, lmbd=0.01):
     if len(x) != len(_t): raise ValueError("If `_t` is given as array-like, must have same length as `x`.")
     t = _t

-    # The below does the approximate equivalent of this code, but sparsely in O(N sigma), since the rbf falls off rapidly
+    # The below does the approximate equivalent of this code, but sparsely in O(N sigma^2), since the rbf falls off rapidly
Comment (Collaborator Author): Solving a sparse linear system with $d$ elements per row is $O(Nd^2)$.

     # t_i, t_j = np.meshgrid(t,t)
     # r = t_j - t_i # radius
     # rbf = np.exp(-(r**2) / (2 * sigma**2)) # radial basis function kernel
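For orientation, here is a dense version of the approach the commented reference code sketches. The diff truncates that code, so the fit and derivative steps below are my reconstruction following the usual RBF-interpolation recipe, not pynumdiff's actual implementation; it costs O(N^3) time and O(N^2) memory, which is what the sparse version avoids.

    import numpy as np

    def rbfdiff_dense(x, t, sigma=1.0, lmbd=0.01):          # hypothetical dense counterpart of rbfdiff
        t_i, t_j = np.meshgrid(t, t)
        r = t_j - t_i                                        # pairwise radii, N x N
        rbf = np.exp(-(r**2) / (2 * sigma**2))               # radial basis function kernel
        w = np.linalg.solve(rbf + lmbd * np.eye(len(t)), x)  # ridge-regularized fit of x by RBF bumps
        drbf = -(r / sigma**2) * rbf                         # analytic derivative of each bump at each t
        return drbf @ w                                      # derivative of the smoothed fit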
pynumdiff/tests/test_optimize.py (10 changes: 10 additions & 0 deletions)

@@ -6,6 +6,7 @@
 from ..linear_model import spectraldiff
 from ..polynomial_fit import polydiff, savgoldiff, splinediff
 from ..total_variation_regularization import velocity, acceleration, iterative_velocity
+from ..kalman_smooth import rtsdiff
 from ..optimize import optimize
 from ..utils.simulate import pi_cruise_control

@@ -92,3 +93,12 @@ def test_polydiff():
     params2, val2 = optimize(polydiff, x, dt, tvgamma=tvgamma, search_space_updates={'step_size':1}, padding='auto')
     assert (params1['degree'], params1['window_size'], params1['kernel']) == (6, 50, 'friedrichs')
     assert (params2['degree'], params2['window_size'], params2['kernel']) == (3, 10, 'gaussian')
+
+# This test runs in a reasonable amount of time locally but for some reason takes forever in CI
+# def test_rtsdiff_with_irregular_step():
+#     t = np.arange(len(x))*dt; np.random.seed(7) # seed so the test can't randomly fail
+#     t_irreg = t + np.random.uniform(-dt/10, dt/10, *t.shape) # add jostle
+#     params1, val1 = optimize(rtsdiff, x, t, dxdt_truth=dxdt_truth)
+#     params2, val2 = optimize(rtsdiff, x, t_irreg, dxdt_truth=dxdt_truth)
+#     assert val2 < 1.15*val1 # optimization works and comes out similar, since jostle is small
+#     assert params1['qr_ratio']*0.85 < params2['qr_ratio'] < params1['qr_ratio']*1.15
pynumdiff/utils/utility.py (16 changes: 9 additions & 7 deletions)

@@ -1,5 +1,7 @@
-import os, sys, copy, scipy
+import os, sys, copy
 import numpy as np
+from scipy.integrate import cumulative_trapezoid
+from scipy.optimize import minimize


 def hankel_matrix(x, num_delays, pad=False): # fixed delay step of 1

@@ -95,16 +97,16 @@ def peakdet(x, delta, t=None):


 # Trapezoidal integration, with 0 first value so that the lengths match. See #88.
-def integrate_dxdt_hat(dxdt_hat, dt):
-    """Wrapper for scipy.integrate.cumulative_trapezoid to integrate dxdt_hat that ensures
-    the integral has the same length
+def integrate_dxdt_hat(dxdt_hat, _t):
+    """Wrapper for scipy.integrate.cumulative_trapezoid

     :param np.array[float] dxdt_hat: estimated derivative of the timeseries
-    :param float dt: time step in seconds
+    :param float _t: step size if given as a scalar, or a vector of sample locations

     :return: **x_hat** (np.array[float]) -- integral of dxdt_hat
     """
-    return np.hstack((0, scipy.integrate.cumulative_trapezoid(dxdt_hat)))*dt
+    return cumulative_trapezoid(dxdt_hat, initial=0)*_t if np.isscalar(_t) \
Comment (@pavelkomarov, Aug 23, 2025): Turns out cumulative_trapezoid can also take an initial value, in which case it returns a result the same length as the thing it's integrating, so there is no need to call hstack.

+        else cumulative_trapezoid(dxdt_hat, x=_t, initial=0)
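A quick check of the behavior the comment describes (editorial addition, not part of the PR):

    import numpy as np
    from scipy.integrate import cumulative_trapezoid

    y = np.array([1.0, 2.0, 3.0])
    print(cumulative_trapezoid(y))             # [1.5 4. ]    one element shorter than y
    print(cumulative_trapezoid(y, initial=0))  # [0.  1.5 4.] same length as y, so no hstack needed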


 # Optimization routine to estimate the integration constant.
@@ -118,7 +120,7 @@ def estimate_integration_constant(x, x_hat):

     :return: **integration constant** (float) -- initial condition that best aligns x_hat with x
     """
-    return scipy.optimize.minimize(lambda x0, x, xhat: np.linalg.norm(x - (x_hat+x0)), # fn to minimize in 1st argument
+    return minimize(lambda x0, x, xhat: np.linalg.norm(x - (x_hat+x0)), # fn to minimize in 1st argument
                     0, args=(x, x_hat), method='SLSQP').x[0] # result is a vector, even if initial guess is just a scalar


Expand Down