32 changes: 14 additions & 18 deletions pynumdiff/basis_fit/_basis_fit.py
@@ -19,9 +19,8 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
:param bool pad_to_zero_dxdt: if True, extend the data with extra regions that smoothly force the derivative to
zero before taking FFT.

:return: tuple[np.array, np.array] of\n
[Comment from Collaborator Author]: I decided I like a slightly shorter form for these.

- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if params != None: # Warning to support old interface for a while. Remove these lines along with params in a future release.
warn("`params` and `options` parameters will be removed in a future version. Use `high_freq_cutoff`, " +
@@ -52,27 +51,25 @@ def spectraldiff(x, dt, params=None, options=None, high_freq_cutoff=None, even_e
if even_extension is True:
x = np.hstack((x, x[::-1]))

# If odd, make N even, and pad x
# Form wavenumbers
N = len(x)

# Define the frequency range.
k = np.concatenate((np.arange(N//2 + 1), np.arange(-N//2 + 1, 0)))
if N % 2 == 0: k[N//2] = 0 # odd derivatives get the Nyquist element zeroed out
omega = k*2*np.pi/(dt*N) # turn wavenumbers into frequencies in radians/s

# Frequency based smoothing: remove signals with a frequency higher than high_freq_cutoff
# Filter to zero out higher wavenumbers
discrete_cutoff = int(high_freq_cutoff*N/2) # Nyquist is at N/2 location, and we're cutting off as a fraction of that
omega[discrete_cutoff:N-discrete_cutoff] = 0
filt = np.ones(k.shape); filt[discrete_cutoff:N-discrete_cutoff] = 0

# Smoothed signal
X = np.fft.fft(x)
[Comment from pavelkomarov (Collaborator Author), Nov 7, 2025]: I think this code is a bit clearer and more like what people canonically do now. Integrating to get x_hat was always strange in this spectral case.

x_hat = np.real(np.fft.ifft(filt * X))
x_hat = x_hat[padding:L+padding]

# Derivative = 90 deg phase shift
dxdt_hat = np.real(np.fft.ifft(1.0j * omega * np.fft.fft(x)))
omega = 2*np.pi/(dt*N) # factor of 2pi/T turns wavenumbers into frequencies in radians/s
dxdt_hat = np.real(np.fft.ifft(1j * k * omega * filt * X))
dxdt_hat = dxdt_hat[padding:L+padding]

# Integrate to get x_hat
x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
x0 = utility.estimate_integration_constant(x[padding:L+padding], x_hat)
x_hat = x_hat + x0

return x_hat, dxdt_hat
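
For orientation, the new flow is: FFT the padded and evenly extended signal, zero out wavenumbers above the cutoff, inverse-FFT that for x_hat, and multiply by 1j*k*omega before the inverse FFT for the derivative. A minimal standalone sketch of the same scheme (illustrative only; it skips the padding and even extension handled above):

```python
import numpy as np

def spectral_diff_sketch(x, dt, high_freq_cutoff=0.5):
    """Differentiate x by filtering and scaling its FFT. Illustrative only."""
    N = len(x)
    k = np.concatenate((np.arange(N//2 + 1), np.arange(-N//2 + 1, 0)))  # wavenumbers
    if N % 2 == 0:
        k[N//2] = 0  # odd derivatives get the Nyquist element zeroed out
    discrete_cutoff = int(high_freq_cutoff*N/2)  # cutoff as a fraction of Nyquist
    filt = np.ones(N)
    filt[discrete_cutoff:N-discrete_cutoff] = 0  # zero out higher wavenumbers
    X = np.fft.fft(x)
    x_hat = np.real(np.fft.ifft(filt * X))  # low-pass smoothed signal
    omega = 2*np.pi/(dt*N)  # factor that turns wavenumbers into frequencies in rad/s
    dxdt_hat = np.real(np.fft.ifft(1j * k * omega * filt * X))  # 90 deg phase shift
    return x_hat, dxdt_hat
```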


@@ -88,9 +85,8 @@ def rbfdiff(x, _t, sigma=1, lmbd=0.01):
:param float sigma: controls width of radial basis functions
:param float lmbd: controls smoothness

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if np.isscalar(_t):
t = np.arange(len(x))*_t
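
The hunk cuts off here, but the idea can be sketched independently: fit x with Gaussian radial basis functions centered at the sample times, with a ridge penalty controlled by lmbd, then differentiate the fitted basis analytically. A rough sketch under those assumptions (the library's actual centers and penalty may differ):

```python
import numpy as np

def rbf_diff_sketch(x, t, sigma=1.0, lmbd=0.01):
    """Ridge-regularized Gaussian RBF fit, differentiated analytically. Illustrative only."""
    D = t[:, None] - t[None, :]          # offsets from each sample time to each center
    Phi = np.exp(-D**2 / (2*sigma**2))   # basis functions evaluated at the sample times
    dPhi = -D/sigma**2 * Phi             # analytic time-derivative of each basis function
    w = np.linalg.solve(Phi.T @ Phi + lmbd*np.eye(len(t)), Phi.T @ x)  # ridge weights
    return Phi @ w, dPhi @ w             # x_hat, dxdt_hat
```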
20 changes: 10 additions & 10 deletions pynumdiff/finite_difference/_finite_difference.py
@@ -13,6 +13,9 @@ def finitediff(x, dt, num_iterations=1, order=2):
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times
:param int order: 1, 2, or 4, controls which finite differencing scheme to employ

:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if num_iterations < 1: raise ValueError("num_iterations must be >0")
if order not in [1, 2, 4]: raise ValueError("order must be 1, 2, or 4")
@@ -59,7 +62,7 @@ def finitediff(x, dt, num_iterations=1, order=2):
dxdt_hat /= dt # don't forget to scale by dt, can't skip it this time

if num_iterations > 1: # We've lost a constant of integration in the above
x_hat += utility.estimate_integration_constant(x, x_hat) # uses least squares
x_hat += utility.estimate_integration_constant(x, x_hat)
[Comment from Collaborator Author]: This now emphatically does not use least squares, because that's actually a terrible way to try to estimate the constant of integration if there are outliers. It now uses an outlier-robust fancy-shmansy thing under the hood, which further justifies having a whole separate function for it.

return x_hat, dxdt_hat
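
The num_iterations mechanic from the docstring (differentiate, re-integrate, repeat) fits in a few lines. This is a loose illustration, not the library's code: np.gradient stands in for the order-2 stencil, and a median stands in for whatever robust estimate utility.estimate_integration_constant actually uses:

```python
import numpy as np

def iterated_fd_sketch(x, dt, num_iterations=2):
    """Alternate differentiation and trapezoidal re-integration. Illustrative only."""
    x_hat = np.asarray(x, dtype=float)
    for i in range(num_iterations):
        dxdt_hat = np.gradient(x_hat, dt)  # stand-in for the order-2 stencil
        if i < num_iterations - 1:  # every pass but the last feeds back a smoothed x
            x_hat = np.concatenate(([0.0], np.cumsum((dxdt_hat[:-1] + dxdt_hat[1:])/2)*dt))
            x_hat += np.median(x - x_hat)  # robust stand-in for the integration constant
    return x_hat, dxdt_hat
```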

@@ -75,9 +78,8 @@ def first_order(x, dt, params=None, options={}, num_iterations=1):
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
warn("`first_order` in past releases was actually calculating a second-order FD. Use `second_order` to achieve " +
"approximately the same behavior. Note that odd-order methods have asymmetrical stencils, which causes " +
@@ -99,9 +101,8 @@ def second_order(x, dt, num_iterations=1):
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
warn("`second_order` is deprecated. Call `finitediff` with order 2 instead.", DeprecationWarning)
return finitediff(x, dt, num_iterations, 2)
@@ -116,9 +117,8 @@ def fourth_order(x, dt, num_iterations=1):
:param int num_iterations: number of iterations. If >1, the derivative is integrated with trapezoidal
rule, that result is finite-differenced again, and the cycle is repeated num_iterations-1 times

:return: tuple[np.array, np.array] of\n
- **x_hat** -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- original x if :code:`num_iterations=1`, else smoothed x that yielded dxdt_hat
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
warn("`fourth_order` is deprecated. Call `finitediff` with order 4 instead.", DeprecationWarning)
return finitediff(x, dt, num_iterations, 4)
74 changes: 32 additions & 42 deletions pynumdiff/kalman_smooth/_kalman_smooth.py
@@ -25,12 +25,11 @@ def kalman_filter(y, _t, xhat0, P0, A, Q, C, R, B=None, u=None, save_P=True):
:param bool save_P: whether to save history of error covariance and a priori state estimates, used with rts
smoothing but nonstandard to compute for ordinary filtering

:return: tuple[np.array, np.array, np.array, np.array] of\n
- **xhat_pre** -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
- **xhat_post** -- a posteriori estimates of xhat
- **P_pre** -- a priori estimates of P
- **P_post** -- a posteriori estimates of P
if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
:return: - **xhat_pre** (np.array) -- a priori estimates of xhat, with axis=0 the batch dimension, so xhat[n] gets the nth step
- **xhat_post** (np.array) -- a posteriori estimates of xhat
- **P_pre** (np.array) -- a priori estimates of P
- **P_post** (np.array) -- a posteriori estimates of P
if :code:`save_P` is :code:`True` else only **xhat_post** to save memory
"""
N = y.shape[0]
m = xhat0.shape[0] # dimension of the state
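
To make the documented signature concrete, a hypothetical constant-velocity call might look like the following; the matrix values and shapes are illustrative assumptions, not library defaults:

```python
import numpy as np

dt = 0.01
A = np.array([[1.0, dt], [0.0, 1.0]])  # discrete-time constant-velocity transition
C = np.array([[1.0, 0.0]])             # we measure position only
Q = np.diag([1e-8, 1e-4])              # process noise covariance
R = np.array([[1e-2]])                 # measurement noise covariance
y = np.sin(np.arange(500)*dt) + 0.1*np.random.randn(500)
xhat0 = np.array([y[0], 0.0])          # initial state guess
P0 = np.eye(2)                         # initial error covariance

xhat_pre, xhat_post, P_pre, P_post = kalman_filter(y, dt, xhat0, P0, A, Q, C, R)
```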
@@ -84,16 +83,15 @@ def rts_smooth(_t, A, xhat_pre, xhat_post, P_pre, P_post, compute_P_smooth=True)
:param float or array[float] _t: This function supports variable step size. This parameter is either the constant
step size if given as a single float, or sample locations if given as an array of same length as the state histories.
:param np.array A: state transition matrix, in discrete time if constant dt, in continuous time if variable dt
:param list[np.array] xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
[Comment from Collaborator Author]: I turned these into 3D arrays rather than lists of 2D arrays a while ago and hadn't updated the docstring.

:param list[np.array] xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
:param list[np.array] P_pre: a priori estimates of P from a kalman_filter forward pass
:param list[np.array] P_post: a posteriori estimates of P from a kalman_filter forward pass
:param np.array xhat_pre: a priori estimates of xhat from a kalman_filter forward pass
:param np.array xhat_post: a posteriori estimates of xhat from a kalman_filter forward pass
:param np.array P_pre: a priori estimates of P from a kalman_filter forward pass
:param np.array P_post: a posteriori estimates of P from a kalman_filter forward pass
:param bool compute_P_smooth: it's extra work if you don't need it

:return: tuple[np.array, np.array] of\n
- **xhat_smooth** -- RTS smoothed xhat
- **P_smooth** -- RTS smoothed P
if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
:return: - **xhat_smooth** (np.array) -- RTS smoothed xhat
- **P_smooth** (np.array) -- RTS smoothed P estimates
if :code:`compute_P_smooth` is :code:`True` else only **xhat_smooth**
"""
xhat_smooth = np.empty(xhat_post.shape); xhat_smooth[-1] = xhat_post[-1] # preallocate arrays
if compute_P_smooth: P_smooth = np.empty(P_post.shape); P_smooth[-1] = P_post[-1]
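
For reference, the backward recursion that fills these preallocated arrays is, in its textbook constant-dt form (the variable-dt handling in the actual loop may differ):

```python
# Textbook RTS backward pass, shown for orientation only:
#   G_n       = P_post[n] A^T inv(P_pre[n+1])                      (smoother gain)
#   xhat_s[n] = xhat_post[n] + G_n (xhat_s[n+1] - xhat_pre[n+1])
#   P_s[n]    = P_post[n]    + G_n (P_s[n+1] - P_pre[n+1]) G_n^T
for n in range(len(xhat_post) - 2, -1, -1):
    G = P_post[n] @ A.T @ np.linalg.inv(P_pre[n + 1])
    xhat_smooth[n] = xhat_post[n] + G @ (xhat_smooth[n + 1] - xhat_pre[n + 1])
    if compute_P_smooth:
        P_smooth[n] = P_post[n] + G @ (P_smooth[n + 1] - P_pre[n + 1]) @ G.T
```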
@@ -123,9 +121,8 @@ def rtsdiff(x, _t, order, log_qr_ratio, forwardbackward):
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
(usually achieves better estimate at end points)

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if not np.isscalar(_t) and len(x) != len(_t):
raise ValueError("If `_t` is given as array-like, must have same length as `x`.")
@@ -185,9 +182,8 @@ def constant_velocity(x, dt, params=None, options=None, r=None, q=None, forwardb
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
(usually achieves better estimate at end points)

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if params != None: # boilerplate backwards compatibility code
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -216,9 +212,8 @@ def constant_acceleration(x, dt, params=None, options=None, r=None, q=None, forw
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
(usually achieves better estimate at end points)

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if params != None: # boilerplate backwards compatibility code
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -247,9 +242,8 @@ def constant_jerk(x, dt, params=None, options=None, r=None, q=None, forwardbackw
:param bool forwardbackward: indicates whether to run smoother forwards and backwards
(usually achieves better estimate at end points)

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if params != None: # boilerplate backwards compatibility code
warn("`params` and `options` parameters will be removed in a future version. Use `r`, " +
@@ -290,13 +284,9 @@ def robustdiff(x, dt, order, log_q, log_r, proc_huberM=6, meas_huberM=0):
:param float proc_huberM: quadratic-to-linear transition point for process loss
:param float meas_huberM: quadratic-to-linear transition point for measurement loss

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
#q = 1e4 # I found q too small worsened condition number of the Q matrix, so fixing it at a biggish value
#r = q/qr_ratio

A_c = np.diag(np.ones(order), 1) # continuous-time A just has 1s on the first diagonal (where 0th is main diagonal)
Q_c = np.zeros(A_c.shape); Q_c[-1,-1] = 10**log_q # continuous-time uncertainty around the last derivative
C = np.zeros((1, order+1)); C[0,0] = 1 # we measure only y = noisy x
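
Outside this hunk, the function presumably discretizes the continuous-time pair built here. One standard way to do that is Van Loan's method; whether robustdiff uses exactly this is an assumption, but it shows how (A_c, Q_c) become discrete-time counterparts:

```python
import numpy as np
from scipy.linalg import expm

def van_loan_discretize(A_c, Q_c, dt):
    """Van Loan's method for discretizing a continuous-time LTI model. Sketch only."""
    n = A_c.shape[0]
    M = np.block([[-A_c, Q_c], [np.zeros((n, n)), A_c.T]]) * dt
    E = expm(M)
    A_d = E[n:, n:].T      # discrete-time state transition
    Q_d = A_d @ E[:n, n:]  # discrete-time process noise covariance
    return A_d, Q_d
```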
@@ -324,31 +314,31 @@ def convex_smooth(y, A, Q, C, R, proc_huberM, meas_huberM):
:param float proc_huberM: Huber loss parameter. :math:`M=0` reduces to :math:`\\sqrt{2}\\ell_1`.
:param float meas_huberM: Huber loss parameter. :math:`M=\\infty` reduces to :math:`\\frac{1}{2}\\ell_2^2`.

:return: np.array -- state estimates (state_dim x N)
:return: (np.array) -- state estimates (state_dim x N)
"""
N = len(y)
x_states = cvxpy.Variable((A.shape[0], N)) # each column is [position, velocity, acceleration, ...] at step n

def huber_const(M): # from https://jmlr.org/papers/volume14/aravkin13a/aravkin13a.pdf, with correction for missing sqrt
a = 2*np.exp(-M**2 / 2)/M
a = 2*np.exp(-M**2 / 2)/M # huber_const smoothly transitions Huber between 1-norm and 2-norm squared cases
b = np.sqrt(2*np.pi)*(2*norm.cdf(M) - 1)
return np.sqrt((2*a*(1 + M**2)/M**2 + b)/(a + b))

# It is extremely important for run time that CVXPY expressions be in vectorized form
proc_resids = np.linalg.inv(sqrtm(Q)) @ (x_states[:,1:] - A @ x_states[:,:-1]) # all Q^(-1/2)(x_n - A x_{n-1})
meas_resids = np.linalg.inv(sqrtm(R)) @ (y.reshape(C.shape[0],-1) - C @ x_states) # all R^(-1/2)(y_n - C x_n)
# Process terms: sum of J(proc_resids)
objective = 0.5*cvxpy.sum_squares(proc_resids) if huberM == float('inf') \
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(proc_resids)) if huberM < 1e-3 \
else huber_const(huberM)*cvxpy.sum(cvxpy.huber(proc_resids, huberM)) # 1/2 l2^2, l1, or Huber
objective = 0.5*cvxpy.sum_squares(proc_resids) if proc_huberM == float('inf') \
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(proc_resids)) if proc_huberM < 1e-3 \
else huber_const(proc_huberM)*cvxpy.sum(cvxpy.huber(proc_resids, proc_huberM)) # 1/2 l2^2, l1, or Huber

[Comment from Collaborator Author]: I have to try out whether splitting up proc_huberM and meas_huberM etc. gives better results, but I'm reasonably sure we need Nelder-Mead to do a 4D search here, which means it makes a lot of queries. Thankfully, each query is not slow, thanks to vectorized CVXPY code and CLARABEL taking full advantage of sparse matrices.

# Measurement terms: sum of V(meas_resids)
objective += 0.5*cvxpy.sum_squares(meas_resids) if huberM == float('inf') \
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(meas_resids)) if huberM < 1e-3 \
else huber_const(huberM)*cvxpy.sum(cvxpy.huber(meas_resids, huberM)) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices
objective += 0.5*cvxpy.sum_squares(meas_resids) if meas_huberM == float('inf') \
else np.sqrt(2)*cvxpy.sum(cvxpy.abs(meas_resids)) if meas_huberM < 1e-3 \
else huber_const(meas_huberM)*cvxpy.sum(cvxpy.huber(meas_resids, meas_huberM)) # CVXPY quirk: norm(, 1) != sum(abs()) for matrices

problem = cvxpy.Problem(cvxpy.Minimize(objective))
try: problem.solve(solver=cvxpy.CLARABEL); print("CLARABEL succeeded")
except cvxpy.error.SolverError: pass # Could try a different solver here, like SCS, but slows things down
except cvxpy.error.SolverError: pass # Could try another solver here, like SCS, but slows things down

if x_states.value is None: return np.full((A.shape[0], N), np.nan) # There can be solver failure, even without exception
if x_states.value is None: return np.full((A.shape[0], N), np.nan) # There can be solver failure, even without error
return x_states.value
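
Putting the pieces together, a hypothetical call with an outlier in the measurements shows where the Huber losses earn their keep; the model matrices are illustrative assumptions:

```python
import numpy as np

dt = 0.1
A = np.array([[1.0, dt], [0.0, 1.0]])                    # constant-velocity model
Q = np.array([[dt**3/3, dt**2/2], [dt**2/2, dt]]) * 1e2  # integrated white noise
C = np.array([[1.0, 0.0]])
R = np.array([[1e-2]])
y = np.cos(np.arange(200)*dt)
y[50] += 5.0  # an outlier; meas_huberM=1 penalizes it linearly rather than quadratically

states = convex_smooth(y, A, Q, C, R, proc_huberM=float('inf'), meas_huberM=1.0)
x_hat, dxdt_hat = states[0], states[1]
```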
5 changes: 2 additions & 3 deletions pynumdiff/linear_model/_linear_model.py
@@ -83,9 +83,8 @@ def lineardiff(x, dt, params=None, options=None, order=None, gamma=None, window_
:param str kernel: name of kernel to use for weighting and smoothing windows ('gaussian' or 'friedrichs')
:param str solver: CVXPY solver to use, one of :code:`cvxpy.installed_solvers()`

:return: tuple[np.array, np.array] of\n
- **x_hat** -- estimated (smoothed) x
- **dxdt_hat** -- estimated derivative of x
:return: - **x_hat** (np.array) -- estimated (smoothed) x
- **dxdt_hat** (np.array) -- estimated derivative of x
"""
if params != None:
warn("`params` and `options` parameters will be removed in a future version. Use `order`, " +