
Commit f8b840c: extended savgoldiff to also be multidimensional
Parent: 4cf2728

3 files changed: 17 additions, 12 deletions

notebooks/6_multidimensionality_demo.ipynb: 7 additions, 5 deletions (large diff not rendered by default)

pynumdiff/polynomial_fit.py: 7 additions, 6 deletions

```diff
@@ -100,10 +100,10 @@ def _polydiff(x, dt, degree, weights=None):
     return utility.slide_function(_polydiff, x, dt, kernel, degree, stride=step_size, pass_weights=True)
 
 
-def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None, smoothing_win=None):
+def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None, smoothing_win=None, axis=0):
     """Use the Savitzky-Golay to smooth the data and calculate the first derivative. It uses
     scipy.signal.savgol_filter. The Savitzky-Golay is very similar to the sliding polynomial fit,
-    but slightly noisier, and much faster.
+    but slightly noisier and much faster.
 
     :param np.array[float] x: data to differentiate
     :param float dt: step size
@@ -113,6 +113,7 @@ def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None,
     :param int window_size: size of the sliding window, must be odd (if not, 1 is added)
     :param int smoothing_win: size of the window used for gaussian smoothing, a good default is
         window_size, but smaller for high frequnecy data
+    :param int axis: data dimension along which differentiation is performed
 
     :return: - **x_hat** (np.array) -- estimated (smoothed) x
              - **dxdt_hat** (np.array) -- estimated derivative of x
@@ -130,12 +131,12 @@ def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None,
         warn("Kernel window size should be odd. Added 1 to length.")
     smoothing_win = min(smoothing_win, len(x)-1)
 
-    dxdt_hat = scipy.signal.savgol_filter(x, window_size, degree, deriv=1)/dt
+    dxdt_hat = scipy.signal.savgol_filter(x, window_size, degree, deriv=1, axis=axis)/dt
 
     kernel = utility.gaussian_kernel(smoothing_win)
-    dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel)
+    dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel, axis=axis)
 
-    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
-    x_hat += utility.estimate_integration_constant(x, x_hat)
+    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt, axis=axis)
+    x_hat += utility.estimate_integration_constant(x, x_hat, axis=axis)
 
     return x_hat, dxdt_hat
```
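For context, here is a minimal usage sketch of the extended function; it is not part of the commit. The parameter values mirror the test case added below, and the import path is an assumption based on the file shown above (`pynumdiff/polynomial_fit.py`).

```python
# Minimal sketch (not from the commit): differentiating a 2D array with the
# new `axis` argument. Parameter values follow the test case added below.
import numpy as np
from pynumdiff.polynomial_fit import savgoldiff  # assumed import path, per the file above

dt = 0.01
t = np.arange(0, 2, dt)
# Two noisy signals stacked as columns, shape (200, 2); time runs along axis 0.
rng = np.random.default_rng(0)
x = np.stack([np.sin(t), np.cos(3 * t)], axis=1) + 0.01 * rng.normal(size=(len(t), 2))

# axis=0 (the default) differentiates each column along the time dimension.
x_hat, dxdt_hat = savgoldiff(x, dt, degree=3, window_size=11, smoothing_win=3, axis=0)

print(x_hat.shape, dxdt_hat.shape)  # both expected to match x.shape, i.e. (200, 2)
```

Per the new docstring entry, passing `axis=1` would instead treat each row as a separate series and differentiate along that dimension.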

pynumdiff/tests/test_diff_methods.py: 3 additions, 1 deletion

```diff
@@ -308,6 +308,7 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
     (kerneldiff, {'kernel': 'gaussian', 'window_size': 5}),
     (butterdiff, {'filter_order': 3, 'cutoff_freq': 1 - 1e-6}),
     (finitediff, {}),
+    (savgoldiff, {'degree': 3, 'window_size': 11, 'smoothing_win': 3})
 ]
 
 # Similar to the error_bounds table, index by method first. But then we test against only one 2D function,
@@ -317,7 +318,8 @@ def test_diff_method(diff_method_and_params, test_func_and_deriv, request): # re
 multidim_error_bounds = {
     kerneldiff: [(2, 1), (3, 2)],
     butterdiff: [(0, -1), (1, -1)],
-    finitediff: [(0, -1), (1, -1)]
+    finitediff: [(0, -1), (1, -1)],
+    savgoldiff: [(0, -1), (1, 1)]
 }
 
 @mark.parametrize("multidim_method_and_params", multidim_methods_and_params)
```
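As an aside, a simple way to sanity-check the new axis handling outside this test suite is to compare a 2D call against independent 1D calls on each column. This is a hypothetical check, not the repository's test, and exact agreement is an assumption about how the axis-aware code path reduces to the 1D case.

```python
# Hypothetical sanity check (not part of the commit's test suite): compare the
# 2D result along axis=0 column-by-column against independent 1D calls.
# Exact agreement assumes the axis-aware path reduces to the 1D behavior.
import numpy as np
from pynumdiff.polynomial_fit import savgoldiff  # assumed import path

dt = 0.01
t = np.arange(0, 2, dt)
x = np.stack([np.sin(t), np.cos(3 * t)], axis=1)  # shape (200, 2), noiseless for simplicity

x_hat_2d, dxdt_hat_2d = savgoldiff(x, dt, degree=3, window_size=11, smoothing_win=3, axis=0)

for j in range(x.shape[1]):
    x_hat_1d, dxdt_hat_1d = savgoldiff(x[:, j], dt, degree=3, window_size=11, smoothing_win=3)
    assert np.allclose(x_hat_2d[:, j], x_hat_1d)
    assert np.allclose(dxdt_hat_2d[:, j], dxdt_hat_1d)
```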
