@@ -100,10 +100,10 @@ def _polydiff(x, dt, degree, weights=None):
     return utility.slide_function(_polydiff, x, dt, kernel, degree, stride=step_size, pass_weights=True)
 
 
-def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None, smoothing_win=None):
+def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None, smoothing_win=None, axis=0):
     """Use the Savitzky-Golay filter to smooth the data and calculate the first derivative. It uses
     scipy.signal.savgol_filter. The Savitzky-Golay filter is very similar to the sliding polynomial fit,
-    but slightly noisier, and much faster.
+    but slightly noisier and much faster.
 
     :param np.array[float] x: data to differentiate
     :param float dt: step size
@@ -113,6 +113,7 @@ def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None,
     :param int window_size: size of the sliding window, must be odd (if not, 1 is added)
     :param int smoothing_win: size of the window used for Gaussian smoothing; a good default is
         window_size, but smaller for high frequency data
+    :param int axis: data dimension along which differentiation is performed
 
     :return: - **x_hat** (np.array) -- estimated (smoothed) x
              - **dxdt_hat** (np.array) -- estimated derivative of x
@@ -130,12 +131,12 @@ def savgoldiff(x, dt, params=None, options=None, degree=None, window_size=None,
     warn("Kernel window size should be odd. Added 1 to length.")
     smoothing_win = min(smoothing_win, len(x) - 1)
 
-    dxdt_hat = scipy.signal.savgol_filter(x, window_size, degree, deriv=1) / dt
+    dxdt_hat = scipy.signal.savgol_filter(x, window_size, degree, deriv=1, axis=axis) / dt
 
     kernel = utility.gaussian_kernel(smoothing_win)
-    dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel)
+    dxdt_hat = utility.convolutional_smoother(dxdt_hat, kernel, axis=axis)
 
-    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
-    x_hat += utility.estimate_integration_constant(x, x_hat)
+    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt, axis=axis)
+    x_hat += utility.estimate_integration_constant(x, x_hat, axis=axis)
 
     return x_hat, dxdt_hat
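For reference, a minimal usage sketch of the new `axis` argument. The import path and all parameter values here are assumptions for illustration (it assumes `savgoldiff` is importable from `pynumdiff.linear_model`, which may differ by package version):

```python
# Minimal sketch, assuming savgoldiff lives in pynumdiff.linear_model
# (the import path may differ depending on the package version).
import numpy as np
from pynumdiff.linear_model import savgoldiff

dt = 0.01
t = np.arange(0, 2, dt)

# Two noisy signals stacked as columns: x has shape (len(t), 2)
x = np.stack([np.sin(2 * np.pi * t), np.cos(2 * np.pi * t)], axis=1)
x += 0.01 * np.random.randn(*x.shape)

# Differentiate each column along the time dimension (axis=0);
# degree and window sizes are illustrative, not recommended defaults.
x_hat, dxdt_hat = savgoldiff(x, dt, degree=4, window_size=21,
                             smoothing_win=21, axis=0)

assert x_hat.shape == x.shape and dxdt_hat.shape == x.shape
```

Since `axis` defaults to 0 and a 1-D array has only that axis, existing single-signal calls should behave exactly as before this change.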