Commit 0c01ef8

Merge branch 'release/2.6.0'
2 parents: b2fb64a + f290617

12 files changed: +899 -134 lines

brainbox/behavior/dlc.py

Lines changed: 329 additions & 7 deletions
Large diffs are not rendered by default.
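The brainbox/behavior/dlc.py diff is collapsed here, but the rest of this commit exercises its new functions. Below is a hedged usage sketch assembled from the calls made in ibllib/pipes/ephys_preprocessing.py later in this diff; the file paths are illustrative and the function internals are not visible in this commit view.

import numpy as np
import pandas as pd
from brainbox.behavior.dlc import (likelihood_threshold, get_licks,
                                   get_pupil_diameter, get_smooth_pupil_diameter)

dlc = pd.read_parquet('alf/_ibl_leftCamera.dlc.pqt')  # DLC traces (illustrative path)
dlc_t = np.load('alf/_ibl_leftCamera.times.npy')      # camera frame times (illustrative path)
dlc_thresh = likelihood_threshold(dlc, 0.9)           # mask low-likelihood samples
lick_times = get_licks(dlc_thresh, dlc_t)             # lick event times
diameter_raw = get_pupil_diameter(dlc_thresh)         # raw pupil diameter per frame
diameter_smooth = get_smooth_pupil_diameter(diameter_raw, 'left')  # camera-specific smoothing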

brainbox/io/one.py

Lines changed: 7 additions & 5 deletions
@@ -12,7 +12,7 @@
 
 from ibllib.io import spikeglx
 from ibllib.io.extractors.training_wheel import extract_wheel_moves, extract_first_movement_times
-from ibllib.ephys.neuropixel import SITES_COORDINATES, TIP_SIZE_UM
+from ibllib.ephys.neuropixel import SITES_COORDINATES, TIP_SIZE_UM, trace_header
 from ibllib.atlas import atlas
 from ibllib.atlas import AllenAtlas
 from ibllib.pipes import histology
@@ -221,7 +221,7 @@ def _load_channels_locations_from_disk(eid, collection=None, one=None, revision=
     return channels
 
 
-def channel_locations_interpolation(channels_aligned, channels, brain_regions=None):
+def channel_locations_interpolation(channels_aligned, channels=None, brain_regions=None):
     """
     oftentimes the channel map for different spike sorters may be different so interpolate the alignment onto
     if there is no spike sorting in the base folder, the alignment doesn't have the localCoordinates field
@@ -238,16 +238,18 @@ def channel_locations_interpolation(channels_aligned, channels, brain_regions=None):
     'x', 'y', 'z', 'acronym', 'atlas_id', 'axial_um', 'lateral_um'
     :return: Bunch or dictionary of channels with brain coordinates keys
     """
+    NEUROPIXEL_VERSION = 1
+    h = trace_header(version=NEUROPIXEL_VERSION)
+    if channels is None:
+        channels = {'localCoordinates': np.c_[h['x'], h['y']]}
     nch = channels['localCoordinates'].shape[0]
     if set(['x', 'y', 'z']).issubset(set(channels_aligned.keys())):
         channels_aligned = _channels_bunch2alf(channels_aligned)
     if 'localCoordinates' in channels_aligned.keys():
         aligned_depths = channels_aligned['localCoordinates'][:, 1]
     else:  # this is a edge case for a few spike sorting sessions
         assert channels_aligned['mlapdv'].shape[0] == 384
-        NEUROPIXEL_VERSION = 1
-        from ibllib.ephys.neuropixel import trace_header
-        aligned_depths = trace_header(version=NEUROPIXEL_VERSION)['y']
+        aligned_depths = h['y']
     depth_aligned, ind_aligned = np.unique(aligned_depths, return_index=True)
     depths, ind, iinv = np.unique(channels['localCoordinates'][:, 1], return_index=True, return_inverse=True)
     channels['mlapdv'] = np.zeros((nch, 3))
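For reference, a short sketch of what the new channels=None default builds, using only the objects shown in this hunk; the shapes are those of a standard 384-channel Neuropixel 1 probe.

import numpy as np
from ibllib.ephys.neuropixel import trace_header

h = trace_header(version=1)                             # default Neuropixel 1 geometry
channels = {'localCoordinates': np.c_[h['x'], h['y']]}  # (n_channels, 2) site positions
# channel_locations_interpolation(channels_aligned) now falls back to this map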

ibllib/dsp/voltage.py

Lines changed: 43 additions & 6 deletions
@@ -109,6 +109,37 @@ def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None
     return xf / gain
 
 
+def car(x, collection=None, lagc=300, butter_kwargs=None):
+    """
+    Applies common average referencing with optional automatic gain control
+    :param x: the input array to be filtered. dimension, the filtering is considering
+     axis=0: spatial dimension, axis=1 temporal dimension. (ntraces, ns)
+    :param collection:
+    :param lagc: window size for time domain automatic gain control (no agc otherwise)
+    :param butter_kwargs: filtering parameters: defaults: {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
+    :return:
+    """
+    if butter_kwargs is None:
+        butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
+    if collection is not None:
+        xout = np.zeros_like(x)
+        for c in np.unique(collection):
+            sel = collection == c
+            xout[sel, :] = kfilt(x=x[sel, :], ntr_pad=0, ntr_tap=None, collection=None,
+                                 butter_kwargs=butter_kwargs)
+        return xout
+
+    # apply agc and keep the gain in handy
+    if not lagc:
+        xf = np.copy(x)
+        gain = 1
+    else:
+        xf, gain = agc(x, wl=lagc, si=1.0)
+    # apply CAR and then un-apply the gain
+    xf = xf - np.median(xf, axis=0)
+    return xf / gain
+
+
 def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=None):
     """
     Applies a butterworth filter on the 0-axis with tapering / padding
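Stripped of the AGC and collection handling, the new car() boils down to subtracting the per-sample median across channels. A minimal self-contained sketch of that core step (not the ibllib implementation):

import numpy as np

def car_sketch(x):
    # x: (ntraces, ns) array; subtract the per-sample median across channels
    return x - np.median(x, axis=0)

x = np.random.randn(384, 3000)  # fake data: 384 channels, 3000 samples
assert np.allclose(np.median(car_sketch(x), axis=0), 0)  # median reference removed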
@@ -209,13 +240,19 @@ def destripe(x, fs, neuropixel_version=1, butter_kwargs=None, k_kwargs=None, cha
         True: deduces the bad channels from the data provided
     :param butter_kwargs: (optional, None) butterworth params, see the code for the defaults dict
     :param k_kwargs: (optional, None) K-filter params, see the code for the defaults dict
+        can also be set to 'car', in which case the median accross channels will be subtracted
     :return: x, filtered array
     """
     if butter_kwargs is None:
         butter_kwargs = {'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}
     if k_kwargs is None:
         k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': 3000,
                     'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}}
+        spatial_fcn = lambda dat: kfilt(dat, **k_kwargs)  # noqa
+    elif isinstance(k_kwargs, dict):
+        spatial_fcn = lambda dat: kfilt(dat, **k_kwargs)  # noqa
+    else:
+        spatial_fcn = lambda dat: car(dat, lagc=int(0.1 * fs))  # noqa
     h = neuropixel.trace_header(version=neuropixel_version)
     if channel_labels is True:
         channel_labels, _ = detect_bad_channels(x, fs)
@@ -231,9 +268,9 @@
     if channel_labels is not None:
         x = interpolate_bad_channels(x, channel_labels, h)
         inside_brain = np.where(channel_labels != 3)[0]
-        x[inside_brain, :] = kfilt(x[inside_brain, :], **k_kwargs)  # apply the k-filter
+        x[inside_brain, :] = spatial_fcn(x[inside_brain, :])  # apply the k-filter
     else:
-        x = kfilt(x, **k_kwargs)
+        x = spatial_fcn(x)
     return x
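Per the branching above, k_kwargs=None and dict values select the k-filter, while any other value (e.g. the string 'car' mentioned in the docstring) selects common average referencing. A hedged usage sketch with illustrative data:

import numpy as np
from ibllib.dsp.voltage import destripe

fs = 30000                               # AP-band sampling rate (illustrative)
data = np.random.randn(384, fs) * 1e-5   # (ntraces, ns): one second of fake data
clean_kfilt = destripe(data, fs=fs)                # default: spatial k-filter
clean_car = destripe(data, fs=fs, k_kwargs='car')  # median across channels subtracted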
@@ -245,7 +282,7 @@ def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, appen
     Production version with optimized FFTs - requires pyfftw
     :param sr: seismic reader object (spikeglx.Reader)
     :param output_file: (optional, defaults to .bin extension of the compressed bin file)
-    :param h: (optional)
+    :param h: (optional) neuropixel trace header. Dictionary with key 'sample_shift'
     :param wrot: (optional) whitening matrix [nc x nc] or amplitude scalar to apply to the output
     :param append: (optional, False) for chronic recordings, append to end of file
    :param nc_out: (optional, True) saves non selected channels (synchronisation trace) in output
@@ -273,7 +310,7 @@
         k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': 3000,
                     'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}}
     h = neuropixel.trace_header(version=1) if h is None else h
-    ncv = h['x'].size  # number of channels
+    ncv = h['sample_shift'].size  # number of channels
     output_file = sr.file_bin.with_suffix('.bin') if output_file is None else output_file
     assert output_file != sr.file_bin
     taper = np.r_[0, scipy.signal.windows.cosine((SAMPLES_TAPER - 1) * 2), 0]
@@ -504,7 +541,7 @@ def nxcor(x, ref):
     xcor = channels_similarity(raw)
     fscale, psd = scipy.signal.welch(raw * 1e6, fs=fs)  # units; uV ** 2 / Hz
 
-    sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 1000 / fs / 2, 'btype': 'highpass'}, output='sos')
+    sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}, output='sos')
     hf = scipy.signal.sosfiltfilt(sos_hp, raw)
     xcorf = channels_similarity(hf)
@@ -513,7 +550,7 @@
         'rms_raw': rms(raw),  # very similar to the rms avfter butterworth filter
         'xcor_hf': detrend(xcor, 11),
         'xcor_lf': xcorf - detrend(xcorf, 11) - 1,
-        'psd_hf': np.mean(psd[:, fscale > 12000], axis=-1),
+        'psd_hf': np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1),  # 80% nyquists
     })
 
     # make recommendation
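The psd_hf change replaces the hard-coded 12 kHz cutoff with 80% of Nyquist: numerically identical at the usual 30 kHz AP sampling rate, but the threshold now scales with fs. A quick check (values illustrative):

for fs in (30000, 2500):
    print(fs, fs / 2 * 0.8)  # 30000 -> 12000.0 Hz (unchanged); 2500 -> 1000.0 Hz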

ibllib/io/extractors/camera.py

Lines changed: 1 addition & 1 deletion
@@ -553,7 +553,7 @@ def groom_pin_state(gpio, audio, ts, tolerance=2., display=False, take='first',
     downs = ts[high2low] - ts[high2low][0]
     offsets = audio_times[1::2] - audio_times[1]
     assigned = attribute_times(offsets, downs, tol=tolerance, take=take)
-    unassigned = np.setdiff1d(np.arange(onsets.size), assigned[assigned > -1])
+    unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1])
     if unassigned.size > 0:
         _logger.debug(f'{unassigned.size} audio TTL falls were not detected by the camera')
     # Check that all pin state downticks could be attributed to an offset TTL
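The fix sizes the unassigned index range by offsets, the array whose indices assigned actually contains; onsets can have a different length. An illustrative numpy sketch of the corrected logic (values made up):

import numpy as np

offsets = np.array([0.0, 1.2, 2.5, 4.0])  # audio TTL fall times (4 events)
assigned = np.array([0, 1, -1, 2])        # offset index matched to each camera down-tick, -1 if none
unassigned = np.setdiff1d(np.arange(offsets.size), assigned[assigned > -1])
print(unassigned)  # -> [3]: one audio fall was not detected by the camera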

ibllib/io/spikeglx.py

Lines changed: 5 additions & 0 deletions
@@ -478,6 +478,11 @@ def _geometry_from_meta(meta_data):
     """
     cm = _map_channels_from_meta(meta_data)
     major_version = _get_neuropixel_major_version_from_meta(meta_data)
+    if cm is None:
+        _logger.warning("Meta data doesn't have geometry (snsShankMap field), returning defaults")
+        th = neuropixel.trace_header(version=major_version)
+        th['flag'] = th['x'] * 0 + 1.
+        return th
     th = cm.copy()
     if major_version == 1:
         # the spike sorting channel maps have a flipped version of the channel map
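A sketch of what the new fallback returns, assuming trace_header carries the geometry keys used elsewhere in this commit ('x', 'y', 'sample_shift'); the added 'flag' simply marks every channel as valid:

from ibllib.ephys import neuropixel

th = neuropixel.trace_header(version=1)  # default Neuropixel 1 geometry
th['flag'] = th['x'] * 0 + 1.            # ones, same shape as the coordinate arrays
# _geometry_from_meta(meta_data) now returns this when snsShankMap is missing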

ibllib/pipes/ephys_preprocessing.py

Lines changed: 87 additions & 65 deletions
@@ -9,6 +9,7 @@
 
 import numpy as np
 import pandas as pd
+
 import one.alf.io as alfio
 
 from ibllib.misc import check_nvidia_driver
@@ -24,6 +25,8 @@
 from ibllib.qc.camera import run_all_qc as run_camera_qc
 from ibllib.qc.dlc import DlcQC
 from ibllib.dsp import rms
+from ibllib.plots.figures import dlc_qc_plot
+from ibllib.plots.snapshot import ReportSnapshot
 from brainbox.behavior.dlc import likelihood_threshold, get_licks, get_pupil_diameter, get_smooth_pupil_diameter
 
 _logger = logging.getLogger("ibllib")
@@ -871,85 +874,104 @@ class EphysPostDLC(tasks.Task):
                         ('_ibl_rightCamera.times.npy', 'alf', True),
                         ('_ibl_leftCamera.times.npy', 'alf', True),
                         ('_ibl_bodyCamera.times.npy', 'alf', True)],
+        # More files are required for all panels of the DLC QC plot to function
         'output_files': [('_ibl_leftCamera.features.pqt', 'alf', True),
                          ('_ibl_rightCamera.features.pqt', 'alf', True),
                          ('licks.times.npy', 'alf', True)]
     }
 
-    def _run(self, overwrite=False, run_qc=True):
+    def _run(self, overwrite=False, run_qc=True, plot_qc=True):
         # Check if output files exist locally
         exist, output_files = self.assert_expected(self.signature['output_files'], silent=True)
         if exist and not overwrite:
-            _logger.warning('EphysPostDLC outputs exist and overwrite=False, skipping.')
-            return output_files
-        if exist and overwrite:
-            _logger.warning('EphysPostDLC outputs exist and overwrite=True, overwriting existing outputs.')
-        # Find all available dlc traces and dlc times
-        dlc_files = list(Path(self.session_path).joinpath('alf').glob('_ibl_*Camera.dlc.*'))
-        for dlc_file in dlc_files:
-            _logger.debug(dlc_file)
-        output_files = []
-        combined_licks = []
-
-        for dlc_file in dlc_files:
-            # Catch unforeseen exceptions and move on to next cam
-            try:
-                cam = label_from_path(dlc_file)
-                # load dlc trace and camera times
-                dlc = pd.read_parquet(dlc_file)
-                dlc_thresh = likelihood_threshold(dlc, 0.9)
-                # try to load respective camera times
-                try:
-                    dlc_t = np.load(next(Path(self.session_path).joinpath('alf').glob(f'_ibl_{cam}Camera.times.*npy')))
-                    times = True
-                except StopIteration:
-                    _logger.error(f'No camera.times found for {cam} camera. '
-                                  f'Computations using camera.times will be skipped')
-                    self.status = -1
-                    times = False
-
-                # These features are only computed from left and right cam
-                if cam in ('left', 'right'):
-                    features = pd.DataFrame()
-                    # If camera times are available, get the lick time stamps for combined array
-                    if times:
-                        _logger.info(f"Computing lick times for {cam} camera.")
-                        combined_licks.append(get_licks(dlc_thresh, dlc_t))
-                    else:
-                        _logger.warning(f"Skipping lick times for {cam} camera as no camera.times available.")
-                    # Compute pupil diameter, raw and smoothed
-                    _logger.info(f"Computing raw pupil diameter for {cam} camera.")
-                    features['pupilDiameter_raw'] = get_pupil_diameter(dlc_thresh)
-                    _logger.info(f"Computing smooth pupil diameter for {cam} camera.")
-                    features['pupilDiameter_smooth'] = get_smooth_pupil_diameter(features['pupilDiameter_raw'], cam)
-                    # Safe to pqt
-                    features_file = Path(self.session_path).joinpath('alf', f'_ibl_{cam}Camera.features.pqt')
-                    features.to_parquet(features_file)
-                    output_files.append(features_file)
-
-                # For all cams, compute DLC qc if times available
-                if times and run_qc:
-                    # Setting download_data to False because at this point the data should be there
-                    qc = DlcQC(self.session_path, side=cam, one=self.one, download_data=False)
-                    qc.run(update=True)
-                else:
-                    if not times:
-                        _logger.warning(f"Skipping QC for {cam} camera as no camera.times available")
-                    if not run_qc:
-                        _logger.warning(f"Skipping QC for {cam} camera as run_qc=False")
-
-            except BaseException:
-                _logger.error(traceback.format_exc())
-                self.status = -1
-                continue
-
-        # Combined lick times
-        if len(combined_licks) > 0:
-            lick_times_file = Path(self.session_path).joinpath('alf', 'licks.times.npy')
-            np.save(lick_times_file, sorted(np.concatenate(combined_licks)))
-            output_files.append(lick_times_file)
-        else:
-            _logger.warning("No lick times computed for this session.")
+            _logger.warning('EphysPostDLC outputs exist and overwrite=False, skipping computations of outputs.')
+        else:
+            if exist and overwrite:
+                _logger.warning('EphysPostDLC outputs exist and overwrite=True, overwriting existing outputs.')
+            # Find all available dlc traces and dlc times
+            dlc_files = list(Path(self.session_path).joinpath('alf').glob('_ibl_*Camera.dlc.*'))
+            for dlc_file in dlc_files:
+                _logger.debug(dlc_file)
+            output_files = []
+            combined_licks = []
+
+            for dlc_file in dlc_files:
+                # Catch unforeseen exceptions and move on to next cam
+                try:
+                    cam = label_from_path(dlc_file)
+                    # load dlc trace and camera times
+                    dlc = pd.read_parquet(dlc_file)
+                    dlc_thresh = likelihood_threshold(dlc, 0.9)
+                    # try to load respective camera times
+                    try:
+                        dlc_t = np.load(next(Path(self.session_path).joinpath('alf').glob(f'_ibl_{cam}Camera.times.*npy')))
+                        times = True
+                    except StopIteration:
+                        _logger.error(f'No camera.times found for {cam} camera. '
+                                      f'Computations using camera.times will be skipped')
+                        self.status = -1
+                        times = False
+
+                    # These features are only computed from left and right cam
+                    if cam in ('left', 'right'):
+                        features = pd.DataFrame()
+                        # If camera times are available, get the lick time stamps for combined array
+                        if times:
+                            _logger.info(f"Computing lick times for {cam} camera.")
+                            combined_licks.append(get_licks(dlc_thresh, dlc_t))
+                        else:
+                            _logger.warning(f"Skipping lick times for {cam} camera as no camera.times available.")
+                        # Compute pupil diameter, raw and smoothed
+                        _logger.info(f"Computing raw pupil diameter for {cam} camera.")
+                        features['pupilDiameter_raw'] = get_pupil_diameter(dlc_thresh)
+                        _logger.info(f"Computing smooth pupil diameter for {cam} camera.")
+                        features['pupilDiameter_smooth'] = get_smooth_pupil_diameter(features['pupilDiameter_raw'], cam)
+                        # Safe to pqt
+                        features_file = Path(self.session_path).joinpath('alf', f'_ibl_{cam}Camera.features.pqt')
+                        features.to_parquet(features_file)
+                        output_files.append(features_file)
+
+                    # For all cams, compute DLC qc if times available
+                    if times and run_qc:
+                        # Setting download_data to False because at this point the data should be there
+                        qc = DlcQC(self.session_path, side=cam, one=self.one, download_data=False)
+                        qc.run(update=True)
+                    else:
+                        if not times:
+                            _logger.warning(f"Skipping QC for {cam} camera as no camera.times available")
+                        if not run_qc:
+                            _logger.warning(f"Skipping QC for {cam} camera as run_qc=False")
+
+                except BaseException:
+                    _logger.error(traceback.format_exc())
+                    self.status = -1
+                    continue
+
+            # Combined lick times
+            if len(combined_licks) > 0:
+                lick_times_file = Path(self.session_path).joinpath('alf', 'licks.times.npy')
+                np.save(lick_times_file, sorted(np.concatenate(combined_licks)))
+                output_files.append(lick_times_file)
+            else:
+                _logger.warning("No lick times computed for this session.")
+
+        if plot_qc:
+            _logger.info("Creating DLC QC plot")
+            try:
+                session_id = self.one.path2eid(self.session_path)
+                fig_path = self.session_path.joinpath('snapshot', 'dlc_qc_plot.png')
+                if not fig_path.parent.exists():
+                    fig_path.parent.mkdir(parents=True, exist_ok=True)
+                fig = dlc_qc_plot(self.one.path2eid(self.session_path), one=self.one)
+                fig.savefig(fig_path)
+                snp = ReportSnapshot(self.session_path, session_id, one=self.one)
+                snp.outputs = [fig_path]
+                snp.register_images(widths=['orig'],
+                                    function=str(dlc_qc_plot.__module__) + '.' + str(dlc_qc_plot.__name__))
            except BaseException:
+                _logger.error('Could not create and/or upload DLC QC Plot')
                 _logger.error(traceback.format_exc())
                 self.status = -1
 
         return output_files
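A hedged sketch of invoking the updated task; it assumes the standard ibllib pattern where Task.run() forwards its kwargs to _run(), and the session path is illustrative:

from one.api import ONE
from ibllib.pipes.ephys_preprocessing import EphysPostDLC

one = ONE()
session_path = '/data/Subjects/SW001/2021-01-01/001'  # illustrative
task = EphysPostDLC(session_path, one=one)
task.run(plot_qc=True)  # assumption: run() passes kwargs through to _run()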

ibllib/pipes/histology.py

Lines changed: 5 additions & 5 deletions
@@ -347,11 +347,11 @@ def create_channel_dict(traj, brain_locations):
     channel_dict = []
     for i in np.arange(brain_locations.id.size):
         channel_dict.append({
-            'x': brain_locations.xyz[i, 0] * 1e6,
-            'y': brain_locations.xyz[i, 1] * 1e6,
-            'z': brain_locations.xyz[i, 2] * 1e6,
-            'axial': brain_locations.axial[i],
-            'lateral': brain_locations.lateral[i],
+            'x': np.float64(brain_locations.xyz[i, 0] * 1e6),
+            'y': np.float64(brain_locations.xyz[i, 1] * 1e6),
+            'z': np.float64(brain_locations.xyz[i, 2] * 1e6),
+            'axial': np.float64(brain_locations.axial[i]),
+            'lateral': np.float64(brain_locations.lateral[i]),
             'brain_region': int(brain_locations.id[i]),
             'trajectory_estimate': traj['id']
         })
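A plausible rationale for the np.float64 casts, stated as an assumption rather than taken from the commit: np.float64 subclasses Python float and is therefore JSON serializable for REST payloads, whereas np.float32 (what array indexing can yield) is not:

import json
import numpy as np

json.dumps({'x': np.float64(123.4)})  # fine: np.float64 is a Python float subclass
try:
    json.dumps({'x': np.float32(123.4)})
except TypeError as e:
    print(e)  # "Object of type float32 is not JSON serializable"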
