SpikeInterface / spikeinterface

A Python-based module for creating flexible and robust spike sorting pipelines.
https://spikeinterface.readthedocs.io
MIT License
493 stars 188 forks source link

ids_to_indices is called on indices rather than ids #1601

Open timsainb opened 1 year ago

timsainb commented 1 year ago

I get the following error:


sorting = ss.run_kilosort3(
    recording_cmr,
    output_folder=output_folder,
    remove_existing_folder=True,
    verbose=True,
)

RUNNING SHELL SCRIPT: /n/groups/datta/tim_sainburg/projects/23-02-24-chronic-pipeline/data/spikesorting/tmp/test_ks3/sorter_output/run_kilosort3.sh

                            < M A T L A B (R) >

                  Copyright 1984-2021 The MathWorks, Inc.

             R2021a Update 3 (9.10.0.1684407) 64-bit (glnxa64)

                                May 27, 2021

...

Saving results to Phy

kilosort3 run time 185.54s
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[33], line 1
----> 1 sorting_TDC = ss.run_kilosort3(
      2     recording_cmr,
      3     output_folder=output_folder,
      4     remove_existing_folder=True,
      5     verbose=True,
      6 )

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/sorters/runsorter.py:696, in run_kilosort3(*args, **kwargs)
    695 def run_kilosort3(*args, **kwargs):
--> 696     return run_sorter('kilosort3', *args, **kwargs)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/sorters/runsorter.py:143, in run_sorter(sorter_name, recording, output_folder, remove_existing_folder, delete_output_folder, verbose, raise_error, docker_image, singularity_image, with_output, **sorter_params)
    136             container_image = singularity_image
    137     return run_sorter_container(
    138         container_image=container_image,
    139         mode=mode,
    140         **common_kwargs,
    141     )
--> 143 return run_sorter_local(**common_kwargs)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/sorters/runsorter.py:165, in run_sorter_local(sorter_name, recording, output_folder, remove_existing_folder, delete_output_folder, verbose, raise_error, with_output, **sorter_params)
    163 SorterClass.run_from_folder(output_folder, raise_error, verbose)
    164 if with_output:
--> 165     sorting = SorterClass.get_result_from_folder(output_folder)
    166 else:
    167     sorting = None

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/sorters/basesorter.py:290, in BaseSorter.get_result_from_folder(cls, output_folder)
    287     sorting = cls._get_result_from_folder(output_folder)
    289 # register recording to Sorting object
--> 290 recording = load_extractor(output_folder / 'spikeinterface_recording.json')
    291 if recording is not None:
    292     # can be None when not dumpable
    293     sorting.register_recording(recording)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:920, in load_extractor(file_or_folder_or_dict, base_folder)
    918     return BaseExtractor.from_dict(file_or_folder_or_dict, base_folder=base_folder)
    919 else:
--> 920     return BaseExtractor.load(file_or_folder_or_dict, base_folder=base_folder)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:537, in BaseExtractor.load(file_path, base_folder)
    535         print('The extractor was not dumpable')
    536         return None
--> 537     extractor = BaseExtractor.from_dict(d, base_folder=base_folder)
    538     return extractor
    540 elif file_path.is_dir():
    541     # case from a folder after a calling extractor.save(...)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:370, in BaseExtractor.from_dict(d, base_folder)
    368     assert base_folder is not None, 'When  relative_paths=True, need to provide base_folder'
    369     d = _make_paths_absolute(d, base_folder)
--> 370 extractor = _load_extractor_from_dict(d)
    371 folder_metadata = d.get('folder_metadata', None)
    372 if folder_metadata is not None:

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:863, in _load_extractor_from_dict(dic)
    859     print('Versions are not the same. This might lead to errors. Use ', class_name.split('.')[0],
    860           'version', dic['version'])
    862 # instantiate extrator object
--> 863 extractor = cls(**kwargs)
    865 extractor._annotations.update(dic['annotations'])
    866 for k, v in dic['properties'].items():
    867     # print(k, v)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/preprocessing/common_reference.py:91, in CommonReferenceRecording.__init__(self, recording, reference, operator, groups, ref_channel_ids, local_radius, verbose, dtype)
     89 # tranforms groups (ids) to groups (indices)
     90 if groups is not None:
---> 91     groups = [self.ids_to_indices(g) for g in groups]
     92 if ref_channel_ids is not None:
     93     ref_channel_inds = self.ids_to_indices(ref_channel_ids)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/preprocessing/common_reference.py:91, in <listcomp>(.0)
     89 # tranforms groups (ids) to groups (indices)
     90 if groups is not None:
---> 91     groups = [self.ids_to_indices(g) for g in groups]
     92 if ref_channel_ids is not None:
     93     ref_channel_inds = self.ids_to_indices(ref_channel_ids)

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:102, in BaseExtractor.ids_to_indices(self, ids, prefer_slice)
    100 else:
    101     _main_ids = self._main_ids.tolist()
--> 102     indices = np.array([_main_ids.index(id) for id in ids], dtype=int)
    103     if prefer_slice:
    104         if np.all(np.diff(indices) == 1):

File /n/groups/datta/tim_sainburg/projects/spikeinterface/src/spikeinterface/core/base.py:102, in <listcomp>(.0)
    100 else:
    101     _main_ids = self._main_ids.tolist()
--> 102     indices = np.array([_main_ids.index(id) for id in ids], dtype=int)
    103     if prefer_slice:
    104         if np.all(np.diff(indices) == 1):

ValueError: 5 is not in list

What's happening is that _main_ids is

['CH5', 'CH6', 'CH7', 'CH8', 'CH10', 'CH12', 'CH13', 'CH14', 'CH15', 'CH16', 'CH17', 'CH18', 'CH19', 'CH20', 'CH22', 'CH24', 'CH25', 'CH26', 'CH27', 'CH28', 'CH29', 'CH30', 'CH31', 'CH32', 'CH33', 'CH34', 'CH35', 'CH36', 'CH37', 'CH38', 'CH39', 'CH40', 'CH41', 'CH42', 'CH43', 'CH44', 'CH45', 'CH46', 'CH47', 'CH48', 'CH49', 'CH50', 'CH51', 'CH52', 'CH53', 'CH54', 'CH55', 'CH56', 'CH57', 'CH58', 'CH59', 'CH60', 'CH61', 'CH62', 'CH63', 'CH64']

while ids is:

[5, 6, 7, 8, 9, 20, 22, 23, 38, 39, 50, 51, 52, 53, 54, 55]

So, 5 is not in _main_ids, because earlier on ids has already been converted to indices.

The groups are already saved as indices in spikeinterface_recording.json

samuelgarcia commented 1 year ago

Hi Tim, you are back! This is good news. run_kilosort3() should be replaced by run_sorter('kilosort3', ...)

This is weird! I guess you are working with the latest release. Can you tell us how you compute recording_cmr ?

timsainb commented 1 year ago

Hey Sam! Yes I'm back doing ephys again after working in just behavior for a while.

Here is how I compute recording_cmr

recording = se.read_openephys(folder_path=sort_path, block_index=0)
recording = recording.set_probe(probe, group_mode='by_shank')
# bandpass filter
recording_f = spre.bandpass_filter(
    recording_clean, freq_min=freq_min, freq_max=freq_max
)
# common average reference
recording_cmr = spre.common_reference(
    recording_f, reference="global", operator="median", groups=channel_shanks
)

And if set groups to None in common_reference, I get rid of the error.

h-mayorquin commented 2 months ago

I am unable to reproduce this error:

from spikeinterface.core import generate_ground_truth_recording
import spikeinterface.preprocessing as spre
from spikeinterface.sorters import run_sorter

recording, sorting_orignal = generate_ground_truth_recording(num_channels=384, durations=[10.0])
channel_ids = [str(channel_id) for channel_id in recording.get_channel_ids()]
recording = recording.rename_channels(new_channel_ids=channel_ids)

# bandpass filter
recording_f = spre.bandpass_filter(
    recording,
)

groups = [channel_ids[:-10], channel_ids[-10:]] 
recording_cmr = spre.common_reference(
    recording_f, reference="global", operator="median", groups=groups
)

sorting = run_sorter(sorter_name="kilosort3", remove_existing_folder=True, recording=recording_cmr, folder="test_kilosort3", verbose=True,  docker_image=True)

I think it might have been solved by some changes we did to cmr.

Could you test it again @timsainb ? If there is no response we should close this in a month or so.