LorenFrankLab / spyglass

Neuroscience data analysis framework for reproducible research built by Loren Frank Lab at UCSF
https://lorenfranklab.github.io/spyglass/
MIT License
90 stars 41 forks source link

error when trying to run spikesorting for clusterless spikes #566

Closed MichaelCoulter closed 1 year ago

MichaelCoulter commented 1 year ago

Running spike sorting on {'nwb_file_name': 'CH10520210623.nwb', 'sort_group_id': 10, 'sort_interval_name': 'raw data valid times', 'preproc_params_name': 'franklab_tetrode_hippocampus', 'team_name': 'mcoulter section', 'sorter': 'clusterless_thresholder', 'sorter_params_name': 'clusterless_fixed', 'artifact_removed_interval_list_name': 'CH10520210623.nwb_raw data valid times_10_franklab_tetrode_hippocampus_ampl_100_2ms_artifact_removed_valid_times'}...

TypeError Traceback (most recent call last) Cell In [9], line 527 525 position_info_param_name = "default_decoding" 526 # want to skip tet 100 and 101, --> 527 populate_spike_threshold(recording_keys[2:], 528 mark_param_name=mark_param_name, 529 position_info_param_name=position_info_param_name ) 530 # some problem with the keys 531 for tetrode in (Curation & {'nwb_file_name':nwb_file_name}).fetch('sort_group_id'):

Cell In [9], line 463, in populate_spike_threshold(spikesorting_selection_keys, mark_param_name, position_info_param_name) 450 def populate_spike_threshold( 451 spikesorting_selection_keys: list, 452 mark_param_name='default', (...) 457 # either do spike sorting or marks - not both 458 ## Populate spike sorting 459 SpikeSortingSelection().insert( 460 spikesorting_selection_keys, 461 skip_duplicates=True, 462 ) --> 463 SpikeSorting.populate(spikesorting_selection_keys) 465 ## Skip any curation 466 curation_keys = [Curation.insert_curation( 467 key) for key in spikesorting_selection_keys]

File ~/anaconda3/envs/spyglass2/lib/python3.9/site-packages/datajoint/autopopulate.py:230, in AutoPopulate.populate(self, suppress_errors, return_exception_objects, reserve_jobs, order, limit, max_calls, display_progress, processes, make_kwargs, *restrictions) 226 if processes == 1: 227 for key in ( 228 tqdm(keys, desc=self.class.name) if display_progress else keys 229 ): --> 230 error = self._populate1(key, jobs, **populate_kwargs) 231 if error is not None: 232 error_list.append(error)

File ~/anaconda3/envs/spyglass2/lib/python3.9/site-packages/datajoint/autopopulate.py:281, in AutoPopulate._populate1(self, key, jobs, suppress_errors, return_exception_objects, make_kwargs) 279 self.class._allow_insert = True 280 try: --> 281 make(dict(key), **(make_kwargs or {})) 282 except (KeyboardInterrupt, SystemExit, Exception) as error: 283 try:

File ~/spyglass/src/spyglass/spikesorting/spikesorting_sorting.py:206, in SpikeSorting.make(self, key) 203 sorter_params.pop("whiten", None) 205 # Detect peaks for clusterless decoding --> 206 detected_spikes = detect_peaks(recording, **sorter_params) 207 sorting = si.NumpySorting.from_times_labels( 208 times_list=detected_spikes["sample_ind"], 209 labels_list=np.zeros(len(detected_spikes), dtype=np.int), 210 sampling_frequency=recording.get_sampling_frequency(), 211 ) 212 else:

File ~/spikeinterface/src/spikeinterface/sortingcomponents/peak_detection.py:80, in detect_peaks(recording, method, pipeline_nodes, gather_mode, folder, names, kwargs) 77 mp_context = method_class.preferred_mp_context 79 # prepare args ---> 80 method_args = method_class.check_params(recording, method_kwargs) 82 extra_margin = 0 83 if pipeline_nodes is None:

TypeError: check_params() got an unexpected keyword argument 'outputs'

Any ideas? Thanks.

lfrank commented 1 year ago

I would guess that this is a problem in one of the parameter sets, where there is a dictionary entry params['outputs'].

Can you check?

MichaelCoulter commented 1 year ago

Sorter_params now has an entry called 'outputs'. I'll remove that for the clusterless thresholder.

/home/mcoulter/spyglass/src/spyglass/spikesorting/spikesorting_sorting.py(206)make() 204 205 # Detect peaks for clusterless decoding --> 206 detected_spikes = detect_peaks(recording, **sorter_params) 207 sorting = si.NumpySorting.from_times_labels( 208 times_list=detected_spikes["sample_ind"],

ipdb> sorter_params {'detect_threshold': 100.0, 'method': 'locally_exclusive', 'peak_sign': 'neg', 'local_radius_um': 100, 'noise_levels': array([1.]), 'random_chunk_kwargs': {}, 'outputs': 'sorting'}