bkntr / napari-brainways

Brainways UI plugin for napari
GNU General Public License v3.0

Cell counting issue #18

Closed · Giane98 closed this issue 4 months ago

Giane98 commented 4 months ago

Dear Brainways team,

Thank you for this much-needed resource; I think the neuroscience community will greatly benefit from your work. I am writing because I am having issues with the StarDist cell counting function. Whether we select an ROI and run it on preview or run it on the entire section, the following error appears. It seems to indicate that the block size, overlap, and context don't meet the requirements set by the function:


AssertionError                            Traceback (most recent call last)

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/superqt/utils/_qthreading.py:613, in create_worker.<locals>.reraise(e=AssertionError())
    612 def reraise(e):
--> 613     raise e
        e = AssertionError()

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/superqt/utils/_qthreading.py:175, in WorkerBase.run(self=<napari._qt.qthreading.FunctionWorker object at 0x2bf05ca60>)
    173 warnings.filterwarnings("always")
    174 warnings.showwarning = lambda *w: self.warned.emit(w)
--> 175 result = self.work()
        self = <napari._qt.qthreading.FunctionWorker object at 0x2bf05ca60>
    176 if isinstance(result, Exception):
    177     if isinstance(result, RuntimeError):
    178         # The Worker object has likely been deleted.
    179         # A deleted wrapped C/C++ object may result in a runtime
    180         # error that will cause segfault if we try to do much other
    181         # than simply notify the user.

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/superqt/utils/_qthreading.py:354, in FunctionWorker.work(self=<napari._qt.qthreading.FunctionWorker object at 0x2bf05ca60>)
    353 def work(self) -> _R:
--> 354     return self._func(*self._args, **self._kwargs)
        self._func = <bound method CellDetectorController._run_cell_detector_on_preview of <napari_brainways.controllers.cell_detector_controller.CellDetectorController object at 0x29f7edca0>>
        self = <napari._qt.qthreading.FunctionWorker object at 0x2bf05ca60>
        self._args = ()
        self._kwargs = {}

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/napari_brainways/controllers/cell_detector_controller.py:237, in CellDetectorController._run_cell_detector_on_preview(self=<napari_brainways.controllers.cell_detector_controller.CellDetectorController object at 0x29f7edca0>)
    235 else:
    236     cell_detector_params = self.ui.project.settings.default_cell_detector_params
--> 237 return self.model.run_cell_detector(
        self = <napari_brainways.controllers.cell_detector_controller.CellDetectorController object at 0x29f7edca0>
        self.model = <brainways.pipeline.cell_detector.CellDetector object at 0x2a85171c0>
        cell_detector_params = CellDetectorParams(normalizer='quantile', normalizer_range=(0.98, 0.997))
        self._crop = <class 'numpy.ndarray'> (4096, 3310) float32
    238     image=self._crop, params=cell_detector_params
    239 )

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/brainways/pipeline/cell_detector.py:116, in CellDetector.run_cell_detector(self=<brainways.pipeline.cell_detector.CellDetector object at 0x2a85171c0>, image=<class 'numpy.ndarray'> (4096, 3310) float32, params=CellDetectorParams(normalizer='quantile', normalizer_range=(0.98, 0.997)), kwargs={})
    112 def run_cell_detector(
    113     self, image, params: CellDetectorParams, **kwargs
    114 ) -> np.ndarray:
    115     normalizer = self.get_normalizer(params)
--> 116     labels, details = self.stardist.predict_instances_big(
        self = <brainways.pipeline.cell_detector.CellDetector object at 0x2a85171c0>
        self.stardist = StarDist2D(2D_versatile_fluo): YXC → YXC
        ├─ Directory: None
        └─ Config2D(n_dim=2, axes='YXC', n_channel_in=1, n_channel_out=33, train_checkpoint='weights_best.h5', train_checkpoint_last='weights_last.h5', train_checkpoint_epoch='weights_now.h5', n_rays=32, grid=(2, 2), backbone='unet', n_classes=None, unet_n_depth=3, unet_kernel_size=[3, 3], unet_n_filter_base=32, unet_n_conv_per_depth=2, unet_pool=[2, 2], unet_activation='relu', unet_last_activation='relu', unet_batch_norm=False, unet_dropout=0.0, unet_prefix='', net_conv_after_unet=128, net_input_shape=[None, None, 1], net_mask_shape=[None, None, 1], train_shape_completion=False, train_completion_crop=32, train_patch_size=[256, 256], train_background_reg=0.0001, train_foreground_only=0.9, train_sample_cache=True, train_dist_loss='mae', train_loss_weights=[1, 0.2], train_class_weights=(1, 1), train_epochs=800, train_steps_per_epoch=400, train_learning_rate=0.0003, train_batch_size=8, train_n_val_patches=None, train_tensorboard=True, train_reduce_lr={'factor': 0.5, 'patience': 80, 'min_delta': 0}, use_gpu=False)
        image = <class 'numpy.ndarray'> (4096, 3310) float32
        normalizer = <brainways.pipeline.cell_detector.QuantileNormalizer object at 0x294de0130>
        kwargs = {}
    117     image,
    118     axes="YX",
    119     block_size=4096,
    120     min_overlap=128,
    121     normalizer=normalizer,
    122     **kwargs,
    123 )
    124 return labels

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/stardist/models/base.py:919, in StarDistBase.predict_instances_big(self=StarDist2D(2D_versatile_fluo): YXC → YXC ├─ Dire..., 'patience': 80, 'min_delta': 0}, use_gpu=False), img=<class 'numpy.ndarray'> (4096, 3310) float32, axes='YX', block_size=(4096, 4096), min_overlap=(128, 128), context=(96, 96), labels_out=None, labels_out_dtype=<class 'numpy.int32'>, show_progress=True, **kwargs={'normalizer': <brainways.pipeline.cell_detector.QuantileNormalizer object at 0x294de0130>})
    916 print(f"{a}: context of {c} is small, recommended to use at least {o}", flush=True)
    918 # create block cover
--> 919 blocks = BlockND.cover(img.shape, axes, block_size, min_overlap, context, grid)
        axes = 'YX'
        grid = (16, 16)
        block_size = (4096, 4096)
        min_overlap = (128, 128)
        context = (96, 96)
        img = <class 'numpy.ndarray'> (4096, 3310) float32
    921 if np.isscalar(labels_out) and bool(labels_out) is False:
    922     labels_out = None

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/stardist/big.py:426, in BlockND.cover(shape=(4096, 3310), axes='YX', block_size=(4096, 4096), min_overlap=(128, 128), context=(96, 96), grid=(16, 16))
    423 assert n == len(block_size) == len(min_overlap) == len(context) == len(grid)
    425 # compute cover for each dimension
--> 426 cover_1d = [Block.cover(*args) for args in zip(shape, block_size, min_overlap, context, grid)]
        shape = (4096, 3310)
        block_size = (4096, 4096)
        min_overlap = (128, 128)
        context = (96, 96)
        grid = (16, 16)
    427 # return cover as Cartesian product of 1-dimensional blocks
    428 return tuple(BlockND(i, blocks, axes) for i, blocks in enumerate(product(*cover_1d)))

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/stardist/big.py:426, in <listcomp>(.0=<zip object>)
    423 assert n == len(block_size) == len(min_overlap) == len(context) == len(grid)
    425 # compute cover for each dimension
--> 426 cover_1d = [Block.cover(*args) for args in zip(shape, block_size, min_overlap, context, grid)]
        args = (3310, 4096, 128, 96, 16)
    427 # return cover as Cartesian product of 1-dimensional blocks
    428 return tuple(BlockND(i, blocks, axes) for i, blocks in enumerate(product(*cover_1d)))

File ~/Documents/Brainways/.vr/lib/python3.9/site-packages/stardist/big.py:184, in Block.cover(size=3310, block_size=4096, min_overlap=128, context=96, grid=16, verbose=True)
    168 @staticmethod
    169 def cover(size, block_size, min_overlap, context, grid=1, verbose=True):
    170     """Return chain of grid-aligned blocks to cover the interval [0,size].
    171
    172     Parameters block_size, min_overlap, and context will be used
    (...)
    182
    183     """
--> 184     assert 0 <= min_overlap + 2*context < block_size <= size
        0 <= min_overlap + 2*context < block_size <= size = False
        block_size = 4096
        min_overlap = 128
        context = 96
        size = 3310
        min_overlap + 2*context = 320
        2*context = 192
    185     assert 0 < grid <= block_size
    186     block_size = _grid_divisible(grid, block_size, name='block_size', verbose=verbose)

AssertionError:
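
If I am reading the last frame correctly, the assertion fails because one dimension of our image (3310 px) is smaller than the fixed 4096 px block size that Brainways passes to StarDist, so the requirement that the block fit inside the image (`block_size <= size`) cannot hold. A minimal check with the values taken from the traceback reproduces it:

```python
# Values taken from the failing frame in stardist/big.py above:
size, block_size, min_overlap, context = 3310, 4096, 128, 96

# Block.cover requires the overlap-plus-context margin to be smaller than
# the block, and the block to fit inside the image:
#   0 <= 128 + 2*96 (= 320) < 4096 <= 3310  ->  False, since 4096 > 3310
assert 0 <= min_overlap + 2 * context < block_size <= size  # raises AssertionError
```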

Thank you in advance for the help!

Giane98 commented 4 months ago

Solved. The images we were using had a lower pixel size. Once we increased the pixel width, the cell counting worked!
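
In case it helps anyone who hits the same assertion: a minimal sketch of a possible workaround (the `detect_cells` helper below is illustrative, not part of Brainways) is to fall back to StarDist's single-pass `predict_instances` whenever the image is smaller than the tiling block in some dimension, since `predict_instances_big` requires `block_size <= size` on every axis:

```python
import numpy as np
from csbdeep.utils import normalize
from stardist.models import StarDist2D

# Pretrained 2D fluorescence model (the same one Brainways wraps).
model = StarDist2D.from_pretrained("2D_versatile_fluo")

def detect_cells(image: np.ndarray, block_size: int = 4096,
                 min_overlap: int = 128, context: int = 96) -> np.ndarray:
    # Illustrative helper, not Brainways API. Percentile normalization here
    # stands in for Brainways' quantile normalizer.
    image = normalize(image, 1, 99.8)
    if all(s >= block_size for s in image.shape):
        # Image can hold at least one full block: use tiled prediction.
        labels, _ = model.predict_instances_big(
            image, axes="YX",
            block_size=block_size, min_overlap=min_overlap, context=context,
        )
    else:
        # Some dimension is smaller than the block: predict in a single pass.
        labels, _ = model.predict_instances(image)
    return labels
```

With a guard like this, small sections are segmented in one pass while large sections still take the tiled, memory-friendly path.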