tkuanlun350 / 3DUnet-Tensorflow-Brats18

3D Unet biomedical segmentation model powered by tensorpack with fast I/O speed

Use 'None' to define the batch dimension when defining Model inputs() #19

Open mini-Shark opened 5 years ago

mini-Shark commented 5 years ago

@tkuanlun350 Hi, thanks for your work. I ran into a problem when defining the model inputs. In TensorFlow we usually use None for the batch dimension when defining input placeholders, so the model can accept inputs of any batch size; in particular, a larger batch size can then be used for online evaluation of the model. In your original code the batch dimension in Unet3dModel.inputs() is a fixed size. I tried changing it to None, as I have seen in other tensorpack examples (they use 4-D tensor inputs, whereas we use 5-D), but this raises some errors. Could you please help me figure it out? The code and log are pasted below.

class Unet3dModel(ModelDesc):
    def __init__(self, model_name="unet3d", modelType="training",
                 inference_shape=config.INFERENCE_PATCH_SIZE):
        self.model_name = model_name
        self.modelType = modelType
        self.inference_shape = inference_shape
        print(self.modelType)

    def optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=config.BASE_LR, trainable=False)
        tf.summary.scalar('learning_rate', lr)
        opt = tf.train.MomentumOptimizer(lr, 0.9)
        return opt

    def preprocess(self, image):
        # transform to NCDHW
        # original input is [batch, d, h, w, mod]
        return tf.transpose(image, [0, 4, 1, 2, 3])

    def inputs(self):
        S = config.PATCH_SIZE
        if self.modelType == 'training':
            ret = [
                tf.placeholder(tf.float32, (None, S[0], S[1], S[2], 4), 'image'),
                tf.placeholder(tf.float32, (None, S[0], S[1], S[2], 1), 'weight'),
                tf.placeholder(tf.float32, (None, S[0], S[1], S[2], 4), 'label')]
        else:
            S = self.inference_shape
            ret = [
                tf.placeholder(tf.float32, (config.BATCH_SIZE, S[0], S[1], S[2], 4), 'image')]
        return ret
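
If I am reading the traceback below correctly, the failure can be reproduced with a much smaller snippet. Here is a minimal sketch (assuming the TF 1.x tf.layers API that unet.py's init_conv appears to use; the kernel size and names are illustrative, not the repository's code). It feeds a 5-D NCDHW tensor with an undefined batch dimension into tf.layers.conv3d with data_format='channels_first', which seems to be where the reshape in the log comes from:

import tensorflow as tf

# 5-D input in NCDHW layout with an undefined batch dimension,
# i.e. the same layout produced by preprocess() above.
x = tf.placeholder(tf.float32, (None, 4, 32, 32, 32), 'image_ncdhw')

# With data_format='channels_first', this TF version appears to add the
# bias by reshaping the conv output to 4-D using the *static* output
# shape. That static shape is [None, 16, 32, 32, 32] here, so the
# reshape target becomes [None, 16, 1024, 32] -- the exact list shown
# in the error below -- and a list containing None cannot be converted
# to a tensor.
y = tf.layers.conv3d(x, filters=16, kernel_size=3, padding='same',
                     data_format='channels_first', name='init_conv')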

[0217 21:24:33 @training.py:100] Building graph for training tower 0 on device /gpu:0 ...
[0217 21:24:33 @registry.py:121] unet3d input: [None, 4, 32, 32, 32]
Traceback (most recent call last):
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 518, in make_tensor_proto
    str_values = [compat.as_bytes(x) for x in proto_values]
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 518, in <listcomp>
    str_values = [compat.as_bytes(x) for x in proto_values]
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/util/compat.py", line 68, in as_bytes
    (bytes_or_text,))
TypeError: Expected binary or unicode string, got None

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "train.py", line 295, in <module>
    launch_train_with_config(cfg, trainer)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/train/interface.py", line 82, in launch_train_with_config
    model._build_graph_get_cost, model.get_optimizer)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/utils/argtools.py", line 182, in wrapper
    return func(*args, **kwargs)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/train/tower.py", line 165, in setup_graph
    train_callbacks = self._setup_graph(input, get_cost_fn, get_opt_fn)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/train/trainers.py", line 167, in _setup_graph
    self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/graph_builder/training.py", line 213, in build
    use_vs=[False] + [True] * (len(self.towers) - 1))
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/graph_builder/training.py", line 107, in build_on_towers
    ret.append(func())
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/train/tower.py", line 192, in get_grad_fn
    cost = get_cost_fn(*input.get_input_tensors())
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/tfutils/tower.py", line 207, in __call__
    output = self._tower_fn(*args)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/graph_builder/model_desc.py", line 234, in _build_graph_get_cost
    ret = self.build_graph(*inputs)
  File "train.py", line 97, in build_graph
    featuremap = model(self.model_name, image)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorpack/models/registry.py", line 124, in wrapped_func
    outputs = func(*args, **actual_args)
  File "/home/user/YangJing/brats/brats_chen/unet.py", line 23, in unet3d
    name="init_conv")
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 826, in conv3d
    return layer.apply(inputs)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 825, in apply
    return self.__call__(inputs, *args, **kwargs)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 714, in __call__
    outputs = self.call(inputs, *args, **kwargs)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/layers/convolutional.py", line 186, in call
    outputs_shape[4]])
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 5782, in reshape
    "Reshape", tensor=tensor, shape=shape, name=name)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 513, in _apply_op_helper
    raise err
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 510, in _apply_op_helper
    preferred_dtype=default_dtype)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1040, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 235, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 214, in constant
    value, dtype=dtype, shape=shape, verify_shape=verify_shape))
  File "/home/user/anaconda2/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 522, in make_tensor_proto
    "supported type." % (type(values), values))
TypeError: Failed to convert object of type <class 'list'> to Tensor. Contents: [None, 16, 1024, 32]. Consider casting elements to a supported type.