Hi, this is the architecture used for training the model. We are in the process of creating another repository with the training code.
import keras.backend as K
from keras.models import Model
from keras.layers import (Input, Conv2D, Conv2DTranspose, MaxPooling2D,
                          BatchNormalization, Activation, Dropout, Lambda,
                          concatenate)

# `sm` and `sl` refer to the modules holding the blocks and the Maxout
# function defined further below.

def CDFNet(params):
    # Input layer
    inputs = Input((int(params['patch_h']), int(params['patch_w']), params['num_channels']))

    # Descending (encoder) arm: dense block followed by max-pooling, four times
    encoder_block1 = sm.CompetitiveDenseBlockInput(inputs, params)
    pool1 = MaxPooling2D(pool_size=params['pool'], strides=params['stride_pool'])(encoder_block1)
    encoder_block2 = sm.CompetitiveDenseBlock(pool1, params)
    pool2 = MaxPooling2D(pool_size=params['pool'], strides=params['stride_pool'])(encoder_block2)
    encoder_block3 = sm.CompetitiveDenseBlock(pool2, params)
    pool3 = MaxPooling2D(pool_size=params['pool'], strides=params['stride_pool'])(encoder_block3)
    encoder_block4 = sm.CompetitiveDenseBlock(pool3, params)
    pool4 = MaxPooling2D(pool_size=params['pool'], strides=params['stride_pool'])(encoder_block4)

    bottleneck = sm.CompetitiveDenseBlock(pool4, params)

    # Ascending (decoder) arm: unpool with competitive skip merge, then dense block
    up1 = sm.CompetitiveUnpoolBlock(bottleneck, encoder_block4, params)
    decoder_block4 = sm.CompetitiveDenseBlock(up1, params)
    up2 = sm.CompetitiveUnpoolBlock(decoder_block4, encoder_block3, params)
    decoder_block3 = sm.CompetitiveDenseBlock(up2, params)
    up3 = sm.CompetitiveUnpoolBlock(decoder_block3, encoder_block2, params)
    decoder_block2 = sm.CompetitiveDenseBlock(up3, params)
    up4 = sm.CompetitiveUnpoolBlock(decoder_block2, encoder_block1, params)
    decoder_block1 = sm.CompetitiveDenseBlock(up4, params)

    output = sm.ClassifierBlock(decoder_block1, params)
    model = Model(inputs=[inputs], outputs=[output])
    return model
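ClassifierBlock itself is not reproduced in this snippet. Given the 'kernel_c' and 'num_classes' entries in the params dict, a plausible minimal sketch (an assumption, not the repository's actual code) would be:

def ClassifierBlock(x, params):
    # Hypothetical sketch: project to one channel per class with a
    # kernel_c x kernel_c convolution, then softmax over the class channels.
    logits = Conv2D(params['num_classes'],
                    (params['kernel_c'], params['kernel_c']),
                    strides=params['stride_conv'], padding='same')(x)
    return Activation('softmax')(logits)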
def CompetitiveDenseBlockInput(x, params):
    '''
    Input variant of the competitive dense block (3 convolutional layers
    with BN/ReLU). The first merge concatenates instead of taking a maxout,
    because the raw input has 'num_channels' channels while later feature
    maps have 'num_filters', so an element-wise maximum would not be
    shape-compatible.

    Inputs:
    -- Params
    params = {'num_channels': 1,
              'num_filters': 64,
              'kernel_h': 5,
              'kernel_w': 5,
              'stride_conv': 1,
              'pool': 2,
              'stride_pool': 2,
              'num_classes': 44,
              'kernel_c': 1}
    The dict must also contain 'dropout' (bool) and 'dropout_rate' keys,
    which are read below.
    '''
    x0_bn = BatchNormalization()(x)
    x0 = Activation(activation='relu')(x0_bn)
    x0 = Conv2D(params['num_filters'], (params['kernel_h'], params['kernel_w']),
                strides=params['stride_conv'], padding='same')(x0)
    x1_bn = BatchNormalization()(x0)
    # First merge: concatenation (see docstring)
    x1 = concatenate([x1_bn, x0_bn], axis=-1)
    x1 = Activation(activation='relu')(x1)
    x1 = Conv2D(params['num_filters'], (params['kernel_h'], params['kernel_w']),
                strides=params['stride_conv'], padding='same')(x1)
    x2_bn = BatchNormalization()(x1)
    # Second merge: competitive (element-wise maximum)
    x2_max = Lambda(sl.Maxout)([x2_bn, x1_bn])
    x2 = Activation(activation='relu')(x2_max)
    out = Conv2D(params['num_filters'], (1, 1), strides=params['stride_conv'], padding='same')(x2)
    if params['dropout']:
        out = Dropout(rate=params['dropout_rate'])(out, training=True)
    return out
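To sanity-check a block in isolation, you can probe its output shape (a sketch; the 128x128 patch size is an assumption, and `params` is a dict filled as in the docstring above):

probe = Input((128, 128, 1))
features = CompetitiveDenseBlockInput(probe, params)
# (None, 128, 128, 64): spatial size preserved, channels = num_filters
print(K.int_shape(features))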
def CompetitiveDenseBlock(x, params):
    '''
    Competitive dense block of 3 convolutional layers with BN/ReLU. Both
    merges are maxouts here, since every feature map inside the network
    already has 'num_filters' channels.

    Inputs:
    -- Params (same dict as for CompetitiveDenseBlockInput)
    '''
    x0_bn = BatchNormalization()(x)
    x0 = Activation(activation='relu')(x0_bn)
    x0 = Conv2D(params['num_filters'], (params['kernel_h'], params['kernel_w']),
                strides=params['stride_conv'], padding='same')(x0)
    x1_bn = BatchNormalization()(x0)
    x1_max = Lambda(sl.Maxout)([x1_bn, x0_bn])
    x1 = Activation(activation='relu')(x1_max)
    x1 = Conv2D(params['num_filters'], (params['kernel_h'], params['kernel_w']),
                strides=params['stride_conv'], padding='same')(x1)
    x2_bn = BatchNormalization()(x1)
    x2_max = Lambda(sl.Maxout)([x2_bn, x1_max])
    x2 = Activation(activation='relu')(x2_max)
    out = Conv2D(params['num_filters'], (1, 1), strides=params['stride_conv'], padding='same')(x2)
    if params['dropout']:
        out = Dropout(rate=params['dropout_rate'])(out, training=True)
    return out
def CompetitiveUnpoolBlock(x, skip, params):
    '''
    Competitive unpool block: upsamples x with a transposed convolution,
    concatenates it with the skip connection from the encoder, convolves,
    and then merges competitively (maxout) with the skip connection.

    Inputs:
    -- Params (same dict as for CompetitiveDenseBlockInput)
    '''
    x0 = concatenate([Conv2DTranspose(params['num_filters'], params['pool'],
                                      strides=params['stride_pool'],
                                      padding='same')(x), skip], axis=-1)
    x0 = Activation(activation='relu')(x0)
    x0 = Conv2D(params['num_filters'], (params['kernel_h'], params['kernel_w']),
                strides=params['stride_conv'], padding='same')(x0)
    x0_bn = BatchNormalization()(x0)
    skip_bn = BatchNormalization()(skip)
    out = Lambda(sl.Maxout)([x0_bn, skip_bn])
    return out
def Maxout(inputs):
    """
    Maxout as in the paper Competitive Multi-Scale Convolution
    <https://arxiv.org/abs/1511.05635>.

    Args:
        inputs (list of tf.Tensor): two or more tensors of identical shape
            (NHWC or NCHW).
    Returns:
        tf.Tensor: the element-wise maximum, same shape as each input.
    """
    assert len(inputs) >= 2, 'There have to be at least two tensors'
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    # Stack the candidates along a new axis, then reduce with max so that
    # each position keeps the strongest response among the inputs.
    output = K.stack(inputs, axis=channel_axis)
    output = K.max(output, axis=channel_axis)
    return output
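For reference, a minimal usage sketch. The patch size and the dropout settings are assumptions, not values taken from the repository, and `sm`/`sl` must point at the modules defining the blocks and Maxout above:

params = {'patch_h': 256, 'patch_w': 256,          # assumed patch size
          'num_channels': 1, 'num_filters': 64,
          'kernel_h': 5, 'kernel_w': 5, 'kernel_c': 1,
          'stride_conv': 1, 'pool': 2, 'stride_pool': 2,
          'num_classes': 44,
          'dropout': False, 'dropout_rate': 0.1}   # dropout settings assumed
model = CDFNet(params)
model.summary()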
Can you please provide the model architecture script? This will enable me to understand the training and the model better, as I am new to deep learning. Thank you.