Open wcatch opened 5 years ago
Hi. I did this project for a company, so I cannot provide you the trained weights. If you use pretrained weights from ImageNet, you can obtain a similar result to the one I posted. Hello lzccccc — is using pretrained weights from ImageNet something like the example below? Do we need to set the layers to non-trainable or not?
# Build a VGG16 feature extractor (conv blocks 1-5, no dense head), load the
# ImageNet "notop" weights, and freeze every layer so the backbone acts as a
# fixed feature extractor during training.
# NOTE(review): `tf`, `layers`, and `cfg` are assumed to be imported/defined
# earlier in the file (tf.keras.layers, a config factory) — confirm.
inputs = layers.Input(shape=(cfg().norm_h, cfg().norm_w, 3))

# Block 1
x = layers.Conv2D(64, (3, 3), activation='relu', padding='same',
                  name='block1_conv1')(inputs)
x = layers.Conv2D(64, (3, 3), activation='relu', padding='same',
                  name='block1_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

# Block 2
x = layers.Conv2D(128, (3, 3), activation='relu', padding='same',
                  name='block2_conv1')(x)
x = layers.Conv2D(128, (3, 3), activation='relu', padding='same',
                  name='block2_conv2')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

# Block 3
x = layers.Conv2D(256, (3, 3), activation='relu', padding='same',
                  name='block3_conv1')(x)
x = layers.Conv2D(256, (3, 3), activation='relu', padding='same',
                  name='block3_conv2')(x)
x = layers.Conv2D(256, (3, 3), activation='relu', padding='same',
                  name='block3_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

# Block 4
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block4_conv1')(x)
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block4_conv2')(x)
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block4_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

# Block 5
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block5_conv1')(x)
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block5_conv2')(x)
x = layers.Conv2D(512, (3, 3), activation='relu', padding='same',
                  name='block5_conv3')(x)
x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

# Collapse the spatial dimensions to a single 512-d vector. GAP carries no
# weights, so it cannot clash with the conv-only weight file below.
x = layers.GlobalAveragePooling2D()(x)

model1 = tf.keras.Model([inputs], [x], name='vgg16_1')

# by_name=True matches weights to layers via the canonical VGG16 layer names
# used above; layers present in this graph but absent from the "notop" HDF5
# file are skipped instead of breaking the default topological loading.
model1.load_weights('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                    by_name=True)
model1.summary()

# Freeze the whole backbone, then report each layer's trainable flag so the
# freeze can be verified in the log.
for layer in model1.layers:
    layer.trainable = False
for layer in model1.layers:
    print(layer, layer.trainable)
Hi. I did this project for a company, so I cannot provide you the trained weights. If you use pretrained weights from ImageNet, you can obtain a similar result to the one I posted.