I used other models like vgg16 and resnet50 from torchvision.models; the training loss declines step by step, but the validation accuracy doesn't improve — it stays at 50%. I don't know why. Do you have any ideas about what could cause this?
class vgg(nn.Module):
    """VGG16-based embedding network.

    Uses a pretrained VGG16 convolutional backbone, maps the flattened
    feature map to an ``embedding_size``-dimensional vector, and returns
    the L2-normalized embedding (unit length per sample).
    """

    def __init__(self, embedding_size, net_mode):
        super(vgg, self).__init__()
        assert net_mode in ['vgg16'], 'mode should be vgg16'
        # Pretrained VGG16 conv layers only (the original classifier head is dropped).
        self.featuresvgg16 = vgg16(pretrained=True).features
        # NOTE(review): 512 * 3 * 3 hard-codes the backbone's spatial output,
        # which only holds for one specific input resolution — confirm the
        # input image size; any mismatch raises a shape error at this Linear.
        self.featuresLayer = nn.Sequential(
            nn.Linear(512 * 3 * 3, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, embedding_size),
        )

    def l2_norm(self, x):
        """Return ``x`` with each row scaled to unit L2 norm.

        Bug fix: ``forward`` called ``self.l2_norm`` but the method was never
        defined (and ``nn.Module`` has none), so the first forward pass raised
        ``AttributeError``. The clamp guards against division by zero.
        """
        return x / x.norm(p=2, dim=1, keepdim=True).clamp(min=1e-12)

    def forward(self, x):
        x = self.featuresvgg16(x)          # conv feature map
        x = x.view(x.size(0), -1)          # flatten to (batch, 512 * 3 * 3)
        x = self.featuresLayer(x)          # embedding_size-dim feature
        x = self.l2_norm(x)                # unit-length embedding
        return x
I used other models like vgg16 and resnet50 from torchvision.models; the training loss declines step by step, but the validation accuracy doesn't improve — it stays at 50%. I don't know why. Do you have any ideas about what could cause this?
class vgg(nn.Module):
    """VGG16-based embedding network.

    Uses a pretrained VGG16 convolutional backbone, maps the flattened
    feature map to an ``embedding_size``-dimensional vector, and returns
    the L2-normalized embedding (unit length per sample).
    """

    def __init__(self, embedding_size, net_mode):
        super(vgg, self).__init__()
        assert net_mode in ['vgg16'], 'mode should be vgg16'
        # Pretrained VGG16 conv layers only (the original classifier head is dropped).
        self.featuresvgg16 = vgg16(pretrained=True).features
        # NOTE(review): 512 * 3 * 3 hard-codes the backbone's spatial output,
        # which only holds for one specific input resolution — confirm the
        # input image size; any mismatch raises a shape error at this Linear.
        self.featuresLayer = nn.Sequential(
            nn.Linear(512 * 3 * 3, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, embedding_size),
        )

    def l2_norm(self, x):
        """Return ``x`` with each row scaled to unit L2 norm.

        Bug fix: ``forward`` called ``self.l2_norm`` but the method was never
        defined (and ``nn.Module`` has none), so the first forward pass raised
        ``AttributeError``. The clamp guards against division by zero.
        """
        return x / x.norm(p=2, dim=1, keepdim=True).clamp(min=1e-12)

    def forward(self, x):
        x = self.featuresvgg16(x)          # conv feature map
        x = x.view(x.size(0), -1)          # flatten to (batch, 512 * 3 * 3)
        x = self.featuresLayer(x)          # embedding_size-dim feature
        x = self.l2_norm(x)                # unit-length embedding
        return x