Oneflow-Inc / OneFlow-Benchmark

OneFlow models for benchmarking.

Add new model of GhostNet #145

Closed fengyuchao97 closed 3 years ago

fengyuchao97 commented 3 years ago

    import torch
    import torch.nn as nn
    import math

    __all__ = ['ghost_net']


    def _make_divisible(v, divisor, min_value=None):
        """
        This function is taken from the original tf repo.
        It ensures that all layers have a channel number that is divisible by 8.
        It can be seen here:
        https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
        """
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v


    class SELayer(nn.Module):
        def __init__(self, channel, reduction=4):
            super(SELayer, self).__init__()
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Sequential(
                nn.Linear(channel, channel // reduction),
                nn.ReLU(inplace=True),
                nn.Linear(channel // reduction, channel),
            )

        def forward(self, x):
            b, c, _, _ = x.size()
            y = self.avg_pool(x).view(b, c)
            y = self.fc(y).view(b, c, 1, 1)
            y = torch.clamp(y, 0, 1)
            return x * y


    def depthwise_conv(inp, oup, kernel_size=3, stride=1, relu=False):
        return nn.Sequential(
            nn.Conv2d(inp, oup, kernel_size, stride, kernel_size // 2, groups=inp, bias=False),
            nn.BatchNorm2d(oup),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )


    class GhostModule(nn.Module):
        def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):
            super(GhostModule, self).__init__()
            self.oup = oup
            init_channels = math.ceil(oup / ratio)
            new_channels = init_channels * (ratio - 1)

            self.primary_conv = nn.Sequential(
                nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),
                nn.BatchNorm2d(init_channels),
                nn.ReLU(inplace=True) if relu else nn.Sequential(),
            )

            self.cheap_operation = nn.Sequential(
                nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),
                nn.BatchNorm2d(new_channels),
                nn.ReLU(inplace=True) if relu else nn.Sequential(),
            )

        def forward(self, x):
            x1 = self.primary_conv(x)
            x2 = self.cheap_operation(x1)
            out = torch.cat([x1, x2], dim=1)
            return out[:, :self.oup, :, :]


    class GhostBottleneck(nn.Module):
        def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se):
            super(GhostBottleneck, self).__init__()
            assert stride in [1, 2]

            self.conv = nn.Sequential(
                # pw
                GhostModule(inp, hidden_dim, kernel_size=1, relu=True),
                # dw
                depthwise_conv(hidden_dim, hidden_dim, kernel_size, stride, relu=False) if stride == 2 else nn.Sequential(),
                # Squeeze-and-Excite
                SELayer(hidden_dim) if use_se else nn.Sequential(),
                # pw-linear
                GhostModule(hidden_dim, oup, kernel_size=1, relu=False),
            )

            if stride == 1 and inp == oup:
                self.shortcut = nn.Sequential()
            else:
                self.shortcut = nn.Sequential(
                    depthwise_conv(inp, inp, kernel_size, stride, relu=False),
                    nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
                    nn.BatchNorm2d(oup),
                )

        def forward(self, x):
            return self.conv(x) + self.shortcut(x)


    class GhostNet(nn.Module):
        def __init__(self, cfgs, num_classes=1000, width_mult=1.):
            super(GhostNet, self).__init__()
            # setting of inverted residual blocks
            self.cfgs = cfgs

            # building first layer
            output_channel = _make_divisible(16 * width_mult, 4)
            layers = [nn.Sequential(
                nn.Conv2d(3, output_channel, 3, 2, 1, bias=False),
                nn.BatchNorm2d(output_channel),
                nn.ReLU(inplace=True)
            )]
            input_channel = output_channel

            # building inverted residual blocks
            block = GhostBottleneck
            for k, exp_size, c, use_se, s in self.cfgs:
                output_channel = _make_divisible(c * width_mult, 4)
                hidden_channel = _make_divisible(exp_size * width_mult, 4)
                layers.append(block(input_channel, hidden_channel, output_channel, k, s, use_se))
                input_channel = output_channel
            self.features = nn.Sequential(*layers)

            # building last several layers
            output_channel = _make_divisible(exp_size * width_mult, 4)
            self.squeeze = nn.Sequential(
                nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False),
                nn.BatchNorm2d(output_channel),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d((1, 1)),
            )
            input_channel = output_channel

            output_channel = 1280
            self.classifier = nn.Sequential(
                nn.Linear(input_channel, output_channel, bias=False),
                nn.BatchNorm1d(output_channel),
                nn.ReLU(inplace=True),
                nn.Dropout(0.2),
                nn.Linear(output_channel, num_classes),
            )

            self._initialize_weights()

        def forward(self, x):
            x = self.features(x)
            x = self.squeeze(x)
            x = x.view(x.size(0), -1)
            x = self.classifier(x)
            return x

        def _initialize_weights(self):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                elif isinstance(m, nn.BatchNorm2d):
                    m.weight.data.fill_(1)
                    m.bias.data.zero_()


    def ghost_net(**kwargs):
        """
        Constructs a GhostNet model
        """
        cfgs = [
            # k, t, c, SE, s
            [3,  16,  16, 0, 1],
            [3,  48,  24, 0, 2],
            [3,  72,  24, 0, 1],
            [5,  72,  40, 1, 2],
            [5, 120,  40, 1, 1],
            [3, 240,  80, 0, 2],
            [3, 200,  80, 0, 1],
            [3, 184,  80, 0, 1],
            [3, 184,  80, 0, 1],
            [3, 480, 112, 1, 1],
            [3, 672, 112, 1, 1],
            [5, 672, 160, 1, 2],
            [5, 960, 160, 0, 1],
            [5, 960, 160, 1, 1],
            [5, 960, 160, 0, 1],
            [5, 960, 160, 1, 1]
        ]
        return GhostNet(cfgs, **kwargs)


    if __name__ == '__main__':
        model = ghost_net()
        model.eval()
        print(model)
        input = torch.randn(32, 3, 224, 224)
        y = model(input)
        print(y)
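
As a quick sanity check, here is a minimal sketch (hypothetical sizes, relying on the `GhostModule` definition above) of the module's channel bookkeeping: the primary convolution produces `ceil(oup / ratio)` channels, the cheap depthwise convolution generates the remaining "ghost" channels, and the concatenated result is sliced back to exactly `oup` channels.

    # Hypothetical example sizes; GhostModule is the class defined above.
    import math
    import torch

    inp, oup, ratio = 16, 48, 2
    init_channels = math.ceil(oup / ratio)      # 24 channels from the primary conv
    new_channels = init_channels * (ratio - 1)  # 24 "ghost" channels from the cheap op

    m = GhostModule(inp, oup, ratio=ratio)
    y = m(torch.randn(1, inp, 56, 56))
    assert y.shape[1] == oup                    # concat is sliced back to oup channels
    print(init_channels, new_channels, y.shape)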

    python3 of_cnn_train_val.py \
        --train_data_dir=$DATA_ROOT/train \
        --train_data_part_num=256 \
        --val_data_dir=$DATA_ROOT/validation \
        --val_data_part_num=256 \
        --num_nodes=1 \
        --gpu_num_per_node=8 \
        --optimizer="rmsprop" \
        --decay_rate=0.9 \
        --momentum=0.9 \
        --learning_rate=0.4 \
        --wd=0.00004 \
        --lr_decay="exponential" \
        --lr_decay_rate=0.94 \
        --lr_decay_epochs=2 \
        --loss_print_every_n_iter=100 \
        --batch_size_per_device=128 \
        --val_batch_size_per_device=128 \
        --num_epoch=800 \
        --warmup_epochs=0 \
        --model="ghostnet" \
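
The flags above amount to RMSProp with an exponential learning-rate schedule: a base rate of 0.4 decayed by 0.94 every 2 epochs over 800 epochs. A rough sketch of that schedule, assuming the usual interpretation of `lr_decay_rate` / `lr_decay_epochs` (OneFlow's exact staircase or per-iteration behavior may differ):

    # Assumed schedule: lr(epoch) = learning_rate * lr_decay_rate ** (epoch / lr_decay_epochs)
    base_lr, decay_rate, decay_epochs = 0.4, 0.94, 2

    def lr_at_epoch(epoch):
        return base_lr * decay_rate ** (epoch / decay_epochs)

    for epoch in (0, 100, 400, 800):
        print(epoch, lr_at_epoch(epoch))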

JiaqingFu commented 3 years ago

@fyc1997 This conflicts with the BERT code. Please create your own branch first, pull master into it, and then submit.