MiaoRain / lund


Combustion and CNN #1

Open MiaoRain opened 4 years ago

MiaoRain commented 4 years ago

Problem: the loss is very large at the extrema (maxima and minima) of the curve. Solution: add per-sample loss weights at the extreme/outlier points when using the L2 loss, or use the Quantile Loss, and augment the max/min samples. A sketch of both losses follows.
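A minimal sketch of both ideas (the function names and the weighting rule are assumptions, not this repo's code): a per-sample weighted L2 loss that up-weights extreme points, and the pinball (quantile) loss.

import torch

def weighted_l2(pred, target, weight):
    # weight: per-sample factors, chosen larger near the extrema
    return (weight * (pred - target) ** 2).mean()

def quantile_loss(pred, target, q=0.9):
    # pinball loss: asymmetric penalty controlled by the quantile q
    diff = target - pred
    return torch.max(q * diff, (q - 1.0) * diff).mean()

# one possible weighting for labels normalized to 0-1: emphasize values near 0 or 1
def extreme_weight(target, alpha=4.0):
    return 1.0 + alpha * (2.0 * (target - 0.5)).abs()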

If the loss fails to drop: 1. increase the number of layers; 2. try a different loss function; 3. switch SGD to Adagrad or Adadelta; 4. revisit the initial parameter settings.

Goal: predict the heat release rate from natural-luminosity images with an AlexNet-based model.

EDA (Exploratory Data Analysis):

Problems:

1. Data imbalance. The heat-release curve is approximately normally distributed, so the number of samples near the peak is less than one eighth of the rest; the dataset is severely imbalanced.
2. Complex correlation. Before CA50 the image luminosity is strongly positively correlated with the heat release rate; after CA50 there is no correlation at all.
3. Underfitting.

Measures taken:

1. Increase the number of extreme-value (peak) samples by oversampling. Why?
2. Crop the data. Why?
3. Data augmentation: horizontal and vertical flips plus random rotation; brightness or contrast adjustments must not be used, since luminosity is the signal being regressed.
4. Use stride=2 convolutions instead of MaxPool. Why? What other measures can replace pooling?
5. Introduce the focal loss, which targets hard-sample classification in object detection: samples with a large loss are multiplied by a larger weight (a minimal sketch follows this list).
6. Increase the number of layers.

Modeling: since the images have some temporal ordering, each input stacks 2-3 adjacent frames, i.e. 9 channels instead of 3. Channel progression: 3->32->64->64->128->128, then 128*16*16 -> 128 -> 32. The regression head uses Sigmoid and the classification head uses Softmax. Why, why not? What are the common activation functions?
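A minimal focal-loss sketch for the classification head (the standard multi-class formulation from Lin et al.; the gamma value and mean reduction are assumptions, not this repo's code):

import torch
import torch.nn.functional as F

def focal_loss(logits, target, gamma=2.0):
    # down-weight easy samples: each term is scaled by (1 - p_t)^gamma,
    # so confidently-correct predictions contribute little to the loss
    log_p = F.log_softmax(logits, dim=1)
    log_pt = log_p.gather(1, target.unsqueeze(1)).squeeze(1)  # log-prob of the true class
    pt = log_pt.exp()
    return (-(1.0 - pt) ** gamma * log_pt).mean()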

Regression uses L1Loss, classification uses CrossEntropyLoss. What, how, why, why not? The optimizer is optim.Adam(params=net.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False) and the scheduler is torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15, 20, 30, 40, 50, 60, 80], gamma=0.5). Tuning: batch_size = 8 (why?), LR = 0.00005. A sketch of one training step follows.
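Putting these together, a sketch of one training step for the dual-head network (net, loader, the label tensors, and the unweighted loss sum are assumptions):

import torch
from torch import nn, optim

reg_criterion = nn.L1Loss()
cls_criterion = nn.CrossEntropyLoss()  # applies log-softmax itself: feed raw logits
                                       # (drop the Softmax in the model's fc5 if using this)

optimizer = optim.Adam(params=net.parameters(), lr=0.0001, betas=(0.9, 0.999),
                       eps=1e-08, weight_decay=0, amsgrad=False)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[10, 15, 20, 30, 40, 50, 60, 80], gamma=0.5)

for epoch in range(100):                 # epoch count assumed
    for img, y_reg, y_cls in loader:     # loader layout assumed
        optimizer.zero_grad()
        pred_reg, pred_cls = net(img)
        loss = reg_criterion(pred_reg.squeeze(1), y_reg) + cls_criterion(pred_cls, y_cls)
        loss.backward()
        optimizer.step()
    scheduler.step()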

torch.utils.data.random_split is used instead of KFold, for example:
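A sketch (dataset and the 80/20 ratio are assumptions):

from torch.utils.data import DataLoader, random_split

n_val = int(0.2 * len(dataset))          # hold out 20% for validation
train_set, val_set = random_split(dataset, [len(dataset) - n_val, n_val])
train_loader = DataLoader(train_set, batch_size=8, shuffle=True)
val_loader = DataLoader(val_set, batch_size=8)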

Three ways to load a model:
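The three usual options (a sketch; the file names and checkpoint keys are assumptions):

import torch

# 1. load weights only into an existing module (recommended)
net = AlexNet()
net.load_state_dict(torch.load('weights.pth'))

# 2. load the whole pickled module (couples the file to the class definition)
net = torch.load('model.pth')

# 3. load a training checkpoint dict (weights + optimizer state + epoch, ...)
ckpt = torch.load('checkpoint.pth')
net.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])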

Converting between pandas Series, NumPy arrays, and torch tensors: pd.Series.to_numpy, torch.from_numpy. For example:
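import pandas as pd
import torch

s = pd.Series([0.1, 0.5, 0.9])
arr = s.to_numpy()             # Series -> ndarray
ten = torch.from_numpy(arr)    # ndarray -> tensor (shares memory with arr)
s2 = pd.Series(ten.numpy())    # tensor -> ndarray -> Series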

Label normalization: labels in the 0-800 range are scaled to 0-1.
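A sketch of the min-max scaling and its inverse (the 0-800 range comes from the note above):

LABEL_MAX = 800.0

def normalize(y):      # 0-800 -> 0-1, to match the Sigmoid regression head
    return y / LABEL_MAX

def denormalize(y01):  # back to physical units after prediction
    return y01 * LABEL_MAX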

MiaoRain commented 4 years ago

Based on VGG:

from torch import nn

class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3,stride=2,padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=0),
            )

        self.conv2_1 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            )
        self.conv2_2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3,stride=2,padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            ) # conv2_2 replaces MaxPool2d: downsampling via stride-2 conv

        self.conv3_1 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3,padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            )
        self.conv3_2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2,padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            )

        self.conv4_1 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3,padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            )
        self.conv4_2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2,padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            )

        self.conv5_1 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3,padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            )      

        self.conv6 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3,padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            )

        self.conv7 = nn.Sequential(
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3,padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            )

#         self.fc1 = nn.Sequential(
#            nn.Linear(in_features=9216, out_features=1000),
#            nn.ReLU(),
#            nn.Dropout())       
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=128*16*16, out_features=128),
            nn.ReLU(),
            #nn.Dropout()
            )
        #regression
        self.fc2 = nn.Sequential(
            nn.Linear(in_features=128, out_features=32),
            nn.ReLU(),
            )
        self.fc3 = nn.Sequential(
            nn.Linear(in_features=32, out_features=1),
            nn.Sigmoid(), # labels normalized to 0-1, so Sigmoid matches the target range
            #nn.Softmax(),
            )
        #classification
        self.fc4 = nn.Sequential(
            nn.Linear(in_features=128, out_features=32),
            nn.ReLU(),
        )
        self.fc5 = nn.Sequential(
            nn.Linear(in_features=32, out_features=4),
            nn.Softmax(dim=1), # dim must be given; note CrossEntropyLoss expects raw logits instead
            )

    def initialize(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data)        

    def forward(self, x):
        #pdb.set_trace()
        x = self.conv1(x)
        x = self.conv2_1(x)
        x = self.conv2_2(x)
        x = self.conv3_1(x)
        x = self.conv3_2(x)
        x = self.conv4_1(x)
        x = self.conv4_2(x)
        x = self.conv5_1(x)
        x = self.conv6(x)
        x = self.conv7(x)
        #pdb.set_trace()
        x = x.view(x.size(0), -1)
        x = self.fc1(x)
        #regression
        x1 = self.fc2(x)
        x1 = self.fc3(x1)
        #classification
        x2 = self.fc4(x)
        x2 = self.fc5(x2)
        return x1,x2
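A quick shape check (a sketch; the 512x512 input size is implied by fc1's 128*16*16 in_features):

import torch

net = AlexNet()                      # class defined above
x = torch.randn(2, 3, 512, 512)      # 512 input -> 16x16 feature map before fc1
reg, cls = net(x)
print(reg.shape, cls.shape)          # torch.Size([2, 1]) torch.Size([2, 4])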


time cost 43.14071083068848 s

MiaoRain commented 4 years ago

from torch import nn
import torch as t
from torch.nn import functional as F

class ResidualBlock(nn.Module):
    # Residual block submodule
    def __init__(self,inchannel,outchannel,stride=1,shortcut=None):
        super(ResidualBlock,self).__init__()
        self.left=nn.Sequential(
            nn.Conv2d(inchannel,outchannel,3,stride,1,bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel,outchannel,3,1,1,bias=False),
            nn.BatchNorm2d(outchannel)
        )

        self.right=shortcut

    def forward(self,x):
        out=self.left(x)
        residual=x if self.right is None else self.right(x)
        out+=residual
        return F.relu(out)

class ResNet(nn.Module):
    # main module: a ResNet34-style trunk
    # the network stacks several layers, each made of multiple residual blocks
    # ResidualBlock implements the block; _make_layer builds a layer
    def __init__(self,num_classes=1000):
        super(ResNet,self).__init__()
        self.pre=nn.Sequential(
            nn.Conv2d(3,64,7,2,3,bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3,2,1)
        )
        # repeated layers with 3, 4, 6 (and, if enabled, 3) residual blocks
        #self.layer0=self._make_layer(32,32,3) # unused: forward() never calls it, and pre outputs 64 channels
        self.layer1=self._make_layer(64,64,3)
        self.layer2=self._make_layer(64,128,4,stride=2)
        self.layer3=self._make_layer(128,256,6,stride=2)
        #self.layer4=self._make_layer(256,512,3,stride=2)

        # fully connected heads
        #self.fc=nn.Linear(512,num_classes)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=256*4*4, out_features=128), # 256 ch x 4x4 after avg_pool; the original 2048 only fits the disabled layer4 variant
            nn.ReLU(),
            #nn.Dropout()
            )
        #regression
        self.fc2 = nn.Sequential(
            nn.Linear(in_features=128, out_features=32),
            nn.ReLU(),
            )
        self.fc3 = nn.Sequential(
            nn.Linear(in_features=32, out_features=1),
            nn.Sigmoid(), # labels normalized to 0-1, so Sigmoid matches the target range
            #nn.Softmax(),
            )
        #classification
        self.fc4 = nn.Sequential(
            nn.Linear(in_features=128, out_features=32),
            nn.ReLU(),
        )
        self.fc5 = nn.Sequential(
            nn.Linear(in_features=32, out_features=4),
            nn.Softmax(dim=1), # dim must be given; note CrossEntropyLoss expects raw logits instead
            )

    def initialize(self):
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data) 

    def _make_layer(self,inchannel,outchannel,block_num,stride=1):
        # build one layer from several residual blocks
        shortcut=nn.Sequential(
            nn.Conv2d(inchannel,outchannel,1,stride,bias=False),
            nn.BatchNorm2d(outchannel))

        layers=[ ]
        layers.append(ResidualBlock(inchannel,outchannel,stride,shortcut))

        for i in range(1,block_num):
            layers.append(ResidualBlock(outchannel,outchannel))
        return nn.Sequential(*layers)

    def forward(self,x):

        x=self.pre(x)#->[4, 64, 128, 128]

        x=self.layer1(x)#->[4, 64, 128, 128]
        x=self.layer2(x)#->[4, 128, 64, 64]
        x=self.layer3(x)#->[4, 256, 32, 32]
        #x=self.layer4(x)#->[4, 512, 16, 16]

        x=F.avg_pool2d(x,7)#->[4, 256, 4, 4] (the old [4, 512, 2, 2] note matched the disabled layer4)
        x=x.view(x.size(0),-1)#->[4, 4096]
        #pdb.set_trace()
        x = self.fc1(x) #->[4, 128]

        #regression
        x1 = self.fc2(x)#->[4, 32]
        x1 = self.fc3(x1)#->[4, 1]
        #classification
        x2 = self.fc4(x)#->[4, 32]
        x2 = self.fc5(x2)#->[4, 4]
        return x1,x2
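A quick check of the annotated shapes (a sketch; a 512x512 input is implied by the pre-layer comment, with fc1 sized 256*4*4 as fixed above):

net = ResNet()                       # class defined above
x = t.randn(4, 3, 512, 512)          # batch of 4, matching the shape comments
reg, cls = net(x)
print(reg.shape, cls.shape)          # torch.Size([4, 1]) torch.Size([4, 4])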
