Implemented by following reference [44] of paper #0.
Preprocessing (run before the CNN code below)
```python
import pandas as pd

df = pd.read_pickle('./wafer-map/processed_data.pkl')

def to_binary_image(x):
    # Binarize the wafer map: defect cells (value 2) -> 1, everything else -> 0.
    x[x != 2] = 0
    x[x == 2] = 1
    return x

df.wafer_map = df.wafer_map.apply(to_binary_image)

# Encode the failure-type labels as integers 0-8.
mapping_type = {'Center': 0, 'Donut': 1, 'Edge-Loc': 2, 'Edge-Ring': 3, 'Loc': 4,
                'Random': 5, 'Scratch': 6, 'Near-full': 7, 'none': 8}
df = df.replace({'failure_type': mapping_type})

df.drop(['wafer_map_shape'], axis=1).reset_index(drop=True).to_pickle('./wafer-map/binary_map_for_CNN.pkl')
```
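As a quick sanity check (not part of the original issue; a minimal sketch assuming the pickle written above), the label counts and map values can be inspected:

```python
import pandas as pd

check = pd.read_pickle('./wafer-map/binary_map_for_CNN.pkl')
print(check.failure_type.value_counts())             # samples per encoded class 0-8
print(sorted(set(check.wafer_map.iloc[0].ravel())))  # should print [0, 1]
```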
CNN
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from PIL import Image

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

class WaferMapDataset(Dataset):
    def __init__(self):
        self.df = pd.read_pickle('./wafer-map/binary_map_for_CNN.pkl')
        self.wafer_map = self.df.wafer_map
        self.failure_type = self.df.failure_type
        self.transform = transforms.Compose([transforms.Resize(size=(286, 400)), transforms.ToTensor()])

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index):
        # Convert the 0/1 map to an 8-bit grayscale image (0/255) so that
        # Resize + ToTensor yield values in [0, 1].
        img = Image.fromarray((self.wafer_map.loc[index] * 255).astype(np.uint8))
        image = self.transform(img)
        label = self.failure_type.loc[index]
        return image, label

batch_size = 256
num_epochs = 20
test_split = .3

# Random 70/30 train/test split over the dataset indices.
dataset = WaferMapDataset()
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(test_split * dataset_size)
np.random.seed(42)
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
train_loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=train_sampler)
test_loader = DataLoader(dataset=dataset, batch_size=batch_size, sampler=test_sampler)

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), stride=(2, 2), ceil_mode=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), stride=(2, 2), ceil_mode=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), stride=(2, 2), ceil_mode=True),
        )
        # With 286x400 inputs, the three conv+pool stages (ceil_mode=True) leave a
        # 128 x 36 x 50 feature map, so the flattened size is 128 * 36 * 50.
        self.fc1 = nn.Linear(128 * 36 * 50, 625)
        self.fc2 = nn.Linear(625, 9)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=0.5, training=self.training)  # only drop during training
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

model = Net().to(device)
# The network outputs log-probabilities (log_softmax), so use NLLLoss here
# rather than CrossEntropyLoss (which applies log_softmax internally).
loss_function = nn.NLLLoss()
optimizer = optim.Adam(params=model.parameters(), lr=0.001, betas=(0.9, 0.999))

train_losses = []
test_losses = []
test_accuracy = []

def train(epoch):
    train_loss = 0
    model.train()
    for i, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = loss_function(output, target)
        loss.backward()
        train_loss += loss.item()
        optimizer.step()
    train_loss /= len(train_loader)
    train_losses.append(train_loss)
    return train_loss

def evaluate():
    model.eval()
    test_loss = 0
    num_correct = 0
    total_guesses = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            predict = torch.argmax(output, 1)
            test_loss += loss_function(output, target).item()
            num_correct += torch.eq(target, predict).sum().item()
            total_guesses += target.size(0)  # the last batch can be smaller than batch_size
    test_loss /= len(test_loader)
    accuracy = 100 * num_correct / total_guesses
    test_losses.append(test_loss)
    test_accuracy.append(accuracy)
    return test_loss, accuracy

for epoch in range(1, num_epochs + 1):
    train_loss = train(epoch)
    test_loss, accuracy = evaluate()
    print('[{} / {}]\tTrain loss: {:.4f}\tTest loss: {:.4f}\tAccuracy : {:.4f}%'.format(epoch, num_epochs, train_loss, test_loss, accuracy))
```
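The lists train_losses, test_losses, and test_accuracy are collected above but never used afterwards; a minimal sketch for plotting the curves (assumes matplotlib is installed):

```python
import matplotlib.pyplot as plt

epochs = range(1, num_epochs + 1)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(epochs, train_losses, label='train loss')
ax1.plot(epochs, test_losses, label='test loss')
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss')
ax1.legend()
ax2.plot(epochs, test_accuracy)
ax2.set_xlabel('epoch')
ax2.set_ylabel('test accuracy (%)')
plt.tight_layout()
plt.show()
```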
Accuracy is about 92.5%.
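Because the failure-type classes are usually heavily imbalanced, the overall figure can hide weak classes; a minimal sketch of a per-class breakdown on the test set, reusing model, test_loader, and device from above:

```python
import numpy as np

class_names = ['Center', 'Donut', 'Edge-Loc', 'Edge-Ring', 'Loc',
               'Random', 'Scratch', 'Near-full', 'none']  # same order as mapping_type
correct = np.zeros(len(class_names))
total = np.zeros(len(class_names))

model.eval()
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        predict = torch.argmax(model(data), 1)
        for c in range(len(class_names)):
            mask = target == c
            total[c] += mask.sum().item()
            correct[c] += (predict[mask] == c).sum().item()

for c, name in enumerate(class_names):
    if total[c] > 0:
        print('{:<10s} {:5.1f}%  ({}/{})'.format(name, 100 * correct[c] / total[c],
                                                 int(correct[c]), int(total[c])))
```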
Roughly the same after denoising.
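The denoising step itself is not shown in the issue. One possible interpretation is a small median filter applied to each binary map during preprocessing; a minimal sketch (the filter choice and 3x3 size are assumptions, not taken from the paper):

```python
from scipy.ndimage import median_filter

def denoise_map(x):
    # Suppress isolated defect pixels while keeping larger defect clusters.
    return median_filter(x, size=3)

# Applied in the preprocessing script, e.g. before saving the pickle:
# df.wafer_map = df.wafer_map.apply(denoise_map)
```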
Implementation and accuracy of the various CNN (Convolutional Neural Network) models presented in the papers, etc.
Currently organizing the papers uploaded by Professor 공정택; implementation planned.