vmtmxmf5 / Pytorch-

Machine learning and deep learning implementations in PyTorch

Basic Modelling #2

Open vmtmxmf5 opened 3 years ago

vmtmxmf5 commented 3 years ago
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim

X_train = torch.FloatTensor([[73,  80,  75],
                             [93,  88,  93],
                             [89,  91,  80],
                             [96,  98, 100],
                             [73,  66,  70]])
y_train = torch.FloatTensor([152, 185, 180, 196, 142])[..., None]  # shape (5, 1)

W = torch.zeros((X_train.shape[1], 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)

optm = optim.SGD([W, b], lr=1e-5)
for i in range(21):
    y_hat = X_train.matmul(W) + b # X @ W
    loss = torch.mean((y_hat - y_train)**2)

    optm.zero_grad()
    loss.backward() # compute gradients
    optm.step()

    print(y_hat.squeeze().detach(), loss.item())
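
For reference, the optim.SGD step above corresponds to the plain gradient-descent update below; a minimal sketch assuming the same X_train, y_train and lr=1e-5, re-initializing W and b:

W = torch.zeros((X_train.shape[1], 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
lr = 1e-5

for i in range(21):
    y_hat = X_train.matmul(W) + b
    loss = torch.mean((y_hat - y_train)**2)

    loss.backward()              # fills W.grad and b.grad
    with torch.no_grad():        # update parameters outside autograd
        W -= lr * W.grad
        b -= lr * b.grad
    W.grad.zero_()               # reset gradients, like optm.zero_grad()
    b.grad.zero_()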

Mini-batch training

from torch.utils.data import TensorDataset, DataLoader

ds = TensorDataset(X_train, y_train)
dataloader = DataLoader(ds, batch_size=2, shuffle=True)
list(ds)  # inspect the (X, y) pairs held by the dataset
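
To see what each iteration of the training loop below receives, one mini-batch can be pulled out directly (a quick check, not part of training):

X_batch, y_batch = next(iter(dataloader))
print(X_batch.shape, y_batch.shape)  # torch.Size([2, 3]) torch.Size([2, 1])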

model = nn.Linear(X_train.shape[1], y_train.shape[1])
optm = torch.optim.SGD(model.parameters(), lr=1e-5)

for epoch in range(2000):
    for batch_idx, sample in enumerate(dataloader):
        X_batch, y_batch = sample  # keep the full X_train / y_train intact

        pred = model(X_batch)
        cost = F.mse_loss(pred, y_batch)

        optm.zero_grad()
        cost.backward()
        optm.step()

        if epoch % 200 == 0:
            print('Batch : {}/{}, Loss : {}'.format(batch_idx+1, len(dataloader), cost.item()))

X_test = torch.FloatTensor([[73,80,75]])
model(X_test)
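
For inference the call is usually wrapped in torch.no_grad() so that no autograd graph is built; a minimal sketch:

with torch.no_grad():
    pred = model(X_test)
print(pred)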

Class-based implementation

The data is reused as-is; the optimizer is recreated below because the new model has its own parameters.

class myLinear(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 1)  # 3 input features -> 1 output

    def forward(self, x):
        return self.linear(x)

model = myLinear()
optm = torch.optim.SGD(model.parameters(), lr=1e-5)  # bind the optimizer to the new model's parameters

for epoch in range(2000):
    pred = model(X_train)
    cost = F.mse_loss(pred, y_train)

    optm.zero_grad()
    cost.backward()
    optm.step()

    if epoch % 200 == 0:
        print(cost.item(), pred.squeeze().detach())
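
The learned parameters of the class-based model can be read off its nn.Linear layer (a quick check):

print(model.linear.weight)  # learned weights, shape (1, 3)
print(model.linear.bias)    # learned bias, shape (1,)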