vlab-kaist / NN101_23S


[LAB] Week 2_Problem 2_이정우 #91

Closed · james1147 closed 1 year ago

james1147 commented 1 year ago

Problem

Week 2_Problem 2

Source Code

import torch
import numpy as np

##                         Problem 2                          ##
##                                                            ##
##           Arbitrary x_train, y_train are given.            ##
##     In this problem, x_train is list of list of float.     ##
##   Suppose that x and y have linear correlation, y=wx+b.    ##
##           (In this problem, w will be a vector.)           ##
##     In function training(), you should return [w, b].      ##
##          In function predict(), you should return          ##
##            list y_test corresponding to x_test.            ##
##                  Made by @jangyoujin0917                   ##
##                                                            ##

# NOTE : Feel free to use torch.optim and tensor.
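# With x_i a row of x_train, the model fitted below is
# y_i ≈ w · x_i + b = sum_j(w[j] * x_i[j]) + b, i.e. a single torch.nn.Linear layer.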

def training(x_train : list[list[float]], y_train : list[float]) -> tuple[list[float], float]: # DO NOT MODIFY FUNCTION NAME
    # data normalization
    # 1. Prevents overflow when calculating MSE
    # 2. Prevents underfitting
    # Note that you need to convert [w, b] to the original scale.
    # w = w * (y_max - y_min)
    # b = b * (y_max - y_min) + y_min
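    # Derivation: if y' = (y - y_min)/(y_max - y_min) and the model fits
    # y' ≈ w'·x + b', then y ≈ (y_max - y_min)*(w'·x + b') + y_min,
    # which is exactly the rescaling above.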
    y_train_np = np.array(y_train)
    y_min = y_train_np.min()
    y_max = y_train_np.max()
    normalize = lambda y: (y - y_min) / (y_max - y_min)

    x_train = torch.tensor(x_train, dtype=torch.float32)
    # Train on the normalized targets; [w, b] is rescaled back before returning.
    y_train = torch.tensor(normalize(y_train_np), dtype=torch.float32)

    input_size = x_train.shape[1]
    output_size = 1
    model = torch.nn.Linear(input_size, output_size)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.04) 
    loss_fn = torch.nn.MSELoss()  

    num_epochs = 1000
    for epoch in range(num_epochs):
        optimizer.zero_grad()  
        y_pred = model(x_train)
        loss = loss_fn(y_pred.view(-1), y_train) 
        loss.backward()
        optimizer.step()  

    # Rescale the fitted parameters back to the original scale of y:
    # w = w' * (y_max - y_min), b = b' * (y_max - y_min) + y_min
    w = (model.weight.detach().numpy().flatten() * (y_max - y_min)).tolist()
    b = float(model.bias.item() * (y_max - y_min) + y_min)

    return w, b

def predict(x_train : list[list[float]], y_train : list[float], x_test : list[list[float]]) -> list[float]: # DO NOT MODIFY FUNCTION NAME

    w, b = training(x_train, y_train)
    w = torch.tensor(w, dtype=torch.float32)

    x_test = torch.tensor(x_test, dtype=torch.float32)
    # training() already returns [w, b] in the original scale of y,
    # so the prediction is simply x @ w + b.
    y_test = torch.matmul(x_test, w) + b

    return y_test.tolist()

if __name__ == "__main__":
    x_train = [[0., 1.], [1., 0.], [2., 2.], [3., 1.], [4., 3.]]
    y_train = [3., 2., 7., 6., 11.] # y = x_0 + 2*x_1 + 1 # Note that not all test cases give clear line.
    x_test = [[5., 3.], [10., 6.], [8., 9.]]

    w, b = training(x_train, y_train)
    y_test = predict(x_train, y_train, x_test)

    print(w, b)
    print(y_test)
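
As a sanity check, here is a minimal sketch (the helper name lstsq_check is illustrative, not part of the submission) that compares the learned [w, b] against the closed-form least-squares solution via np.linalg.lstsq:

import numpy as np

def lstsq_check(x_train, y_train):
    # Append a constant 1 to each input so the bias is fitted jointly:
    # [w, b] minimizes ||A @ [w; b] - y||^2 with A = [X | 1].
    A = np.hstack([np.array(x_train), np.ones((len(x_train), 1))])
    coef, *_ = np.linalg.lstsq(A, np.array(y_train), rcond=None)
    return coef[:-1].tolist(), float(coef[-1])

# The training data above is generated exactly by y = x_0 + 2*x_1 + 1,
# so this prints approximately [1.0, 2.0] and 1.0.
print(lstsq_check([[0., 1.], [1., 0.], [2., 2.], [3., 1.], [4., 3.]],
                  [3., 2., 7., 6., 11.]))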

Description

.

Output (Optional)

No response

github-actions[bot] commented 1 year ago

This is an auto-generated grading output. Checking code of james1147 {'james1147': nan}

Dongyeongkim commented 1 year ago

This issue is now closed due to lack of progress.