vlab-kaist / NN101_23S


[LAB] Week 2_Problem 2_이현석 #72

Closed RadioActiveBlackTi closed 1 year ago

RadioActiveBlackTi commented 1 year ago

Problem

Week 2_Problem 2

Source Code

import torch

##                         Problem 2                          ##
##                                                            ##
##            Arbitrary x_train, y_train are given.           ##
##   In this problem, x_train is a list of lists of floats.   ##
##  Suppose that x and y have a linear correlation, y = wx+b. ##
##           (In this problem, w will be a vector.)           ##
##     In function training(), you should return [w, b].      ##
##          In function predict(), you should return          ##
##          the list y_test corresponding to x_test.          ##
##                  Made by @jangyoujin0917                   ##
##                                                            ##

#attempt

# NOTE : Feel free to use torch.optim and tensor.

def training(x_train : list[list[float]], y_train : list[float]) -> tuple[list[float], float]: # DO NOT MODIFY FUNCTION NAME
    # data normalization
    # 1. Prevents overflow when computing the MSE.
    # 2. Keeps the targets in [0, 1], which helps gradient descent
    #    converge instead of underfitting.
    # Note that [w, b] must be converted back to the original scale:
    # w = w * (y_max - y_min)
    # b = b * (y_max - y_min) + y_min
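    # Why these formulas hold: if y_norm = (y - y_min)/(y_max - y_min) and the
    # fit gives y_norm ~ w_n*x + b_n, then y ~ (y_max - y_min)*(w_n*x + b_n) + y_min,
    # so w = w_n*(y_max - y_min) and b = b_n*(y_max - y_min) + y_min.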
    y_min = min(y_train)
    y_max = max(y_train)

    normalize = lambda y : (y - y_min)/(y_max - y_min) # assumes y_max > y_min

    ### IMPLEMENT FROM HERE
    n = len(x_train[0])  # number of features
    m = len(y_train)     # number of samples

    w = torch.ones(n, requires_grad=True, dtype=torch.float32)
    b = torch.ones(1, requires_grad=True, dtype=torch.float32)
    y_train_normalized = list(map(normalize,y_train))

    x = torch.FloatTensor(x_train)             # shape (m, n)
    y = torch.FloatTensor(y_train_normalized)  # shape (m,)

    epochs = 20000
    optimizer = torch.optim.Adam([w,b], lr=5e-3)

    for i in range(epochs):
        pred = x @ w + b                             # (m, n) @ (n,) -> (m,)
        cost = torch.sum((pred - y) ** 2) / (2 * m)  # half mean squared error

        optimizer.zero_grad()  # clear stale gradients
        cost.backward()        # backpropagate the scalar cost
        optimizer.step()       # Adam updates w and b in place

    # rescale [w, b] back to the original units of y
    w_out = [float(wi) * (y_max - y_min) for wi in w]
    b_out = float(b) * (y_max - y_min) + y_min
    return [w_out, b_out]

def predict(x_train : list[list[float]], y_train : list[float], x_test : list[list[float]]) -> list[float]: # DO NOT MODIFY FUNCTION NAME
    w, b = training(x_train, y_train)
    y_test = []
    for x in x_test:
        # dot product of the learned weights with the features, plus the bias
        value = sum(w[i] * x[i] for i in range(len(w))) + b
        y_test.append(value)
    return y_test

if __name__ == "__main__":
    x_train = [[0., 1.], [1., 0.], [2., 2.], [3., 1.], [4., 3.]]
    y_train = [3., 2., 7., 6., 11.] # y = x_0 + 2*x_1 + 1; note that not all test cases lie on an exact line.
    x_test = [[5., 3.], [10., 6.], [8., 9.]]

    w, b = training(x_train, y_train)
    y_test = predict(x_train, y_train, x_test)

    print(w, b)
    print(y_test)
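Since the model is plain linear least squares, the result of training() can be sanity-checked against a closed-form fit. Below is a minimal sketch, not part of the submission; closed_form is a hypothetical helper and assumes the same x_train/y_train lists as above.

import torch

def closed_form(x_train: list[list[float]], y_train: list[float]) -> tuple[list[float], float]:
    # design matrix with a trailing column of ones for the intercept term
    X = torch.tensor(x_train, dtype=torch.float32)
    X = torch.cat([X, torch.ones(X.shape[0], 1)], dim=1)
    y = torch.tensor(y_train, dtype=torch.float32).unsqueeze(1)
    sol = torch.linalg.lstsq(X, y).solution.squeeze(1)  # least-squares coefficients
    return sol[:-1].tolist(), float(sol[-1])            # (w, b)

On the sample data above, closed_form(x_train, y_train) should give approximately ([1.0, 2.0], 1.0), matching y = x_0 + 2*x_1 + 1, and the Adam loop should converge to roughly the same values.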

Description

No response

Output (Optional)

No response

github-actions[bot] commented 1 year ago

This is an auto-generated grading output. Your code failed to run. Please check again.

github-actions[bot] commented 1 year ago

This is an auto-generated grading output. Checking code of RadioActiveBlackTi {'RadioActiveBlackTi': nan}

github-actions[bot] commented 1 year ago

This is an auto-generated grading output. Checking code of RadioActiveBlackTi {'RadioActiveBlackTi': 70.4}