aimclub / Fedot.Industrial

Python framework for automated time series classification, regression and forecasting
https://fedotindustrial.readthedocs.io
BSD 3-Clause "New" or "Revised" License

Sweep: Update of unit-test required #148

Closed by technocreep 3 months ago

technocreep commented 4 months ago

We need to create unit tests for `losses.py`, located in the `fedot_ind/core/models/nn/network_modules/` directory. The test module must be written with the pytest library and placed in the `tests/unit/core/models/nn/network_modules/` directory.
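
For reference, a minimal pytest module of the requested shape might look like the sketch below (illustrative only; `RMSELoss` is just one of the classes in `losses.py`, and the expected value assumes it computes the square root of the mean squared error):

import torch
from fedot_ind.core.models.nn.network_modules.losses import RMSELoss

def test_rmse_loss_simple_case():
    # All-ones predictions against all-zeros targets give an RMSE of 1.0,
    # assuming RMSELoss is sqrt(MSE).
    loss = RMSELoss()(torch.ones(3), torch.zeros(3))
    assert torch.isclose(loss, torch.tensor(1.0))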

sweep-ai[bot] commented 4 months ago

🚀 Here's the PR! #149

💎 Sweep Pro: You have unlimited Sweep issues

Relevant files:
https://github.com/aimclub/Fedot.Industrial/blob/c5c358ba8f9b87b626014d2da9b2135c82684258/fedot_ind/core/models/nn/network_modules/losses.py#L1-L264
https://github.com/aimclub/Fedot.Industrial/blob/c5c358ba8f9b87b626014d2da9b2135c82684258/fedot_ind/core/models/nn/network_impl/base_nn_model.py#L1-L253

Step 2: ⌨️ Coding

tests/unit/core/models/nn/network_modules/test_losses.py

Create a new file named `test_losses.py` in the `tests/unit/core/models/nn/network_modules/` directory with the following content:
import torch
import torch.nn as nn
from fedot_ind.core.models.nn.network_modules.losses import (
    lambda_prepare,
    ExpWeightedLoss,
    HuberLoss,
    LogCoshLoss,
    MaskedLossWrapper,
    CenterLoss,
    CenterPlusLoss,
    FocalLoss,
    TweedieLoss,
    SMAPELoss,
    RMSELoss,
)

def test_lambda_prepare_int():
    val = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    lambda_ = 5
    result = lambda_prepare(val, lambda_)
    assert torch.allclose(result, torch.tensor([[5.0, 5.0]]))

def test_lambda_prepare_list():
    val = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    lambda_ = [1, 2]
    result = lambda_prepare(val, lambda_)
    assert torch.allclose(result, torch.tensor([[1.0, 2.0]]))

def test_lambda_prepare_tensor():
    val = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    lambda_ = torch.tensor([3.0, 4.0])
    result = lambda_prepare(val, lambda_)
    assert torch.allclose(result, torch.tensor([3.0, 4.0]))

def test_exp_weighted_loss():
    time_steps = 5
    tolerance = 0.1
    loss_fn = ExpWeightedLoss(time_steps, tolerance)

    input_ = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [9.0, 10.0]])
    target = torch.tensor([[1.5, 2.5], [3.5, 4.5], [5.5, 6.5], [7.5, 8.5], [9.5, 10.5]])

    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.5), atol=1e-3)

def test_huber_loss_mean():
    loss_fn = HuberLoss(reduction='mean', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    # Each absolute error is 0.5 < delta, so the quadratic branch applies:
    # 0.5 * 0.5 ** 2 = 0.125 per element (assuming the standard Huber definition).
    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.125))

def test_huber_loss_sum():
    loss_fn = HuberLoss(reduction='sum', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    # Sum of three identical per-element losses of 0.125.
    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.375))

def test_huber_loss_none():
    loss_fn = HuberLoss(reduction='none', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    # With identical per-element errors, the unreduced loss must be identical per element.
    loss = loss_fn(input_, target)
    assert torch.allclose(loss, torch.tensor([0.125, 0.125, 0.125]))

def test_log_cosh_loss_mean():
    loss_fn = LogCoshLoss(reduction='mean', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.4463), atol=1e-4)

def test_log_cosh_loss_sum():
    loss_fn = LogCoshLoss(reduction='sum', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(1.3389), atol=1e-4)

def test_log_cosh_loss_none():
    loss_fn = LogCoshLoss(reduction='none', delta=1.0)

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(input_, target)
    assert torch.allclose(loss, torch.tensor([0.4463, 0.4463, 0.4463]), atol=1e-4)

def test_masked_loss_wrapper():
    loss_fn = nn.MSELoss()
    masked_loss_fn = MaskedLossWrapper(loss_fn)

    input_ = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
    target = torch.tensor([[1.5, float('nan')], [3.5, 4.5]])

    # The NaN target entry is masked out; the MSE over the remaining
    # three errors of 0.5 each is 0.25.
    loss = masked_loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.25))

def test_center_loss():
    c_out = 3
    logits_dim = 2
    loss_fn = CenterLoss(c_out, logits_dim)

    x = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    labels = torch.tensor([0, 1, 2])

    # The class centers are learnable parameters with random initialization,
    # so the exact loss value is not deterministic; check basic properties instead.
    loss = loss_fn(x, labels)
    assert loss.ndim == 0
    assert torch.isfinite(loss)
    assert loss.item() >= 0

def test_center_plus_loss():
    c_out = 3
    logits_dim = 3
    loss_fn = nn.CrossEntropyLoss()
    center_plus_loss_fn = CenterPlusLoss(loss_fn, c_out, λ=0.1, logits_dim=logits_dim)

    # Logits have one column per class so the wrapped cross-entropy term is
    # well-defined for labels 0..2 (assuming the same logits feed both terms).
    x = torch.tensor([[2.0, 0.1, 0.1], [0.1, 2.0, 0.1], [0.1, 0.1, 2.0]])
    labels = torch.tensor([0, 1, 2])

    # The center term again uses randomly initialized centers, so only basic
    # properties of the combined loss are asserted rather than an exact value.
    loss = center_plus_loss_fn(x, labels)
    assert loss.ndim == 0
    assert torch.isfinite(loss)
    assert loss.item() >= 0

def test_focal_loss_mean():
    alpha = torch.tensor([0.25, 0.75])
    gamma = 2.0
    reduction = 'mean'
    loss_fn = FocalLoss(alpha, gamma, reduction)

    x = torch.tensor([[0.8, 0.2], [0.3, 0.7]])
    y = torch.tensor([0, 1])

    loss = loss_fn(x, y)
    assert torch.isclose(loss, torch.tensor(0.0595), atol=1e-4)

def test_focal_loss_sum():
    alpha = None
    gamma = 1.5
    reduction = 'sum'
    loss_fn = FocalLoss(alpha, gamma, reduction)

    x = torch.tensor([[0.8, 0.2], [0.3, 0.7]])
    y = torch.tensor([0, 1])

    loss = loss_fn(x, y)
    assert torch.isclose(loss, torch.tensor(0.2707), atol=1e-4)

def test_focal_loss_none():
    alpha = torch.tensor([0.5, 0.5])
    gamma = 1.0
    reduction = 'none'
    loss_fn = FocalLoss(alpha, gamma, reduction)

    x = torch.tensor([[0.8, 0.2], [0.3, 0.7]])
    y = torch.tensor([0, 1])

    loss = loss_fn(x, y)
    assert torch.allclose(loss, torch.tensor([0.0400, 0.1050]), atol=1e-4)

def test_tweedie_loss_p1_5():
    p = 1.5
    eps = 1e-8
    loss_fn = TweedieLoss(p, eps)

    inp = torch.tensor([1.0, 2.0, 3.0])
    targ = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(inp, targ)
    assert torch.isclose(loss, torch.tensor(0.2500), atol=1e-4)

def test_tweedie_loss_p1_8():
    p = 1.8
    eps = 1e-8
    loss_fn = TweedieLoss(p, eps)

    inp = torch.tensor([1.0, 2.0, 3.0])
    targ = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(inp, targ)
    assert torch.isclose(loss, torch.tensor(0.1667), atol=1e-4)

def test_smape_loss():
    loss_fn = SMAPELoss()

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(16.6667), atol=1e-4)

def test_rmse_loss():
    loss_fn = RMSELoss()

    input_ = torch.tensor([1.0, 2.0, 3.0])
    target = torch.tensor([1.5, 2.5, 3.5])

    loss = loss_fn(input_, target)
    assert torch.isclose(loss, torch.tensor(0.5))
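
Assuming the module is saved at the path above, the suite can be run with the usual pytest CLI, or programmatically via pytest's Python entry point as sketched below (the file path is the one requested in the issue):

import sys

import pytest

if __name__ == "__main__":
    # Run only this test module verbosely and stop at the first failure.
    sys.exit(pytest.main([
        "tests/unit/core/models/nn/network_modules/test_losses.py",
        "-v", "-x",
    ]))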

Step 3: 🔄️ Validating

Your changes have been successfully made to the branch sweep/update_of_unittest_required. I have validated these changes using a syntax checker and a linter.


[!TIP] To recreate the pull request, edit the issue title or description.

This is an automated message generated by Sweep AI.