keke-nice / rPPG-MAE


Is the argument order passed to loss_func_rPPG reversed? #12

Open 408550969 opened 3 months ago

408550969 commented 3 months ago

In main_finetune the loss is called as `loss = loss_func_rPPG(outputs.unsqueeze(dim=1), Wave)`, but P_loss3 is defined as `def forward(self, gt_lable, pre_lable):`, with gt first and pre second. At the call site in main_finetune, however, pre comes first and gt second. Is this a bug?
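For reference, a minimal sketch of the two pieces being compared. Only the `forward` signature and the call line appear in this thread; the loss body below is an assumption (a negative Pearson correlation, a common rPPG waveform loss), and the shapes are invented for illustration. If the loss is symmetric in its two arguments the swap is harmless; if it treats gt and pre differently (e.g. normalizes, clamps, or detaches only one of them), the order changes training.

```python
import torch
import torch.nn as nn

class P_loss3(nn.Module):
    # Signature as in the repo: ground truth first, prediction second.
    # The body is a guess (negative Pearson correlation), not the repo's code.
    def forward(self, gt_lable, pre_lable):
        gt = gt_lable - gt_lable.mean(dim=-1, keepdim=True)
        pre = pre_lable - pre_lable.mean(dim=-1, keepdim=True)
        corr = (gt * pre).sum(dim=-1) / (gt.norm(dim=-1) * pre.norm(dim=-1) + 1e-8)
        return 1 - corr.mean()

loss_func_rPPG = P_loss3()

outputs = torch.randn(4, 160)  # hypothetical model output: (batch, T)
Wave = torch.rand(4, 1, 160)   # hypothetical ground-truth waveform in [0, 1]

# Call as in main_finetune: the prediction lands in the gt_lable slot.
loss = loss_func_rPPG(outputs.unsqueeze(dim=1), Wave)
```

For this particular (symmetric) body the swap would not change the value; whether it matters in the repo depends on the actual P_loss3 implementation.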

408550969 commented 3 months ago

After swapping gt and pre in `def forward` and training for 20 epochs, the loss drops to 0.2, but printing gt and pre shows they differ by a lot, and the actual MAE is around 54. Why is that? Sample printouts:

```
gt_lable tensor([[[0.2313, 0.1565, 0.0748,  ..., 0.0884, 0.0544, 0.1088]],

        [[0.7347, 0.8707, 0.7143,  ..., 0.7891, 0.4830, 0.2585]],

        [[0.1633, 0.1088, 0.2517,  ..., 0.2653, 0.3469, 0.3810]],

        ...,

        [[0.0779, 0.0714, 0.1623,  ..., 0.7857, 0.9286, 0.7338]],

        [[0.7208, 0.4351, 0.2403,  ..., 0.0844, 0.0714, 0.2532]],

        [[0.2792, 0.3636, 0.3636,  ..., 0.3052, 0.2338, 0.1688]]],
       device='cuda:0')

pre_lable tensor([[[ 0.0014, -0.0062, -0.0204,  ...,  0.1585,  0.1487,  0.1152]],

        [[-0.0052, -0.0055,  0.0087,  ...,  0.0259,  0.0039, -0.0163]],

        [[-0.0155, -0.0274, -0.0338,  ...,  0.0224,  0.0059, -0.0129]],

        ...,

        [[-0.0716, -0.0768, -0.0594,  ...,  0.0462,  0.0095, -0.0176]],

        [[ 0.0865,  0.0758,  0.0643,  ...,  0.1502,  0.1256,  0.0827]],

        [[ 0.0889,  0.1045,  0.1072,  ...,  0.0971,  0.0543,  0.0148]]],
       device='cuda:0', grad_fn=<UnsqueezeBackward0>)

gt_lable tensor([[[0.1753, 0.1104, 0.0714,  ..., 0.0260, 0.4805, 0.9481]],

        [[0.9610, 0.8247, 0.5974,  ..., 0.2727, 0.1558, 0.0519]],

        [[0.0844, 0.1364, 0.3182,  ..., 0.3571, 0.3312, 0.2792]],

        ...,

        [[1.0000, 0.8980, 0.7551,  ..., 0.2245, 0.1088, 0.0612]],

        [[0.1701, 0.1633, 0.3469,  ..., 0.4218, 0.3673, 0.3129]],

        [[0.3469, 0.2653, 0.1973,  ..., 0.1224, 0.0748, 0.8027]]],
       device='cuda:0')

pre_lable tensor([[[ 0.0318,  0.0015, -0.0131,  ...,  0.0301,  0.0004, -0.0301]],

        [[-0.0363, -0.0922, -0.1321,  ...,  0.0565,  0.0336,  0.0207]],

        [[ 0.0454,  0.0278,  0.0398,  ...,  0.0124,  0.0328,  0.0540]],

        ...,

        [[ 0.1293,  0.0871,  0.0432,  ..., -0.0387, -0.0182,  0.0212]],

        [[-0.0150, -0.0649, -0.1102,  ..., -0.1209, -0.1332, -0.1226]],

        [[ 0.0262, -0.0297, -0.0838,  ...,  0.0724,  0.0578,  0.0507]]],
       device='cuda:0', grad_fn=<UnsqueezeBackward0>)

gt_lable tensor([[[0.0816, 0.2789, 0.9728,  ..., 0.7551, 0.5170, 0.2585]],

        [[0.3878, 0.1497, 0.0680,  ..., 0.2993, 0.4354, 0.4218]],

        [[0.4082, 0.3878, 0.3129,  ..., 0.2041, 0.1497, 0.0952]],

        ...,

        [[0.0867, 0.2600, 0.3933,  ..., 0.2667, 0.2000, 0.1133]],

        [[0.2467, 0.1667, 0.1133,  ..., 0.3267, 0.7333, 0.8867]],

        [[0.4667, 1.0000, 0.7800,  ..., 0.2067, 0.0600, 0.0533]]],
       device='cuda:0')

pre_lable tensor([[[-0.0035, -0.0070, -0.0019,  ..., -0.0624, -0.0671, -0.0344]],

        [[ 0.0075, -0.0308, -0.0627,  ..., -0.0250, -0.0590, -0.0614]],

        [[ 0.0650,  0.0416,  0.0058,  ..., -0.0104,  0.0075,  0.0371]],

        ...,

        [[ 0.0301,  0.0055, -0.0073,  ...,  0.0471, -0.0083, -0.0529]],

        [[ 0.0918,  0.0716,  0.0410,  ...,  0.0683,  0.0061, -0.0491]],

        [[ 0.0408,  0.0605,  0.0501,  ..., -0.0119, -0.0405, -0.0381]]],
       device='cuda:0', grad_fn=<UnsqueezeBackward0>)
```
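One plausible reading of these printouts, offered as a guess rather than a confirmed fact about the repo: gt_lable looks min-max normalized to [0, 1] (it touches 1.0000), while pre_lable is small and roughly zero-centered. If loss_func_rPPG is correlation-based, it is invariant to the scale and offset of its inputs, so a loss of 0.2 only says the waveform shapes correlate; the raw amplitudes never need to match, and comparing the two tensors value-by-value is not meaningful. Heart-rate MAE then depends on the dominant frequency of the prediction, not on its raw values. Below is a sketch of how one might compare the two fairly; the helper names are hypothetical and a 30 fps sampling rate is assumed.

```python
import torch

def zscore(x, eps=1e-8):
    # Remove offset and scale so waveforms are comparable point-wise.
    return (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + eps)

def estimate_hr(wave, fps=30.0, lo=0.7, hi=3.0):
    # Hypothetical helper: HR from the FFT peak within a plausible band
    # (0.7-3.0 Hz, i.e. 42-180 bpm), assuming the signal is sampled at `fps`.
    wave = wave - wave.mean(dim=-1, keepdim=True)
    spec = torch.fft.rfft(wave, dim=-1).abs()
    freqs = torch.fft.rfftfreq(wave.shape[-1], d=1.0 / fps)
    band = (freqs >= lo) & (freqs <= hi)
    peak = spec[..., band].argmax(dim=-1)
    return freqs[band][peak] * 60.0  # beats per minute

# Compare normalized waveforms instead of raw values:
#   err = (zscore(pre_lable) - zscore(gt_lable)).abs().mean()
# and compute MAE in bpm from the estimated rates:
#   hr_mae = (estimate_hr(pre_lable) - estimate_hr(gt_lable)).abs().mean()
```

If the HR-level MAE is still around 54 bpm after normalizing like this, the problem is the frequency content of the predictions themselves, not the amplitude mismatch seen in the raw printouts.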