happyharrycn / actionformer_release

Code release for ActionFormer (ECCV 2022)
MIT License

Training error #124

Closed miaolin968 closed 8 months ago

miaolin968 commented 9 months ago

An error occurs when training on the THUMOS14 dataset:

```
Start training model LocPointTransformer ...

[Train]: Epoch 0 started
Traceback (most recent call last):
  File "./train.py", line 178, in <module>
    main(args)
  File "./train.py", line 124, in main
    train_one_epoch(
  File "/opt/data/private/msl/model/actionformer/libs/utils/train_utils.py", line 276, in train_one_epoch
    losses = model(video_list)
  File "/root/miniconda3/envs/action/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/root/miniconda3/envs/action/lib/python3.8/site-packages/torch/nn/parallel/data_parallel.py", line 166, in forward
    return self.module(*inputs[0], **kwargs[0])
  File "/root/miniconda3/envs/action/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
    return forward_call(*input, **kwargs)
  File "/opt/data/private/msl/model/actionformer/libs/modeling/meta_archs.py", line 374, in forward
    losses = self.losses(
  File "/opt/data/private/msl/model/actionformer/libs/modeling/meta_archs.py", line 567, in losses
    cls_loss = sigmoid_focal_loss(
RuntimeError: The following operation failed in the TorchScript interpreter.
Traceback of TorchScript (most recent call last):
RuntimeError: nvrtc: error: invalid value for --gpu-architecture (-arch)

nvrtc compilation failed:

#define NAN __int_as_float(0x7fffffff)
#define POS_INFINITY __int_as_float(0x7f800000)
#define NEG_INFINITY __int_as_float(0xff800000)

template<typename T>
__device__ T maximum(T a, T b) {
  return isnan(a) ? a : (a > b ? a : b);
}

template<typename T>
__device__ T minimum(T a, T b) {
  return isnan(a) ? a : (a < b ? a : b);
}

extern "C" __global__
void fused_sigmoid_mul_neg_498301238522319704(float* tce_loss_1, double vgamma_2, float* ttargets_5, float* tinputs_5, float* aten_mul_2, float* aten_pow, float* aten_add_3, float* aten_add_1, float* aten_sigmoid) {
{
  if ((long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x) < 40800ll ? 1 : 0) {
    float tinputs_5_1 = __ldg(tinputs_5 + (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x));
    aten_sigmoid[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = 1.f / (1.f + (expf(0.f - tinputs_5_1)));
    float ttargets_5_1 = __ldg(ttargets_5 + (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x));
    aten_add_1[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = (0.f - ttargets_5_1) + 1.f;
    aten_add_3[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = (0.f - ((1.f / (1.f + (expf(0.f - tinputs_5_1)))) * ttargets_5_1 + ((0.f - 1.f / (1.f + (expf(0.f - tinputs_5_1)))) + 1.f) * ((0.f - ttargets_5_1) + 1.f))) + 1.f;
    aten_pow[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = powf((0.f - ((1.f / (1.f + (expf(0.f - tinputs_5_1)))) * ttargets_5_1 + ((0.f - 1.f / (1.f + (expf(0.f - tinputs_5_1)))) + 1.f) * ((0.f - ttargets_5_1) + 1.f))) + 1.f, (float)(vgamma_2));
    float v = __ldg(tce_loss_1 + (long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x));
    aten_mul_2[(long long)(threadIdx.x) + 512ll * (long long)(blockIdx.x)] = v * (powf((0.f - ((1.f / (1.f + (expf(0.f - tinputs_5_1)))) * ttargets_5_1 + ((0.f - 1.f / (1.f + (expf(0.f - tinputs_5_1)))) + 1.f) * ((0.f - ttargets_5_1) + 1.f))) + 1.f, (float)(vgamma_2)));
  }
}
}
```

My GPU is an RTX 4090 and my CUDA version is 11.3; everything else was installed according to install.md. What is causing this?
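
A minimal sketch for confirming the mismatch between the installed PyTorch build and the GPU, assuming only the standard `torch.version` / `torch.cuda` introspection APIs:

```python
# Diagnostic sketch: check which CUDA toolkit the installed PyTorch wheel was
# built against and which compute capability the GPU reports.
import torch

print(torch.__version__)                    # installed PyTorch build
print(torch.version.cuda)                   # CUDA toolkit the wheel was compiled with, e.g. "11.3"
print(torch.cuda.get_device_name(0))        # e.g. "NVIDIA GeForce RTX 4090"
print(torch.cuda.get_device_capability(0))  # (8, 9) for the RTX 4090 (Ada Lovelace)
# nvrtc from CUDA 11.3 does not know the sm_89 architecture, which is why the
# fused TorchScript kernel above fails with "invalid value for --gpu-architecture".
```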

miaolin968 commented 9 months ago

On the same machine, everything worked last week; this week it started throwing this error.

tzzcl commented 9 months ago

For your question: CUDA 11.3 cannot generate code for the RTX 4090's sm_89 architecture, so I think you should reinstall a PyTorch build prebuilt with CUDA 11.7/CUDA 11.8.
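
A sketch of that fix, assuming a pip-based environment (the cu118 index URL below should be checked against the official PyTorch install page for your Python/PyTorch version):

```python
# Sketch of the suggested fix:
# 1) Reinstall a PyTorch wheel built against CUDA 11.8, e.g.:
#      pip uninstall torch torchvision torchaudio
#      pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
# 2) Verify that the new build targets CUDA 11.8 and can run a CUDA op on the RTX 4090:
import torch

assert torch.version.cuda is not None and torch.version.cuda.startswith("11.8")
print(torch.cuda.get_device_capability(0))  # expected (8, 9) on an RTX 4090
x = torch.randn(8, device="cuda")
print(torch.sigmoid(x))  # a trivial CUDA op should now run without nvrtc errors
```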