Open Wangrui025 opened 10 months ago
This is an issue with the library pytorch-fast-transformers because you haven't compiled the CUDA operators.
You need to clone it from https://github.com/idiap/fast-transformers.git and then install it manually using pip install .
This process will compile the .cpp and .cu files.
My CUDA version is 12.2 and I installed the latest torch version (2.1.2+cu121) and replaced setup.py with an absolute path.
@lru_cache(None)
def cuda_toolkit_available():
try:
call(["/usr/local/cuda-12.2/bin/nvcc"], stdout=DEVNULL, stderr=DEVNULL)
return True
After making these changes, the installation was successful.
attribute2music stage. I have downloaded the attribute2music.pt checkpoint model from Google Drive. However, when
cli_main()
File "interactive_dict_v5_1billion.py", line 418, in cli_main
attributes(args)
File "interactive_dict_v5_1billion.py", line 347, in attributes
translations = task.inference_step(
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/fairseq/tasks/language_modeling.py", line 313, in inference_step
return generator.generate(
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/command_seq_generator.py", line 179, in generate
return self._generate(sample, **kwargs)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/command_seq_generator.py", line 315, in _generate
lprobs, avg_attn_scores = self.model.forward_decoder(
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/command_seq_generator.py", line 837, in forward_decoder
decoder_out = model.decoder.forward(tokens, sep_pos, encoder_out=encoder_out)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/transformer.py", line 193, in forward
x, extra = self.extract_features(
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/transformer.py", line 216, in extract_features
return self.extract_features_scriptable(
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/transformer.py", line 363, in extract_features_scriptable
x, layerattn, = layer(
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/transformer_layer.py", line 58, in forward
x, attn = self.run_self_attn(
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/transformer_layer.py", line 117, in run_self_attn
r = self.self_attn(query, query, query, attn_mask=None, key_padding_mask=key_padding_mask)
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/attention_layer.py", line 114, in forward
new_values = self.inner_attention(
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1110, in _call_impl
return forward_call(*input, **kwargs)
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/causal_linear_attention.py", line 70, in forward
V = causal_linear(
File "/workspace/music_gen/musecoco/2-attribute2music_model/linear_mask/linear/causal_linear_attention.py", line 23, in causal_linear
V_new = causal_dot_product(Q, K, V)
File "/opt/miniconda3/envs/MuseCoco/lib/python3.8/site-packages/fast_transformers/causal_product/__init__.py", line 43, in forward
CausalDotProduct.dot[device.type](
TypeError: 'NoneType' object is not callable
run bash interactive_1billion.sh 0 200
the following ERROR was reported: Traceback (most recent call last): File "interactive_dict_v5_1billion.py", line 428, in