I'm completely new to this, and it would be a shame if I couldn't get it to work. I'm running Ubuntu with an RTX 3090.
Ancestral sampling 16 samples with temp=0.99, top_k=0, top_p=0.0
0/12 [01:09<?, ?it/s]
Traceback (most recent call last):
  File "jukebox/sample.py", line 279, in <module>
    fire.Fire(run)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/fire/core.py", line 127, in Fire
    component_trace = _Fire(component, args, context, name)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/fire/core.py", line 366, in _Fire
    component, remaining_args)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/fire/core.py", line 542, in _CallCallable
    result = fn(*varargs, **kwargs)
  File "jukebox/sample.py", line 276, in run
    save_samples(model, device, hps, sample_hps)
  File "jukebox/sample.py", line 244, in save_samples
    ancestral_sample(labels, sampling_kwargs, priors, hps)
  File "jukebox/sample.py", line 127, in ancestral_sample
    zs = _sample(zs, labels, sampling_kwargs, priors, sample_levels, hps)
  File "jukebox/sample.py", line 102, in _sample
    zs = sample_level(zs, labels[level], sampling_kwargs[level], level, prior, total_length, hop_length, hps)
  File "jukebox/sample.py", line 85, in sample_level
    zs = sample_single_window(zs, labels, sampling_kwargs, level, prior, start, hps)
  File "jukebox/sample.py", line 69, in sample_single_window
    z_samples_i = prior.sample(n_samples=z_i.shape[0], z=z_i, z_conds=z_conds_i, y=y_i, **sampling_kwargs)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/prior/prior.py", line 271, in sample
    top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/prior/autoregressive.py", line 309, in primed_sample
    x_prime = self.transformer(x_prime, encoder_kv=encoder_kv, sample=True, fp16=fp16)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/transformer/transformer.py", line 187, in forward
    x = l(x, encoder_kv=None, sample=sample)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/transformer/transformer.py", line 64, in forward
    a = self.attn(self.ln_0(x), encoder_kv, sample)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/transformer/factored_attention.py", line 291, in forward
    x = self.c_attn(x)
  File "/home/me/miniconda3/envs/jukebox/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
    result = self.forward(*input, **kwargs)
  File "/home/me/projects/open_ai/jukebox1/jukebox/jukebox/transformer/ops.py", line 99, in forward
    x = t.addmm(self.b.type_as(x), x.view(-1, x.size(-1)), self.w.type_as(x)) # If x if float then float else half
RuntimeError: CUDA error: CUBLAS_STATUS_EXECUTION_FAILED when calling cublasGemmEx( handle, opa, opb, m, n, k, &falpha, a, CUDA_R_16F, lda, b, CUDA_R_16F, ldb, &fbeta, c, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)
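In case it helps narrow this down, here is a small diagnostic sketch (not a fix) I can run in the same conda env. It prints the PyTorch/CUDA build info and then tries a half-precision addmm on the GPU in isolation, the same kind of call that ops.py line 99 hands to cublasGemmEx in the traceback above. torch.cuda.get_arch_list() only exists in newer PyTorch builds, so it is guarded with hasattr; everything else is standard PyTorch.

import torch

# Build / device info; just a diagnostic sketch, not a fix.
print("torch:", torch.__version__, "| built for CUDA:", torch.version.cuda)
print("device:", torch.cuda.get_device_name(0))

# get_arch_list() may not exist in the older PyTorch that the Jukebox env installs.
# An RTX 3090 (compute capability 8.6) needs sm_86, or at least sm_80/PTX, in this list.
if hasattr(torch.cuda, "get_arch_list"):
    print("compiled arches:", torch.cuda.get_arch_list())

# Reproduce the failing op in isolation: a half-precision addmm on the GPU.
b = torch.randn(16, device="cuda", dtype=torch.float16)
x = torch.randn(16, 64, device="cuda", dtype=torch.float16)
w = torch.randn(64, 16, device="cuda", dtype=torch.float16)
print(torch.addmm(b, x, w).shape)

If even that tiny addmm throws the same CUBLAS error, that would point at the PyTorch/CUDA build not supporting the 3090 rather than anything Jukebox-specific.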
Beautiful program, though.