lllyasviel / stable-diffusion-webui-forge

GNU Affero General Public License v3.0
7.36k stars 713 forks source link

Error when loading a model after latest commits #1060

Open gtx155 opened 1 month ago

gtx155 commented 1 month ago

Moving model(s) has taken 9.44 seconds 0%| | 0/10 [00:01<?, ?it/s] Traceback (most recent call last): File "D:\webui_forge_cu121_torch231\webui\modules_forge\main_thread.py", line 37, in loop task.work() File "D:\webui_forge_cu121_torch231\webui\modules_forge\main_thread.py", line 26, in work self.result = self.func(*self.args, self.kwargs) File "D:\webui_forge_cu121_torch231\webui\modules\txt2img.py", line 110, in txt2img_function processed = processing.process_images(p) File "D:\webui_forge_cu121_torch231\webui\modules\processing.py", line 809, in process_images res = process_images_inner(p) File "D:\webui_forge_cu121_torch231\webui\modules\processing.py", line 952, in process_images_inner samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) File "D:\webui_forge_cu121_torch231\webui\modules\processing.py", line 1323, in sample samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) File "D:\webui_forge_cu121_torch231\webui\modules\sd_samplers_kdiffusion.py", line 234, in sample samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs)) File "D:\webui_forge_cu121_torch231\webui\modules\sd_samplers_common.py", line 272, in launch_sampling return func() File "D:\webui_forge_cu121_torch231\webui\modules\sd_samplers_kdiffusion.py", line 234, in samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, extra_params_kwargs)) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context return func(*args, *kwargs) File "D:\webui_forge_cu121_torch231\webui\k_diffusion\sampling.py", line 594, in 
sample_dpmpp_2m denoised = model(x, sigmas[i] s_in, extra_args) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, kwargs) File "D:\webui_forge_cu121_torch231\webui\modules\sd_samplers_cfg_denoiser.py", line 186, in forward denoised, cond_pred, uncond_pred = sampling_function(self, denoiser_params=denoiser_params, cond_scale=cond_scale, cond_composition=cond_composition) File "D:\webui_forge_cu121_torch231\webui\backend\sampling\sampling_function.py", line 339, in sampling_function denoised, cond_pred, uncond_pred = sampling_function_inner(model, x, timestep, uncond, cond, cond_scale, model_options, seed, return_full=True) File "D:\webui_forge_cu121_torch231\webui\backend\sampling\sampling_function.py", line 284, in sampling_function_inner cond_pred, uncond_pred = calc_cond_uncondbatch(model, cond, uncond, x, timestep, model_options) File "D:\webui_forge_cu121_torch231\webui\backend\sampling\sampling_function.py", line 254, in calc_cond_uncond_batch output = model.apply_model(inputx, timestep, c).chunk(batch_chunks) File "D:\webui_forge_cu121_torch231\webui\backend\modules\k_model.py", line 45, in apply_model model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, *extra_conds).float() File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, kwargs) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 713, in forward h = module(h, emb, context, transformer_options) File 
"D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, *kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(args, kwargs) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 83, in forward x = layer(x, context, transformer_options) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(*args, kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, *kwargs) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 321, in forward x = block(x, context=context[i], transformer_options=transformer_options) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl return forward_call(*args, kwargs) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 181, in forward return checkpoint(self._forward, (x, context, transformer_options), None, self.checkpoint) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 12, in checkpoint return f(args) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 235, in _forward n = self.attn1(n, context=context_attn1, value=value_attn1, transformer_options=extra_options) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl return self._call_impl(args, kwargs) File "D:\webui_forge_cu121_torch231\system\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl 
return forward_call(*args, kwargs) File "D:\webui_forge_cu121_torch231\webui\backend\nn\unet.py", line 154, in forward out = attention_function(q, k, v, self.heads, mask) File "D:\webui_forge_cu121_torch231\webui\backend\attention.py", line 345, in attention_pytorch out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False) RuntimeError: CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasGemmStridedBatchedEx(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&falpha, a, CUDA_R_16BF, (int)lda, stridea, b, CUDA_R_16BF, (int)ldb, strideb, (void*)&fbeta, c, CUDA_R_16BF, (int)ldc, stridec, (int)num_batches, compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP) CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasGemmStridedBatchedEx(handle, opa, opb, (int)m, (int)n, (int)k, (void*)&falpha, a, CUDA_R_16BF, (int)lda, stridea, b, CUDA_R_16BF, (int)ldb, strideb, (void*)&fbeta, c, CUDA_R_16BF, (int)ldc, stridec, (int)num_batches, compute_type, CUBLAS_GEMM_DEFAULT_TENSOR_OP) ** Error completing request Arguments: ('task(9jxtq5t95hpu5ut)', <gradio.route_utils.Request object at 0x00000205B4981E10>, 'cat', '', [], 1, 1, 5, 0, 1152, 896, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', 'Use same scheduler', '', '', None, 0, 10, 'DPM++ 2M', 'Karras', False, -1, False, -1, 0, 0, 0, False, 7, 1, 'Constant', 0, 'Constant', 0, 1, 'enable', 'MEAN', 'AD', 1, False, 0, 'anisotropic', 0, 'reinhard', 100, 0, 'subtract', 0, 0, 'gaussian', 'add', 0, 100, 127, 0, 'hard_clamp', 5, 0, 'None', 'None', False, 'MultiDiffusion', 768, 768, 64, 4, False, True, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', '', 0, '', '', 0, '', '', True, False, False, False, False, False, False, 0, False) {} Traceback (most recent call last): File "D:\webui_forge_cu121_torch231\webui\modules\call_queue.py", line 74, in f res = list(func(*args, **kwargs)) TypeError: 'NoneType' object is not iterable

caxel-ap commented 1 month ago

I'm getting this exact same error on a laptop with an RTX 3070 (8 GB VRAM) and 16 GB RAM. SDXL works, but not the NF4 Flux model.

nvm01 commented 1 month ago

Same error here.