a2569875 / stable-diffusion-webui-composable-lora

This extension replaces the built-in LoRA forward procedure.
GNU Affero General Public License v3.0

Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0 #21

Open sashasubbbb opened 11 months ago

sashasubbbb commented 11 months ago

Using the latest dev webui. The error appears when an IA3 LyCORIS is used in the prompt. Any idea how to fix it? Thank you.

   Traceback (most recent call last):
      File "B:\AIimages\stable-diffusion-webui\modules\call_queue.py", line 58, in f
        res = list(func(*args, **kwargs))
      File "B:\AIimages\stable-diffusion-webui\modules\call_queue.py", line 37, in f
        res = func(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\modules\txt2img.py", line 62, in txt2img
        processed = processing.process_images(p)
      File "B:\AIimages\stable-diffusion-webui\modules\processing.py", line 677, in process_images
        res = process_images_inner(p)
      File "B:\AIimages\stable-diffusion-webui\extensions\sd-webui-controlnet\scripts\batch_hijack.py", line 42, in processing_process_images_hijack
        return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\modules\processing.py", line 783, in process_images_inner
        p.setup_conds()
      File "B:\AIimages\stable-diffusion-webui\modules\processing.py", line 1191, in setup_conds
        super().setup_conds()
      File "B:\AIimages\stable-diffusion-webui\modules\processing.py", line 364, in setup_conds
        self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, self.steps * self.step_multiplier, [self.cached_uc], self.extra_network_data)
      File "B:\AIimages\stable-diffusion-webui\modules\processing.py", line 353, in get_conds_with_caching
        cache[1] = function(shared.sd_model, required_prompts, steps)
      File "B:\AIimages\stable-diffusion-webui\modules\prompt_parser.py", line 163, in get_learned_conditioning
        conds = model.get_learned_conditioning(texts)
      File "B:\AIimages\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 669, in get_learned_conditioning
        c = self.cond_stage_model(c)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\modules\sd_hijack_clip.py", line 234, in forward
        z = self.process_tokens(tokens, multipliers)
      File "B:\AIimages\stable-diffusion-webui\modules\sd_hijack_clip.py", line 271, in process_tokens
        z = self.encode_with_transformers(tokens)
      File "B:\AIimages\stable-diffusion-webui\modules\sd_hijack_clip.py", line 324, in encode_with_transformers
        outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1538, in _call_impl
        result = forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 811, in forward
        return self.text_model(
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 721, in forward
        encoder_outputs = self.encoder(
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 650, in forward
        layer_outputs = encoder_layer(
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 379, in forward
        hidden_states, attn_weights = self.self_attn(
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 269, in forward
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
      File "B:\AIimages\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
        return forward_call(*args, **kwargs)
      File "B:\AIimages\stable-diffusion-webui\extensions\stable-diffusion-webui-composable-lora\composable_lora.py", line 466, in lora_Linear_forward
        res = lora_forward(self, input, res)
      File "B:\AIimages\stable-diffusion-webui\extensions\stable-diffusion-webui-composable-lora\composable_lora.py", line 77, in lora_forward
        patch = composable_lycoris.get_lora_patch(module, input, res, lora_layer_name)
      File "B:\AIimages\stable-diffusion-webui\extensions\stable-diffusion-webui-composable-lora\composable_lycoris.py", line 114, in get_lora_patch
        return get_lora_inference(converted_module, input)
      File "B:\AIimages\stable-diffusion-webui\extensions\stable-diffusion-webui-composable-lora\composable_lycoris.py", line 78, in get_lora_inference
        return module.inference(input)
      File "B:\AIimages\stable-diffusion-webui\extensions\stable-diffusion-webui-composable-lora\composable_lycoris.py", line 200, in inference
        return self.op(x, self.w, **self.extra_args)
    RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat in method wrapper_CUDA_addmv_)
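The last frame is the failing call, `self.op(x, self.w, **self.extra_args)` in composable_lycoris.py, where the input `x` is already on cuda:0 but the IA3 weight `self.w` apparently stayed on CPU. A minimal workaround sketch I'd try, assuming `self.w` is the offending CPU tensor (the attribute names come straight from the traceback; the lazy `.to()` move is my guess, not the extension's actual fix):

    # Hypothetical patch for inference() in composable_lycoris.py (line 200
    # of the traceback). Lazily moves the weight to the input's device/dtype
    # on first use so torch's addmv sees matching devices.
    def inference(self, x):
        if self.w.device != x.device:
            self.w = self.w.to(device=x.device, dtype=x.dtype)
        return self.op(x, self.w, **self.extra_args)

If that works, the cleaner fix is probably to move the converted module onto the model's device once, when it is built in get_lora_patch, rather than checking on every call.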

---