alembics / disco-diffusion

RuntimeError: "addmm_impl_cpu_" not implemented for 'Half' #154

Closed Mr-Robot-ops closed 1 year ago

Mr-Robot-ops commented 1 year ago

```
RuntimeError                              Traceback (most recent call last)
<ipython-input> in <module>()
    216 torch.cuda.empty_cache()
    217 try:
--> 218   do_run()
    219 except KeyboardInterrupt:
    220   pass

12 frames

<ipython-input> in do_run()
    507   for prompt in frame_prompt:
    508     txt, weight = parse_prompt(prompt)
--> 509     txt = clip_model.encode_text(clip.tokenize(prompt).to(device)).float()
    510 
    511     if args.fuzzy_prompt:

/content/CLIP/clip/model.py in encode_text(self, text)
    346         x = x + self.positional_embedding.type(self.dtype)
    347         x = x.permute(1, 0, 2)  # NLD -> LND
--> 348         x = self.transformer(x)
    349         x = x.permute(1, 0, 2)  # LND -> NLD
    350         x = self.ln_final(x).type(self.dtype)

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1128         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130             return forward_call(*input, **kwargs)
   1131         # Do not call functions when jit is used
   1132         full_backward_hooks, non_full_backward_hooks = [], []

/content/CLIP/clip/model.py in forward(self, x)
    201 
    202     def forward(self, x: torch.Tensor):
--> 203         return self.resblocks(x)
    204 
    205 

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1128         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130             return forward_call(*input, **kwargs)
   1131         # Do not call functions when jit is used
   1132         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/container.py in forward(self, input)
    137     def forward(self, input):
    138         for module in self:
--> 139             input = module(input)
    140         return input
    141 

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1128         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130             return forward_call(*input, **kwargs)
   1131         # Do not call functions when jit is used
   1132         full_backward_hooks, non_full_backward_hooks = [], []

/content/CLIP/clip/model.py in forward(self, x)
    188 
    189     def forward(self, x: torch.Tensor):
--> 190         x = x + self.attention(self.ln_1(x))
    191         x = x + self.mlp(self.ln_2(x))
    192         return x

/content/CLIP/clip/model.py in attention(self, x)
    185     def attention(self, x: torch.Tensor):
    186         self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
--> 187         return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
    188 
    189     def forward(self, x: torch.Tensor):

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
   1128         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
   1129                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1130             return forward_call(*input, **kwargs)
   1131         # Do not call functions when jit is used
   1132         full_backward_hooks, non_full_backward_hooks = [], []

/usr/local/lib/python3.7/dist-packages/torch/nn/modules/activation.py in forward(self, query, key, value, key_padding_mask, need_weights, attn_mask, average_attn_weights)
   1158             training=self.training,
   1159             key_padding_mask=key_padding_mask, need_weights=need_weights,
-> 1160             attn_mask=attn_mask, average_attn_weights=average_attn_weights)
   1161         if self.batch_first and is_batched:
   1162             return attn_output.transpose(1, 0), attn_output_weights

/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in multi_head_attention_forward(query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight, q_proj_weight, k_proj_weight, v_proj_weight, static_k, static_v, average_attn_weights)
   5064     if not use_separate_proj_weight:
   5065         assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
-> 5066         q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
   5067     else:
   5068         assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"

/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in _in_projection_packed(q, k, v, w, b)
   4743     if q is k:
   4744         # self-attention
-> 4745         return linear(q, w, b).chunk(3, dim=-1)
   4746     else:
   4747         # encoder-decoder attention

RuntimeError: "addmm_impl_cpu_" not implemented for 'Half'
```

excing commented 1 year ago

Hello, how did you solve this problem? Thanks.

lawrence-peng commented 1 year ago

I have the same issue. How did you solve this problem, please?