Traceback (most recent call last):
File "C:\Users\Dimitrie\stable-diffusion-webui\modules\ui.py", line 223, in f
res = list(func(*args, **kwargs))
File "C:\Users\Dimitrie\stable-diffusion-webui\webui.py", line 63, in f
res = func(*args, **kwargs)
File "C:\Users\Dimitrie\stable-diffusion-webui\modules\txt2img.py", line 48, in txt2img
processed = process_images(p)
File "C:\Users\Dimitrie\stable-diffusion-webui\modules\processing.py", line 407, in process_images
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
File "C:\Users\Dimitrie\stable-diffusion-webui\modules\prompt_parser.py", line 138, in get_learned_conditioning
conds = model.get_learned_conditioning(texts)
File "C:\Users\Dimitrie\stable-diffusion-webui\repositories\stable-diffusion\ldm\models\diffusion\ddpm.py", line 558, in get_learned_conditioning
c = self.cond_stage_model(c)
File "C:\Users\Dimitrie\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\Dimitrie\stable-diffusion-webui\modules\sd_hijack.py", line 334, in forward
z1 = self.process_tokens(tokens, multipliers)
File "C:\Users\Dimitrie\stable-diffusion-webui\extensions\aesthetic-gradients\aesthetic_clip.py", line 219, in __call__
img_embs = slerp(self.image_embs, text_embs_2, self.aesthetic_slerp_angle)
File "C:\Users\Dimitrie\stable-diffusion-webui\extensions\aesthetic-gradients\aesthetic_clip.py", line 139, in slerp
low_norm = low / torch.norm(low, dim=1, keepdim=True)
File "C:\Users\Dimitrie\stable-diffusion-webui\venv\lib\site-packages\torch\functional.py", line 1472, in norm
return _VF.frobenius_norm(input, _dim, keepdim=keepdim)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
My own aesthetic embeddings and fantasy (from the original repo) work.
Traceback (most recent call last): File "C:\Users\Dimitrie\stable-diffusion-webui\modules\ui.py", line 223, in f res = list(func(*args, **kwargs)) File "C:\Users\Dimitrie\stable-diffusion-webui\webui.py", line 63, in f res = func(*args, **kwargs) File "C:\Users\Dimitrie\stable-diffusion-webui\modules\txt2img.py", line 48, in txt2img processed = process_images(p) File "C:\Users\Dimitrie\stable-diffusion-webui\modules\processing.py", line 407, in process_images uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) File "C:\Users\Dimitrie\stable-diffusion-webui\modules\prompt_parser.py", line 138, in get_learned_conditioning conds = model.get_learned_conditioning(texts) File "C:\Users\Dimitrie\stable-diffusion-webui\repositories\stable-diffusion\ldm\models\diffusion\ddpm.py", line 558, in get_learned_conditioning c = self.cond_stage_model(c) File "C:\Users\Dimitrie\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl return forward_call(*input, **kwargs) File "C:\Users\Dimitrie\stable-diffusion-webui\modules\sd_hijack.py", line 334, in forward z1 = self.process_tokens(tokens, multipliers) File "C:\Users\Dimitrie\stable-diffusion-webui\extensions\aesthetic-gradients\aesthetic_clip.py", line 219, in __call__ img_embs = slerp(self.image_embs, text_embs_2, self.aesthetic_slerp_angle) File "C:\Users\Dimitrie\stable-diffusion-webui\extensions\aesthetic-gradients\aesthetic_clip.py", line 139, in slerp low_norm = low / torch.norm(low, dim=1, keepdim=True) File "C:\Users\Dimitrie\stable-diffusion-webui\venv\lib\site-packages\torch\functional.py", line 1472, in norm return _VF.frobenius_norm(input, _dim, keepdim=keepdim) IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
My own aesthetic embeddings and fantasy (from the original repo) work.