Haoming02 / sd-webui-resharpen

An Extension for Automatic1111 Webui that increases/decreases the details of images

'NoneType' object is not iterable when changing resolution #4

Closed: trihardseven closed this issue 4 months ago

trihardseven commented 4 months ago

The extension works as expected until I change the resolution to anything other than 1024x1024. The error occurs even with all other extensions disabled. Here is the traceback:

Begin to load 1 model
Reuse 1 loaded models
[Memory Management] Current Free GPU Memory (MB) =  16281.34912109375
[Memory Management] Model Memory (MB) =  0.0
[Memory Management] Minimal Inference Memory (MB) =  1024.0
[Memory Management] Estimated Remaining GPU Memory (MB) =  15257.34912109375
Moving model(s) has taken 0.03 seconds
  0%|          | 0/28 [00:00<?, ?it/s]
Traceback (most recent call last):
  File "N:\AI\stable-diffusion-webui-forge\modules_forge\main_thread.py", line 37, in loop
    task.work()
  File "N:\AI\stable-diffusion-webui-forge\modules_forge\main_thread.py", line 26, in work
    self.result = self.func(*self.args, **self.kwargs)
  File "N:\AI\stable-diffusion-webui-forge\modules\txt2img.py", line 111, in txt2img_function
    processed = processing.process_images(p)
  File "N:\AI\stable-diffusion-webui-forge\modules\processing.py", line 752, in process_images
    res = process_images_inner(p)
  File "N:\AI\stable-diffusion-webui-forge\modules\processing.py", line 922, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "N:\AI\stable-diffusion-webui-forge\modules\processing.py", line 1275, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "N:\AI\stable-diffusion-webui-forge\modules\sd_samplers_kdiffusion.py", line 251, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "N:\AI\stable-diffusion-webui-forge\modules\sd_samplers_common.py", line 263, in launch_sampling
    return func()
  File "N:\AI\stable-diffusion-webui-forge\modules\sd_samplers_kdiffusion.py", line 251, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\modules\sd_samplers_cfg_denoiser.py", line 182, in forward
    denoised = forge_sampler.forge_sample(self, denoiser_params=denoiser_params,
  File "N:\AI\stable-diffusion-webui-forge\modules_forge\forge_sampler.py", line 88, in forge_sample
    denoised = sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options, seed)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\modules\samplers.py", line 289, in sampling_function
    cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond_, x, timestep, model_options)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\modules\samplers.py", line 258, in calc_cond_uncond_batch
    output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\modules\model_base.py", line 90, in apply_model
    model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 867, in forward
    h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\diffusionmodules\openaimodel.py", line 55, in forward_timestep_embed
    x = layer(x, context, transformer_options)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\attention.py", line 620, in forward
    x = block(x, context=context[i], transformer_options=transformer_options)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\venv\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\attention.py", line 447, in forward
    return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\diffusionmodules\util.py", line 194, in checkpoint
    return func(*inputs)
  File "N:\AI\stable-diffusion-webui-forge\ldm_patched\ldm\modules\attention.py", line 552, in _forward
    n = p(n, extra_options)
  File "N:\AI\stable-diffusion-webui-forge\extensions\sd-forge-couple\scripts\attention_couple.py", line 67, in attn2_output_patch
    mask_downsample = get_mask(
  File "N:\AI\stable-diffusion-webui-forge\extensions\sd-forge-couple\scripts\attention_masks.py", line 27, in get_mask
    mask_downsample = mask_downsample.view(num_conds, num_tokens, 1).repeat_interleave(
RuntimeError: shape '[3, 4160, 1]' is invalid for input of size 768
shape '[3, 4160, 1]' is invalid for input of size 768
*** Error completing request
*** Arguments: ('task(vqkdx3uwi27upfo)', <gradio.routes.Request object at 0x0000020E10B9F940>, '1girl\n1boy', '', [], 28, 'Euler a', 1, 1, 7, 1024, 1032, False, 0.45, 1.3, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], 0, False, '', 0.8, 2797109398, False, -1, 0, 0, 0, 0.0, 4, 512, 512, True, 'None', 'None', 0, True, 'Horizontal', 'None', '', 'Basic', [['0:0.5', '0.0:1.0', '1.0'], ['0.5:1.0', '0.0:1.0', '1.0']], False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, False, False, False, 0, False) {}
    Traceback (most recent call last):
      File "N:\AI\stable-diffusion-webui-forge\modules\call_queue.py", line 57, in f
        res = list(func(*args, **kwargs))
    TypeError: 'NoneType' object is not iterable
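
The numbers in the RuntimeError are consistent with the 1024x1032 resolution visible in the arguments above: the latent is 1/8 of the image (128x129), and one stride-2 attention level below that gives roughly 64x65 = 4160 tokens, while the conditioning masks total only 768 elements (3 x 16 x 16, i.e. a 16x16 grid per conditioning, as would be produced for 1024x1024). A minimal sketch reproducing the reshape failure; the tensor sizes are taken from the error message, and the 16x16 per-conditioning grid is an assumption consistent with 768 = 3 * 16 * 16:

import torch

num_conds = 3
num_tokens = 64 * 65                      # 4160 attention tokens at ~1024x1032
mask = torch.ones(num_conds * 16 * 16)    # 768 elements, pooled for 1024x1024

try:
    mask.view(num_conds, num_tokens, 1)
except RuntimeError as e:
    print(e)  # shape '[3, 4160, 1]' is invalid for input of size 768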
Haoming02 commented 4 months ago

Try the suggestions here:

https://github.com/Haoming02/sd-forge-couple#typeerror-nonetype

trihardseven commented 4 months ago

Posted this on the wrong repository; the traceback comes from sd-forge-couple, not Resharpen. Using resolutions that are multiples of 64 fixes it.
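
For reference, a small sketch of the workaround: snapping a chosen dimension to the nearest multiple of 64 before generating. snap_to_multiple is an illustrative helper, not part of the webui or either extension:

def snap_to_multiple(value: int, base: int = 64) -> int:
    # Illustrative helper (not part of the webui): round a dimension to the
    # nearest multiple of `base`, never going below `base` itself.
    return max(base, round(value / base) * base)

print(snap_to_multiple(1032))  # 1024 (the failing height above, snapped)
print(snap_to_multiple(1024))  # 1024 (already valid, unchanged)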