BlenderNeko / ComfyUI_TiledKSampler

Tiled samplers for ComfyUI
GNU General Public License v3.0

Fails when using a model patched with Tome #4

Closed: yrakwe closed this issue 1 year ago

yrakwe commented 1 year ago

With different ToMe patch ratios, tile sizes, and image sizes, the error can occur either immediately in the TiledKSampler or only after some number of steps.

Here is an example workflow that reproduces the issue.

JSON
{ "last_node_id": 11, "last_link_id": 16, "nodes": [ { "id": 7, "type": "CLIPTextEncode", "pos": [ 413, 389 ], "size": { "0": 425.27801513671875, "1": 180.6060791015625 }, "flags": {}, "order": 4, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 5 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 12 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "text, watermark" ] }, { "id": 6, "type": "CLIPTextEncode", "pos": [ 415, 186 ], "size": { "0": 422.84503173828125, "1": 164.31304931640625 }, "flags": {}, "order": 3, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", "link": 3 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 11 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "CLIPTextEncode" }, "widgets_values": [ "beautiful scenery nature glass bottle landscape, , purple galaxy bottle," ] }, { "id": 8, "type": "VAEDecode", "pos": [ 1209, 188 ], "size": { "0": 210, "1": 46 }, "flags": {}, "order": 6, "mode": 0, "inputs": [ { "name": "samples", "type": "LATENT", "link": 14 }, { "name": "vae", "type": "VAE", "link": 8 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 9 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "VAEDecode" } }, { "id": 9, "type": "SaveImage", "pos": [ 1451, 189 ], "size": { "0": 210, "1": 270 }, "flags": {}, "order": 7, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", "link": 9 } ], "properties": {}, "widgets_values": [ "ComfyUI" ] }, { "id": 4, "type": "CheckpointLoaderSimple", "pos": [ 26, 474 ], "size": { "0": 315, "1": 98 }, "flags": {}, "order": 0, "mode": 0, "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 15 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 3, 5 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [ 8 ], "slot_index": 2 } ], "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "widgets_values": [ "sd15.ckpt" ] }, { "id": 5, "type": "EmptyLatentImage", "pos": [ 515, 616 ], "size": { "0": 315, "1": 106 }, "flags": {}, "order": 1, "mode": 0, "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 13 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "EmptyLatentImage" }, "widgets_values": [ 768, 640, 1 ] }, { "id": 11, "type": "TomePatchModel", "pos": [ 626, 89 ], "size": [ 210, 58 ], "flags": {}, "order": 2, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 15 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 16 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "TomePatchModel" }, "widgets_values": [ 0.3 ] }, { "id": 10, "type": "BNK_TiledKSamplerAdvanced", "pos": [ 873, 185 ], "size": { "0": 315, "1": 406 }, "flags": {}, "order": 5, "mode": 0, "inputs": [ { "name": "model", "type": "MODEL", "link": 16 }, { "name": "positive", "type": "CONDITIONING", "link": 11 }, { "name": "negative", "type": "CONDITIONING", "link": 12 }, { "name": "latent_image", "type": "LATENT", "link": 13 } ], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 14 ], "slot_index": 0 } ], "properties": { "Node name for S&R": "BNK_TiledKSamplerAdvanced" }, "widgets_values": [ "enable", 56302674661794, "randomize", 512, 512, 1, 11, 6.5, "dpmpp_2m", "karras", 0, 10, "disable" ] } ], "links": [ [ 3, 4, 1, 6, 0, "CLIP" ], [ 5, 4, 1, 7, 0, "CLIP" ], [ 8, 4, 2, 8, 1, "VAE" ], [ 9, 8, 0, 9, 0, "IMAGE" ], [ 11, 6, 0, 10, 1, "CONDITIONING" ], [ 12, 7, 0, 10, 2, "CONDITIONING" ], [ 13, 5, 0, 10, 
3, "LATENT" ], [ 14, 10, 0, 8, 0, "LATENT" ], [ 15, 4, 0, 11, 0, "MODEL" ], [ 16, 11, 0, 10, 0, "MODEL" ] ], "groups": [], "config": {}, "extra": {}, "version": 0.4 }

Here's my traceback:

  File "D:\ComfyUI\execution.py", line 195, in execute
    recursive_execute(self.server, prompt, self.outputs, x, extra_data, executed)
  File "D:\ComfyUI\execution.py", line 58, in recursive_execute
    recursive_execute(server, prompt, outputs, input_unique_id, extra_data, executed)
  File "D:\ComfyUI\execution.py", line 58, in recursive_execute
    recursive_execute(server, prompt, outputs, input_unique_id, extra_data, executed)
  File "D:\ComfyUI\execution.py", line 67, in recursive_execute
    outputs[unique_id] = getattr(obj, obj.FUNCTION)(**input_data_all)
  File "D:\ComfyUI\custom_nodes\ComfyUI_TiledKSampler\nodes.py", line 138, in sample
    tile_result = sampler.sample(noise_tile, pos, neg, cfg=cfg, latent_image=latent_tiles, start_step=start_at_step + i * steps_per_tile, last_step=start_at_step + i*steps_per_tile + steps_per_tile, force_full_denoise=force_full_denoise and i+1 == end_at_step - start_at_step, denoise_mask=masks, disable_pbar=True)
  File "D:\ComfyUI\comfy\samplers.py", line 669, in sample
    samples = getattr(k_diffusion_sampling, "sample_{}".format(self.sampler))(self.model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
    return func(*args, **kwargs)
  File "D:\ComfyUI\comfy\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\ComfyUI\comfy\samplers.py", line 283, in forward
    out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, cond_concat=cond_concat, model_options=model_options)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\ComfyUI\comfy\k_diffusion\external.py", line 114, in forward
    eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
  File "D:\ComfyUI\comfy\k_diffusion\external.py", line 140, in get_eps
    return self.inner_model.apply_model(*args, **kwargs)
  File "D:\ComfyUI\comfy\samplers.py", line 271, in apply_model
    out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options)
  File "D:\ComfyUI\comfy\samplers.py", line 250, in sampling_function
    cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
  File "D:\ComfyUI\comfy\samplers.py", line 227, in calc_cond_uncond_batch
    output = model_function(input_x, timestep_, cond=c).chunk(batch_chunks)
  File "D:\ComfyUI\comfy\ldm\models\diffusion\ddpm.py", line 859, in apply_model
    x_recon = self.model(x_noisy, t, **cond)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\ComfyUI\comfy\ldm\models\diffusion\ddpm.py", line 1337, in forward
    out = self.diffusion_model(x, t, context=cc, control=control, transformer_options=transformer_options)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 821, in forward
    h = forward_timestep_embed(module, h, emb, context, transformer_options)
  File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 97, in forward_timestep_embed
    x = layer(x, context, transformer_options)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\accelerate\hooks.py", line 165, in new_forward
    output = old_forward(*args, **kwargs)
  File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 634, in forward
    x = block(x, context=context[i], transformer_options=transformer_options)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\stable-diffusion-webui\venv\lib\site-packages\accelerate\hooks.py", line 165, in new_forward
    output = old_forward(*args, **kwargs)
  File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 523, in forward
    return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
  File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 121, in checkpoint
    return CheckpointFunction.apply(func, len(inputs), *args)
  File "D:\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 136, in forward
    output_tensors = ctx.run_function(*ctx.input_tensors)
  File "D:\ComfyUI\comfy\ldm\modules\attention.py", line 550, in _forward
    m, u = tomesd.get_functions(x, transformer_options["tomesd"]["ratio"], transformer_options["original_shape"])
  File "D:\ComfyUI\comfy\ldm\modules\tomesd.py", line 140, in get_functions
    m, u = bipartite_soft_matching_random2d(x, w, h, stride_x, stride_y, r, no_rand)
  File "D:\ComfyUI\comfy\ldm\modules\tomesd.py", line 92, in bipartite_soft_matching_random2d
    node_max, node_idx = scores.max(dim=-1)
IndexError: max(): Expected reduction dim 2 to have non-zero size.
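For context (my own reading, not verified against the tomesd source): the IndexError means the `scores` tensor ToMe builds has a zero-sized last dimension, i.e. there are no destination tokens left to merge into for this tile, presumably because the merge grid is derived from `transformer_options["original_shape"]` (the full 768x640 image) while `x` only contains the tokens of a 512x512 tile. A minimal sketch of the same failure mode in plain PyTorch, with made-up shapes:

```python
import torch

# scores in bipartite_soft_matching_random2d is roughly
# [batch, num_src_tokens, num_dst_tokens]; the shapes below are
# hypothetical, chosen only to reproduce the error message.
scores = torch.randn(1, 4096, 0)  # empty destination token set

try:
    node_max, node_idx = scores.max(dim=-1)  # same call as tomesd.py line 92
except IndexError as e:
    print(e)  # max(): Expected reduction dim 2 to have non-zero size.
```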

BlenderNeko commented 1 year ago

Will probably need to PR ComfyUI to fix this.

BlenderNeko commented 1 year ago

Should now be fixed on the latest commit of ComfyUI.