huchenlei / ComfyUI-layerdiffuse

Layer Diffuse custom nodes
Apache License 2.0

[Bug]: !!! Exception during processing!!! 1024 #78

Status: Open. forgetphp opened this issue 4 months ago

forgetphp commented 4 months ago

What happened? I used the workflow listed below and got this error: !!! Exception during processing!!! 1024

System Info: Apple M1 Max

Workflow JSON file (bug.json):

{
  "last_node_id": 55,
  "last_link_id": 107,
  "nodes": [
    {
      "id": 7,
      "type": "InvertMask",
      "pos": [
        2438,
        12
      ],
      "size": {
        "0": 210,
        "1": 26
      },
      "flags": {},
      "order": 15,
      "mode": 0,
      "inputs": [
        {
          "name": "mask",
          "type": "MASK",
          "link": 5
        }
      ],
      "outputs": [
        {
          "name": "MASK",
          "type": "MASK",
          "links": [
            7
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "InvertMask"
      }
    },
    {
      "id": 8,
      "type": "JoinImageWithAlpha",
      "pos": [
        2687.0218056640624,
        -107.47481201171871
      ],
      "size": {
        "0": 210,
        "1": 46
      },
      "flags": {},
      "order": 18,
      "mode": 0,
      "inputs": [
        {
          "name": "image",
          "type": "IMAGE",
          "link": 6
        },
        {
          "name": "alpha",
          "type": "MASK",
          "link": 7
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            8
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "JoinImageWithAlpha"
      }
    },
    {
      "id": 9,
      "type": "PreviewImage",
      "pos": [
        3092.7532392578128,
        -90.8581088867187
      ],
      "size": {
        "0": 210,
        "1": 246
      },
      "flags": {},
      "order": 21,
      "mode": 0,
      "inputs": [
        {
          "name": "images",
          "type": "IMAGE",
          "link": 8
        }
      ],
      "properties": {
        "Node name for S&R": "PreviewImage"
      }
    },
    {
      "id": 4,
      "type": "GroundingDinoSAMSegment (segment anything)",
      "pos": [
        1973,
        -80
      ],
      "size": {
        "0": 352.79998779296875,
        "1": 122
      },
      "flags": {},
      "order": 12,
      "mode": 0,
      "inputs": [
        {
          "name": "sam_model",
          "type": "SAM_MODEL",
          "link": 16
        },
        {
          "name": "grounding_dino_model",
          "type": "GROUNDING_DINO_MODEL",
          "link": 15
        },
        {
          "name": "image",
          "type": "IMAGE",
          "link": 11
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            6
          ],
          "shape": 3,
          "slot_index": 0
        },
        {
          "name": "MASK",
          "type": "MASK",
          "links": [
            5
          ],
          "shape": 3,
          "slot_index": 1
        }
      ],
      "properties": {
        "Node name for S&R": "GroundingDinoSAMSegment (segment anything)"
      },
      "widgets_values": [
        "head",
        0.3
      ]
    },
    {
      "id": 3,
      "type": "GroundingDinoModelLoader (segment anything)",
      "pos": [
        977,
        -188
      ],
      "size": {
        "0": 361.20001220703125,
        "1": 58
      },
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {
          "name": "GROUNDING_DINO_MODEL",
          "type": "GROUNDING_DINO_MODEL",
          "links": [
            13
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "GroundingDinoModelLoader (segment anything)"
      },
      "widgets_values": [
        "GroundingDINO_SwinB (938MB)"
      ]
    },
    {
      "id": 5,
      "type": "SAMModelLoader (segment anything)",
      "pos": [
        989,
        -55
      ],
      "size": {
        "0": 315,
        "1": 58
      },
      "flags": {},
      "order": 1,
      "mode": 0,
      "outputs": [
        {
          "name": "SAM_MODEL",
          "type": "SAM_MODEL",
          "links": [
            14
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "SAMModelLoader (segment anything)"
      },
      "widgets_values": [
        "sam_hq_vit_l (1.25GB)"
      ]
    },
    {
      "id": 16,
      "type": "Reroute",
      "pos": [
        1734.8107407921452,
        -85.95547175110397
      ],
      "size": [
        75,
        26
      ],
      "flags": {},
      "order": 6,
      "mode": 0,
      "inputs": [
        {
          "name": "",
          "type": "*",
          "link": 13
        }
      ],
      "outputs": [
        {
          "name": "",
          "type": "GROUNDING_DINO_MODEL",
          "links": [
            15,
            17
          ],
          "slot_index": 0
        }
      ],
      "properties": {
        "showOutputText": false,
        "horizontal": false
      }
    },
    {
      "id": 17,
      "type": "Reroute",
      "pos": [
        1731,
        -31
      ],
      "size": [
        75,
        26
      ],
      "flags": {},
      "order": 7,
      "mode": 0,
      "inputs": [
        {
          "name": "",
          "type": "*",
          "link": 14
        }
      ],
      "outputs": [
        {
          "name": "",
          "type": "SAM_MODEL",
          "links": [
            16,
            18
          ],
          "slot_index": 0
        }
      ],
      "properties": {
        "showOutputText": false,
        "horizontal": false
      }
    },
    {
      "id": 18,
      "type": "JoinImageWithAlpha",
      "pos": [
        2601,
        187
      ],
      "size": {
        "0": 210,
        "1": 46
      },
      "flags": {},
      "order": 19,
      "mode": 0,
      "inputs": [
        {
          "name": "image",
          "type": "IMAGE",
          "link": 19
        },
        {
          "name": "alpha",
          "type": "MASK",
          "link": 21
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            22
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "JoinImageWithAlpha"
      }
    },
    {
      "id": 19,
      "type": "InvertMask",
      "pos": [
        2570,
        302
      ],
      "size": {
        "0": 210,
        "1": 26
      },
      "flags": {},
      "order": 16,
      "mode": 0,
      "inputs": [
        {
          "name": "mask",
          "type": "MASK",
          "link": 20
        }
      ],
      "outputs": [
        {
          "name": "MASK",
          "type": "MASK",
          "links": [
            21
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "InvertMask"
      }
    },
    {
      "id": 14,
      "type": "GroundingDinoSAMSegment (segment anything)",
      "pos": [
        1977,
        172
      ],
      "size": {
        "0": 352.79998779296875,
        "1": 122
      },
      "flags": {},
      "order": 13,
      "mode": 0,
      "inputs": [
        {
          "name": "sam_model",
          "type": "SAM_MODEL",
          "link": 18
        },
        {
          "name": "grounding_dino_model",
          "type": "GROUNDING_DINO_MODEL",
          "link": 17
        },
        {
          "name": "image",
          "type": "IMAGE",
          "link": 12
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            19
          ],
          "shape": 3,
          "slot_index": 0
        },
        {
          "name": "MASK",
          "type": "MASK",
          "links": [
            20
          ],
          "shape": 3,
          "slot_index": 1
        }
      ],
      "properties": {
        "Node name for S&R": "GroundingDinoSAMSegment (segment anything)"
      },
      "widgets_values": [
        "(floating hair:1.1)",
        0.3
      ]
    },
    {
      "id": 20,
      "type": "PreviewImage",
      "pos": [
        3090,
        227
      ],
      "size": {
        "0": 210,
        "1": 246
      },
      "flags": {},
      "order": 22,
      "mode": 0,
      "inputs": [
        {
          "name": "images",
          "type": "IMAGE",
          "link": 22
        }
      ],
      "properties": {
        "Node name for S&R": "PreviewImage"
      }
    },
    {
      "id": 22,
      "type": "VAELoader",
      "pos": [
        -205,
        500
      ],
      "size": {
        "0": 315,
        "1": 58
      },
      "flags": {},
      "order": 2,
      "mode": 0,
      "outputs": [
        {
          "name": "VAE",
          "type": "VAE",
          "links": [
            42
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "VAELoader"
      },
      "widgets_values": [
        "animevae.pt"
      ]
    },
    {
      "id": 34,
      "type": "Reroute",
      "pos": [
        162,
        421
      ],
      "size": [
        75,
        26
      ],
      "flags": {},
      "order": 8,
      "mode": 0,
      "inputs": [
        {
          "name": "",
          "type": "*",
          "link": 42
        }
      ],
      "outputs": [
        {
          "name": "",
          "type": "VAE",
          "links": [
            44
          ],
          "slot_index": 0
        }
      ],
      "properties": {
        "showOutputText": false,
        "horizontal": false
      }
    },
    {
      "id": 33,
      "type": "VAEDecode",
      "pos": [
        1545,
        374
      ],
      "size": {
        "0": 210,
        "1": 46
      },
      "flags": {},
      "order": 20,
      "mode": 0,
      "inputs": [
        {
          "name": "samples",
          "type": "LATENT",
          "link": 104
        },
        {
          "name": "vae",
          "type": "VAE",
          "link": 44
        }
      ],
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            45
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "VAEDecode"
      }
    },
    {
      "id": 2,
      "type": "LoadImage",
      "pos": [
        -253,
        33
      ],
      "size": {
        "0": 315,
        "1": 314
      },
      "flags": {},
      "order": 3,
      "mode": 0,
      "outputs": [
        {
          "name": "IMAGE",
          "type": "IMAGE",
          "links": [
            10
          ],
          "shape": 3,
          "slot_index": 0
        },
        {
          "name": "MASK",
          "type": "MASK",
          "links": [],
          "shape": 3,
          "slot_index": 1
        }
      ],
      "properties": {
        "Node name for S&R": "LoadImage"
      },
      "widgets_values": [
        "00044.png",
        "image"
      ]
    },
    {
      "id": 35,
      "type": "PreviewImage",
      "pos": [
        1882,
        447
      ],
      "size": {
        "0": 210,
        "1": 26
      },
      "flags": {},
      "order": 23,
      "mode": 0,
      "inputs": [
        {
          "name": "images",
          "type": "IMAGE",
          "link": 45
        }
      ],
      "properties": {
        "Node name for S&R": "PreviewImage"
      }
    },
    {
      "id": 25,
      "type": "CheckpointLoaderSimple",
      "pos": [
        -216,
        653
      ],
      "size": {
        "0": 315,
        "1": 98
      },
      "flags": {},
      "order": 4,
      "mode": 0,
      "outputs": [
        {
          "name": "MODEL",
          "type": "MODEL",
          "links": [
            87
          ],
          "shape": 3,
          "slot_index": 0
        },
        {
          "name": "CLIP",
          "type": "CLIP",
          "links": [
            27,
            28
          ],
          "shape": 3,
          "slot_index": 1
        },
        {
          "name": "VAE",
          "type": "VAE",
          "links": null,
          "shape": 3,
          "slot_index": 2
        }
      ],
      "properties": {
        "Node name for S&R": "CheckpointLoaderSimple"
      },
      "widgets_values": [
        "A333ANYTSimpler_a333.safetensors"
      ]
    },
    {
      "id": 15,
      "type": "Reroute",
      "pos": [
        220,
        69
      ],
      "size": [
        75,
        26
      ],
      "flags": {},
      "order": 9,
      "mode": 0,
      "inputs": [
        {
          "name": "",
          "type": "*",
          "link": 10
        }
      ],
      "outputs": [
        {
          "name": "",
          "type": "IMAGE",
          "links": [
            11,
            12,
            88
          ],
          "slot_index": 0
        }
      ],
      "properties": {
        "showOutputText": false,
        "horizontal": false
      }
    },
    {
      "id": 51,
      "type": "LayeredDiffusionCondJointApply",
      "pos": [
        462,
        254
      ],
      "size": [
        406.70674811967126,
        128.80830434523125
      ],
      "flags": {},
      "order": 14,
      "mode": 0,
      "inputs": [
        {
          "name": "model",
          "type": "MODEL",
          "link": 87,
          "slot_index": 0
        },
        {
          "name": "image",
          "type": "IMAGE",
          "link": 88
        },
        {
          "name": "cond",
          "type": "CONDITIONING",
          "link": null
        },
        {
          "name": "blended_cond",
          "type": "CONDITIONING",
          "link": null
        }
      ],
      "outputs": [
        {
          "name": "MODEL",
          "type": "MODEL",
          "links": [
            103
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "LayeredDiffusionCondJointApply"
      },
      "widgets_values": [
        "SD15, Foreground, attn_sharing, Batch size (2N)"
      ]
    },
    {
      "id": 26,
      "type": "CLIPTextEncode",
      "pos": [
        260,
        587
      ],
      "size": {
        "0": 400,
        "1": 200
      },
      "flags": {},
      "order": 10,
      "mode": 0,
      "inputs": [
        {
          "name": "clip",
          "type": "CLIP",
          "link": 27
        }
      ],
      "outputs": [
        {
          "name": "CONDITIONING",
          "type": "CONDITIONING",
          "links": [
            105
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "CLIPTextEncode"
      },
      "widgets_values": [
        "1girl"
      ]
    },
    {
      "id": 27,
      "type": "CLIPTextEncode",
      "pos": [
        229,
        872
      ],
      "size": {
        "0": 400,
        "1": 200
      },
      "flags": {},
      "order": 11,
      "mode": 0,
      "inputs": [
        {
          "name": "clip",
          "type": "CLIP",
          "link": 28
        }
      ],
      "outputs": [
        {
          "name": "CONDITIONING",
          "type": "CONDITIONING",
          "links": [
            106
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "CLIPTextEncode"
      },
      "widgets_values": [
        "text"
      ]
    },
    {
      "id": 32,
      "type": "EmptyLatentImage",
      "pos": [
        820,
        783
      ],
      "size": {
        "0": 315,
        "1": 106
      },
      "flags": {},
      "order": 5,
      "mode": 0,
      "outputs": [
        {
          "name": "LATENT",
          "type": "LATENT",
          "links": [
            107
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "EmptyLatentImage"
      },
      "widgets_values": [
        512,
        512,
        4
      ]
    },
    {
      "id": 55,
      "type": "KSampler",
      "pos": [
        1077,
        345
      ],
      "size": {
        "0": 315,
        "1": 262
      },
      "flags": {},
      "order": 17,
      "mode": 0,
      "inputs": [
        {
          "name": "model",
          "type": "MODEL",
          "link": 103
        },
        {
          "name": "positive",
          "type": "CONDITIONING",
          "link": 105
        },
        {
          "name": "negative",
          "type": "CONDITIONING",
          "link": 106
        },
        {
          "name": "latent_image",
          "type": "LATENT",
          "link": 107
        }
      ],
      "outputs": [
        {
          "name": "LATENT",
          "type": "LATENT",
          "links": [
            104
          ],
          "shape": 3,
          "slot_index": 0
        }
      ],
      "properties": {
        "Node name for S&R": "KSampler"
      },
      "widgets_values": [
        323212994150883,
        "randomize",
        20,
        8,
        "euler",
        "normal",
        0.5
      ]
    }
  ],
  "links": [
    [
      5,
      4,
      1,
      7,
      0,
      "MASK"
    ],
    [
      6,
      4,
      0,
      8,
      0,
      "IMAGE"
    ],
    [
      7,
      7,
      0,
      8,
      1,
      "MASK"
    ],
    [
      8,
      8,
      0,
      9,
      0,
      "IMAGE"
    ],
    [
      10,
      2,
      0,
      15,
      0,
      "*"
    ],
    [
      11,
      15,
      0,
      4,
      2,
      "IMAGE"
    ],
    [
      12,
      15,
      0,
      14,
      2,
      "IMAGE"
    ],
    [
      13,
      3,
      0,
      16,
      0,
      "*"
    ],
    [
      14,
      5,
      0,
      17,
      0,
      "*"
    ],
    [
      15,
      16,
      0,
      4,
      1,
      "GROUNDING_DINO_MODEL"
    ],
    [
      16,
      17,
      0,
      4,
      0,
      "SAM_MODEL"
    ],
    [
      17,
      16,
      0,
      14,
      1,
      "GROUNDING_DINO_MODEL"
    ],
    [
      18,
      17,
      0,
      14,
      0,
      "SAM_MODEL"
    ],
    [
      19,
      14,
      0,
      18,
      0,
      "IMAGE"
    ],
    [
      20,
      14,
      1,
      19,
      0,
      "MASK"
    ],
    [
      21,
      19,
      0,
      18,
      1,
      "MASK"
    ],
    [
      22,
      18,
      0,
      20,
      0,
      "IMAGE"
    ],
    [
      27,
      25,
      1,
      26,
      0,
      "CLIP"
    ],
    [
      28,
      25,
      1,
      27,
      0,
      "CLIP"
    ],
    [
      42,
      22,
      0,
      34,
      0,
      "*"
    ],
    [
      44,
      34,
      0,
      33,
      1,
      "VAE"
    ],
    [
      45,
      33,
      0,
      35,
      0,
      "IMAGE"
    ],
    [
      87,
      25,
      0,
      51,
      0,
      "MODEL"
    ],
    [
      88,
      15,
      0,
      51,
      1,
      "IMAGE"
    ],
    [
      103,
      51,
      0,
      55,
      0,
      "MODEL"
    ],
    [
      104,
      55,
      0,
      33,
      0,
      "LATENT"
    ],
    [
      105,
      26,
      0,
      55,
      1,
      "CONDITIONING"
    ],
    [
      106,
      27,
      0,
      55,
      2,
      "CONDITIONING"
    ],
    [
      107,
      32,
      0,
      55,
      3,
      "LATENT"
    ]
  ],
  "groups": [],
  "config": {},
  "extra": {},
  "version": 0.4
}
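
For reference, a minimal sketch (not part of the original report) that loads bug.json and resolves every entry in its "links" array to readable source and destination node types, which makes the wiring above easier to audit. The filename and the link tuple layout [link_id, src_node, src_slot, dst_node, dst_slot, type] are taken from the JSON shown here.

import json

# Load the workflow attached to this issue (path assumed to be bug.json).
with open("bug.json") as f:
    wf = json.load(f)

# Map node id -> node type for readable output.
nodes = {n["id"]: n["type"] for n in wf["nodes"]}

# Each link is [link_id, src_node, src_slot, dst_node, dst_slot, type].
for link_id, src, src_slot, dst, dst_slot, ltype in wf["links"]:
    print(f"link {link_id:>3} ({ltype}): "
          f"{nodes[src]}[{src_slot}] -> {nodes[dst]}[{dst_slot}]")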

Error Logs:

got prompt
  0%|                                                                                                                                                                                          | 0/20 [00:00<?, ?it/s]
!!! Exception during processing!!! 1024
Traceback (most recent call last):
  File "/Users/chenhong/perk/ComfyUI/execution.py", line 151, in recursive_execute
    output_data, output_ui = get_output_data(obj, input_data_all)
                             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/execution.py", line 81, in get_output_data
    return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/execution.py", line 74, in map_node_over_list
    results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/nodes.py", line 1344, in sample
    return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/nodes.py", line 1314, in common_ksampler
    samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/sample.py", line 37, in sample
    samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 761, in sample
    return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 663, in sample
    return cfg_guider.sample(noise, latent_image, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 650, in sample
    output = self.inner_sample(noise, latent_image, device, sampler, sigmas, denoise_mask, callback, disable_pbar, seed)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 629, in inner_sample
    samples = sampler.sample(self, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 534, in sample
    samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/k_diffusion/sampling.py", line 137, in sample_euler
    denoised = model(x, sigma_hat * s_in, **extra_args)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 272, in __call__
    out = self.inner_model(x, sigma, model_options=model_options, seed=seed)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 616, in __call__
    return self.predict_noise(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 619, in predict_noise
    return sampling_function(self.inner_model, x, timestep, self.conds.get("negative", None), self.conds.get("positive", None), self.cfg, model_options=model_options, seed=seed)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 258, in sampling_function
    out = calc_cond_batch(model, conds, x, timestep, model_options)
          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/samplers.py", line 218, in calc_cond_batch
    output = model.apply_model(input_x, timestep_, **c).chunk(batch_chunks)
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/model_base.py", line 97, in apply_model
    model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
                   ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/diffusionmodules/openaimodel.py", line 850, in forward
    h = forward_timestep_embed(module, h, emb, context, transformer_options, time_context=time_context, num_video_frames=num_video_frames, image_only_indicator=image_only_indicator)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/diffusionmodules/openaimodel.py", line 44, in forward_timestep_embed
    x = layer(x, context, transformer_options)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/attention.py", line 633, in forward
    x = block(x, context=context[i], transformer_options=transformer_options)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/custom_nodes/ComfyUI-layerdiffuse/lib_layerdiffusion/attention_sharing.py", line 253, in forward
    return func(self, x, context, transformer_options)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/attention.py", line 460, in forward
    return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/diffusionmodules/util.py", line 191, in checkpoint
    return func(*inputs)
           ^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/comfy/ldm/modules/attention.py", line 520, in _forward
    n = self.attn1(n, context=context_attn1, value=value_attn1)
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/opt/anaconda3/lib/python3.11/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/chenhong/perk/ComfyUI/custom_nodes/ComfyUI-layerdiffuse/lib_layerdiffusion/attention_sharing.py", line 171, in forward
    control_signal = self.control_signals[context_dim].to(
                     ~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^
KeyError: 1024
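
The last frame shows the failure is a plain dictionary lookup: attention_sharing.py keeps per-dimension control signals in a dict and indexes it with the attention layer's context dimension, and here the model asks for a key (1024) that was never registered. A minimal sketch of that pattern, with purely illustrative dict contents (the real keys depend on the patched model and are not visible in this log):

# Illustrative reproduction of the failure mode in the traceback:
# indexing a dict of control signals with a context dimension (1024)
# that is not among its keys raises KeyError: 1024.
control_signals = {768: "signal_for_768"}      # hypothetical contents

context_dim = 1024                             # value reported by the KeyError
control_signal = control_signals[context_dim]  # raises KeyError: 1024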