Perhaps it would be better to add the total steps to the pipe loader and pipe them through? Then only the start/end steps would need to be set.
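For illustration, a minimal Python sketch of that idea, reusing the `pipe["vars"]` dict visible in the traceback below; the `steps` field and the `step_range` helper are hypothetical, not an existing tinyterraNodes API:

```python
# Sketch only: the pipe loader stores the total step count once, and
# downstream sampler nodes derive their absolute step range from it.
pipe = {
    "vars": {
        # model, positive, negative, samples, seed, ... as today
        "steps": 30,  # total steps, set once at the loader (proposed field)
    }
}

def step_range(pipe, start_frac, end_frac):
    """Turn fractions of the piped total into absolute start/last steps."""
    total = pipe["vars"]["steps"]
    return round(total * start_frac), round(total * end_frac)

# A downstream sampler node then only picks its slice of the schedule:
start_step, last_step = step_range(pipe, 0.0, 2 / 3)  # e.g. base pass 0..20
```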
It's also too easy to get those step numbers wrong (see image), which produces an error like the one below:
Traceback (most recent call last):
File "F:\ComfyUI_windows_portable\ComfyUI\execution.py", line 144, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "F:\ComfyUI_windows_portable\ComfyUI\execution.py", line 74, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "F:\ComfyUI_windows_portable\ComfyUI\execution.py", line 67, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "F:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_tinyterraNodes\tinyterraNodes.py", line 1275, in sample return ttN_TSC_pipeKSampler.sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise,
File "F:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_tinyterraNodes\tinyterraNodes.py", line 1195, in sample return process_sample_state(self, pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
File "F:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_tinyterraNodes\tinyterraNodes.py", line 766, in process_sample_state
pipe["vars"]["samples"] = common_ksampler(pipe["vars"]["model"], pipe["vars"]["seed"], steps, cfg, sampler_name, scheduler, pipe["vars"]["positive"], pipe["vars"]["negative"], pipe["vars"]["samples"], denoise=denoise, preview_latent=preview_latent, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, disable_noise=disable_noise)
File "F:\ComfyUI_windows_portable\ComfyUI\custom_nodes\ComfyUI_tinyterraNodes\tinyterraNodes.py", line 324, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\sample.py", line 88, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 676, in sample
samples, _ = sampler.sample_custom(ddim_timesteps=timesteps,
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\models\diffusion\ddim.py", line 89, in sample_custom
samples, intermediates = self.ddim_sampling(conditioning, x_T.shape,
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\models\diffusion\ddim.py", line 231, in ddim_sampling
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\models\diffusion\ddim.py", line 265, in p_sample_ddim
model_output = denoise_function(x, t, **extra_args)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 98, in predict_eps_discrete_timestep
return (input - self(input, sigma, **kwargs)) / utils.append_dims(sigma, input.ndim)
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 125, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\k_diffusion\external.py", line 151, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 307, in apply_model
out = sampling_function(self.inner_model.apply_model, x, timestep, uncond, cond, cond_scale, cond_concat, model_options=model_options, seed=seed)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 285, in sampling_function
cond, uncond = calc_cond_uncond_batch(model_function, cond, uncond, x, timestep, max_total_area, cond_concat, model_options)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\samplers.py", line 262, in calc_cond_uncond_batch
output = model_function(input_x, timestep_, **c).chunk(batch_chunks)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\model_base.py", line 61, in apply_model
return self.diffusion_model(xc, t, context=context, y=c_adm, control=control, transformer_options=transformer_options).float()
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 834, in forward
h = forward_timestep_embed(module, h, emb, context, transformer_options)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\openaimodel.py", line 97, in forward_timestep_embed
x = layer(x, context, transformer_options)
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 695, in forward
x = block(x, context=context[i], transformer_options=transformer_options)
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 527, in forward
return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\diffusionmodules\util.py", line 123, in checkpoint
return func(*inputs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 627, in _forward
n = self.attn2(n, context=context_attn2, value=value_attn2)
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ldm\modules\attention.py", line 422, in forward
k = self.to_k(context)
File "F:\ComfyUI_windows_portable\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "F:\ComfyUI_windows_portable\ComfyUI\comfy\ops.py", line 18, in forward
return torch.nn.functional.linear(input, self.weight, self.bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (308x2048 and 1280x768)
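As a side note on the error itself: the failure happens in the cross-attention `to_k` projection, which points to a conditioning/model width mismatch rather than the step values as such. 2048 is the width of SDXL base conditioning (CLIP-L 768 + OpenCLIP-G 1280 concatenated), while a UNet expecting 1280-wide context, like the SDXL refiner, cannot consume it. A hypothetical minimal reproduction, with shapes chosen only to match the message above:

```python
import torch

# Base-model text embedding: 308 tokens, 2048-dim (hypothetical shapes
# matching the error message above).
context = torch.randn(308, 2048)

# A projection that expects 1280-dim context, as in the SDXL refiner;
# the 768 output width is arbitrary here.
to_k = torch.nn.Linear(1280, 768, bias=False)

to_k(context)
# RuntimeError: mat1 and mat2 shapes cannot be multiplied (308x2048 and 1280x768)
```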
Do you have a working example flow, using only the default comfy nodes, that I could compare with to see the intended outcome more clearly?
This is what I started with, and I wanted to replace everything in the circled area with what I have in the first image I posted when opening this report. The workflow for that is in this image:

Because of the issue I initially reported, this is as far as I can go using your nodes. The workflow for that is in this image:
Workflow is in this image - with the circled region of your first image replaced with pipe nodes.
I'll have a think about adding a separate SDXL loader/sampler, but I haven't had time at this stage.
Works perfectly, thank you so much!
I've configured a workflow that looks correct for SDXL, but the preview output shows that the refiner stage is not doing anything with the leftover latent noise. I've tried varying the steps up and down, to no avail. Also, it would be nice not to have to enter the number of steps in the refiner stage, and instead have it calculated from the steps remaining after the previous stage.
Hopefully the image makes it clear.
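For reference, here is a rough sketch of how the step ranges are typically split between a base and a refiner pass using `start_step`/`last_step` (keyword names as they appear in the `common_ksampler` call in the traceback above; the values are placeholders):

```python
# Two-stage handoff: only the switch point is chosen by hand.
total_steps = 30   # placeholder
switch_step = 20   # where the base hands its leftover noise to the refiner

# Base pass: run steps 0..switch_step and keep the leftover noise.
base_args = dict(steps=total_steps, start_step=0, last_step=switch_step,
                 force_full_denoise=False, disable_noise=False)

# Refiner pass: resume at switch_step on the base's latent, add no new
# noise, and denoise fully to the end.
refiner_args = dict(steps=total_steps, start_step=switch_step,
                    last_step=total_steps, force_full_denoise=True,
                    disable_noise=True)
```

With the total carried in the pipe as suggested earlier in the thread, the refiner's step count would fall out of `start_step`/`last_step` automatically instead of being entered by hand.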