When using A1111's refiner feature with this extension — regardless of whether the base model is derived from SD1.5 or SDXL — the first and last frames generate fine, but the run then fails with a TypeError:
Traceback (most recent call last):
  File "/Users/**redacted**/stable-diffusion-webui/extensions/stable-diffusion-webui-prompt-travel/scripts/prompt_travel.py", line 621, in run
    runner()
  File "/Users/**redacted**/stable-diffusion-webui/extensions/stable-diffusion-webui-prompt-travel/scripts/prompt_travel.py", line 655, in run_linear
    process_p()
  File "/Users/**redacted**/stable-diffusion-webui/extensions/stable-diffusion-webui-prompt-travel/scripts/prompt_travel.py", line 438, in process_p_binding
    proc = process_images(self.p)
  File "/Users/**redacted**/stable-diffusion-webui/modules/processing.py", line 732, in process_images
    res = process_images_inner(p)
  File "/Users/**redacted**/stable-diffusion-webui/extensions/sd-webui-controlnet/scripts/batch_hijack.py", line 42, in processing_process_images_hijack
    return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
  File "/Users/**redacted**/stable-diffusion-webui/modules/processing.py", line 867, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "/Users/**redacted**/stable-diffusion-webui/modules/processing.py", line 1140, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 235, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_samplers_common.py", line 261, in launch_sampling
    return func()
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_samplers_kdiffusion.py", line 235, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/Users/**redacted**/stable-diffusion-webui/repositories/k-diffusion/k_diffusion/sampling.py", line 594, in sample_dpmpp_2m
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_samplers_cfg_denoiser.py", line 95, in forward
    if sd_samplers_common.apply_refiner(self):
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_samplers_common.py", line 185, in apply_refiner
    sd_models.reload_model_weights(info=refiner_checkpoint_info)
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_models.py", line 751, in reload_model_weights
    load_model(checkpoint_info, already_loaded_state_dict=state_dict)
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_models.py", line 626, in load_model
    load_model_weights(sd_model, checkpoint_info, state_dict, timer)
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_models.py", line 353, in load_model_weights
    model.load_state_dict(state_dict, strict=False)
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_disable_initialization.py", line 223, in <lambda>
    module_load_state_dict = self.replace(torch.nn.Module, 'load_state_dict', lambda *args, **kwargs: load_state_dict(module_load_state_dict, *args, **kwargs))
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_disable_initialization.py", line 221, in load_state_dict
    original(module, state_dict, strict=strict)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2027, in load_state_dict
    load(self, state_dict)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2015, in load
    load(child, child_state_dict, child_prefix)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2015, in load
    load(child, child_state_dict, child_prefix)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2015, in load
    load(child, child_state_dict, child_prefix)
  [Previous line repeated 3 more times]
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/nn/modules/module.py", line 2009, in load
    module._load_from_state_dict(
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_disable_initialization.py", line 226, in <lambda>
    conv2d_load_from_state_dict = self.replace(torch.nn.Conv2d, '_load_from_state_dict', lambda *args, **kwargs: load_from_state_dict(conv2d_load_from_state_dict, *args, **kwargs))
  File "/Users/**redacted**/stable-diffusion-webui/modules/sd_disable_initialization.py", line 191, in load_from_state_dict
    module._parameters[name] = torch.nn.parameter.Parameter(torch.zeros_like(param, device=device, dtype=dtype), requires_grad=param.requires_grad)
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/_meta_registrations.py", line 1780, in zeros_like
    return aten.empty_like.default(
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/_ops.py", line 287, in __call__
    return self._op(*args, **kwargs or {})
  File "/Users/**redacted**/stable-diffusion-webui/venv/lib/python3.10/site-packages/torch/_refs/__init__.py", line 4254, in empty_like
    return torch.empty_strided(
TypeError: Cannot convert a MPS Tensor to float64 dtype as the MPS framework doesn't support float64. Please use float32 instead.
In short: when the refiner kicks in mid-generation, `apply_refiner` triggers `reload_model_weights`, and the weight-reload path (`sd_disable_initialization.py`, `torch.zeros_like`) attempts to allocate a float64 tensor on the MPS device, which the MPS backend does not support — hence the TypeError above on Apple Silicon.