Open zqlong123 opened 9 months ago
Update AnimateDiff-Evolved and then the error will go away.
Thanks! Let me try
I made a second post about the updated AnimateDiff-Evolved, but the problem has not been solved yet.
The second update is: AnimateDiff-Evolved, the problem has not been solved yet
There is no need to make a second post, we can keep it on this one. And if updating didn't fix it, that means your update didn't work. We can confirm this by you looking at the contents of the comfy_nodes/ComfyUI-AnimateDiff-Evolved/animatediff/motion_utils.py file. If it is updated, CrossAttentionMM should look like this:
If it doesn't look like that, then whatever you did to try to update did not work. How did you go about trying to update?
There is no such file: /motion_utils.py
Woops, I had a typo, motion_utils is in animatediff folder there.
motion_utils.py — what should I use to open this file? When I double-click to open it, nothing happens.
I am Chinese, which country are you from?
You can look into the file with any text editor
import hashlib import os from pathlib import Path from typing import Callable
import numpy as np
import folder_paths from comfy.model_base import SDXL, BaseModel, model_sampling from comfy.model_management import xformers_enabled from comfy.model_patcher import ModelPatcher
class IsChangedHelper:
    """Small counter used to drive ComfyUI's IS_CHANGED re-execution mechanism.

    change() bumps an internal value so a node is considered changed;
    no_change() returns the current value without modifying it.
    """

    def __init__(self):
        # NOTE: the pasted code read 'def init' - markdown ate the dunder underscores.
        self.val = 0

    def no_change(self):
        """Return the current value unchanged."""
        return self.val

    def change(self):
        """Advance the value, wrapping at 100, to mark state as changed."""
        self.val = (self.val + 1) % 100
class ModelSamplingConfig:
    """Minimal config object exposing the beta_schedule name expected by comfy's model_sampling()."""

    def __init__(self, beta_schedule: str):
        # NOTE: the pasted code read 'def init' - markdown ate the dunder underscores.
        self.beta_schedule = beta_schedule
class BetaSchedules:
    """Registry of beta-schedule aliases shown in node dropdowns, mapped to comfy schedule names."""

    SQRT_LINEAR = "sqrt_linear (AnimateDiff)"
    LINEAR = "linear (HotshotXL/default)"
    SQRT = "sqrt"
    COSINE = "cosine"
    SQUAREDCOS_CAP_V2 = "squaredcos_cap_v2"

    # Order here is the default dropdown order.
    ALIAS_LIST = [SQRT_LINEAR, LINEAR, SQRT, COSINE, SQUAREDCOS_CAP_V2]

    # UI alias -> raw schedule name understood by comfy.
    ALIAS_MAP = {
        SQRT_LINEAR: "sqrt_linear",
        LINEAR: "linear",
        SQRT: "sqrt",
        COSINE: "cosine",
        SQUAREDCOS_CAP_V2: "squaredcos_cap_v2",
    }

    @classmethod
    def to_name(cls, alias: str) -> str:
        """Translate a UI alias to the raw schedule name; raises KeyError on an unknown alias."""
        return cls.ALIAS_MAP[alias]

    @classmethod
    def to_config(cls, alias: str) -> "ModelSamplingConfig":
        """Wrap the schedule name for the given alias in a ModelSamplingConfig."""
        return ModelSamplingConfig(cls.to_name(alias))

    @classmethod
    def to_model_sampling(cls, alias: str, model: "ModelPatcher"):
        """Build a comfy model_sampling object for the alias, matching the patched model's type."""
        return model_sampling(cls.to_config(alias), model_type=model.model.model_type)

    @staticmethod
    def get_alias_list_with_first_element(first_element: str):
        """Return a copy of ALIAS_LIST with first_element swapped to the front.

        Raises ValueError if first_element is not present in the list.
        """
        new_list = BetaSchedules.ALIAS_LIST.copy()
        element_index = new_list.index(first_element)
        new_list[0], new_list[element_index] = new_list[element_index], new_list[0]
        return new_list
class BetaScheduleCache:
    """Caches a model's model_sampling object so it can be restored after temporary replacement."""

    def __init__(self, model: "ModelPatcher"):
        # NOTE: the pasted code read 'def init' - markdown ate the dunder underscores.
        self.model_sampling = model.model.model_sampling

    def use_cached_beta_schedule_and_clean(self, model: "ModelPatcher"):
        """Restore the cached model_sampling onto model, then drop the cached reference."""
        model.model.model_sampling = self.model_sampling
        self.clean()

    def clean(self):
        """Release the cached reference."""
        self.model_sampling = None
class Folders:
    """Names of the custom folder_paths categories registered by AnimateDiff-Evolved."""

    ANIMATEDIFF_MODELS = "AnimateDiffEvolved_Models"
    MOTION_LORA = "AnimateDiffMotion_LoRA"
# Register custom model / motion-lora / video-format folders with ComfyUI's
# folder_paths so the loader nodes can discover files in them.
# NOTE: 'file' in the pasted code was the markdown-mangled __file__.
folder_paths.folder_names_and_paths[Folders.ANIMATEDIFF_MODELS] = (
    [str(Path(__file__).parent.parent / "models")],
    folder_paths.supported_pt_extensions,
)
folder_paths.folder_names_and_paths[Folders.MOTION_LORA] = (
    [str(Path(__file__).parent.parent / "motion_lora")],
    folder_paths.supported_pt_extensions,
)
folder_paths.folder_names_and_paths["video_formats"] = (
    [os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "video_formats")],
    [".json"],
)
def get_available_motion_models():
    """Return the list of motion-model filenames known to folder_paths."""
    return folder_paths.get_filename_list(Folders.ANIMATEDIFF_MODELS)
def get_motion_model_path(model_name: str):
    """Resolve a motion-model filename to its full path via folder_paths."""
    return folder_paths.get_full_path(Folders.ANIMATEDIFF_MODELS, model_name)
def get_available_motion_loras():
    """Return the list of motion-LoRA filenames known to folder_paths."""
    return folder_paths.get_filename_list(Folders.MOTION_LORA)
def get_motion_lora_path(lora_name: str):
    """Resolve a motion-LoRA filename to its full path via folder_paths."""
    return folder_paths.get_full_path(Folders.MOTION_LORA, lora_name)
def calculate_file_hash(filename: str, hash_every_n: int = 50):
    """Return a sha256 hex digest computed from a sample of the file's 1 MiB chunks.

    Only every hash_every_n-th chunk is hashed - a deliberate speed/uniqueness
    trade-off for multi-GB model files. Pass hash_every_n=1 to hash everything.
    """
    h = hashlib.sha256()
    b = bytearray(1024 * 1024)  # reusable 1 MiB read buffer
    mv = memoryview(b)
    with open(filename, 'rb', buffering=0) as f:
        i = 0
        # readinto returns 0 at EOF, ending the walrus loop.
        while n := f.readinto(mv):
            if i % hash_every_n == 0:
                h.update(mv[:n])
            i += 1
    return h.hexdigest()
def calculate_model_hash(model: "ModelPatcher"):
    """Fingerprint a loaded model by sha256-hashing the buffers of one UNet block.

    Only input_blocks[1] is hashed - cheap, yet enough to distinguish models.
    NOTE(review): the pasted line read 'model.model.diff'; reconstructed as the
    conventional 'diffusion_model' attribute - confirm against the repo.
    """
    unet = model.model.diffusion_model
    t = unet.input_blocks[1]
    m = hashlib.sha256()
    for buf in t.buffers():
        # view as raw bytes so the digest is dtype-independent
        m.update(buf.cpu().numpy().view(np.uint8))
    return m.hexdigest()
class ModelTypesSD:
    """String identifiers for the supported SD checkpoint families."""

    SD1_5 = "sd1_5"
    SDXL = "sdxl"
def get_sd_model_type(model: "ModelPatcher") -> str:
    """Classify model as ModelTypesSD.SD1_5 or ModelTypesSD.SDXL.

    Returns None when model is None, and False when neither family matches -
    both falsy sentinels kept as-is despite the -> str annotation, since
    callers may distinguish them.
    """
    if model is None:
        return None
    if is_checkpoint_sd1_5(model):
        return ModelTypesSD.SD1_5
    elif is_checkpoint_sdxl(model):
        return ModelTypesSD.SDXL
    return False
def is_checkpoint_sd1_5(model: "ModelPatcher"):
    """Return True if the patched model's base class is exactly BaseModel (SD1.5-style)."""
    if model is None:
        return False
    # exact type match (not isinstance) so SDXL subclasses do not pass
    model_type = type(model.model)
    return model_type == BaseModel
def is_checkpoint_sdxl(model: "ModelPatcher"):
    """Return True if the patched model's base class is exactly SDXL."""
    if model is None:
        return False
    # exact type match, mirroring is_checkpoint_sd1_5
    model_type = type(model.model)
    return model_type == SDXL
def raise_if_not_checkpoint_sd1_5(model: "ModelPatcher"):
    """Raise ValueError unless model is an SD1.5 (BaseModel) checkpoint.

    NOTE: '.name' in the pasted code was the markdown-mangled '.__name__'.
    """
    if not is_checkpoint_sd1_5(model):
        raise ValueError(f"For AnimateDiff, SD Checkpoint (model) is expected to be SD1.5-based (BaseModel), but was: {type(model.model).__name__}")
def wrap_function_to_inject_xformers_bug_info(function_to_wrap: Callable) -> Callable:
    """Wrap function_to_wrap so a known xformers CUDA-configuration RuntimeError
    is re-raised with a user-friendly message pointing at the repo's workaround.

    NOTE(review): xformers_enabled is imported as a function; the pasted code
    tested the function object itself (always truthy, so the wrapper was always
    applied). Calling it here - confirm against comfy.model_management.
    NOTE: '*args, *kwargs' in the paste was the markdown-mangled '*args, **kwargs'.
    """
    if not xformers_enabled():
        return function_to_wrap
    else:
        def wrapped_function(*args, **kwargs):
            try:
                return function_to_wrap(*args, **kwargs)
            except RuntimeError as e:
                # only decorate the specific xformers failure mode; re-raise anything else untouched
                if str(e).startswith("CUDA error: invalid configuration argument"):
                    raise RuntimeError(f"An xformers bug was encountered in AnimateDiff - this is unexpected, \
report this to Kosinkadink/ComfyUI-AnimateDiff-Evolved repo as an issue, \
and a workaround for now is to run ComfyUI with the --disable-xformers argument.")
                raise
        return wrapped_function
That's the model_utils file instead of motion_utils, but even from that, I can tell you're not on the latest version of AnimateDiff-Evolved. Update AnimateDiff-Evolved either through the manager (Update All in the menu), or manually through the git pull command inside the ComfyUI-AnimateDiff-Evolved directory.
Thanks! I will try again
Updated everything, even did a git reset --hard, didn't work, same error. Rolled back to d8eddc785633877cb186470c9ecd6c19916396bb and it worked.
@bparrott78 what was the error you had? The commit you list there is the one that introduced the fix, and the latest version has the same code there in it. So you may have been outdated somehow, and your attempt at a rollback actually brought you forward.
Same error as the guy above, I'll try to move back to current and see if that works.
There is no issue with the latest version of the code. You just need to be sure you are actually using it.
Figured it out. I was on the new injection branch, rejoining the main branch solved it
Gotcha. New injection was never a good branch to be in tbh other than testing things out a few weeks ago, the real fixes for model patching stuff got introduced to main branch last week
@zqlong123 assuming you're still trying to update, also make sure you are on the main branch
Yeah I hadn't used animatediff for a week so that makes sense.
I'm gonna go ahead and delete that branch to avoid confusion
I'll try again today, it was too late yesterday and I had to rest and sleep.
Error occurred when executing IPAdapterApply:
Error(s) in loading state_dict for Resampler: size mismatch for proj_in.weight: copying a param with shape torch.Size([768, 1280]) from checkpoint, the shape in current model is torch.Size([768, 1024]).
File "F:\Blender_ComfyUI\ComfyUI\execution.py", line 153, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) File "F:\Blender_ComfyUI\ComfyUI\execution.py", line 83, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) File "F:\Blender_ComfyUI\ComfyUI\execution.py", line 76, in map_node_over_list results.append(getattr(obj, func)(**slice_dict(input_data_all, i))) File "F:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI_IPAdapter_plus\IPAdapterPlus.py", line 426, in apply_ipadapter self.ipadapter = IPAdapter( File "F:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI_IPAdapter_plus\IPAdapterPlus.py", line 175, in init self.image_proj_model.load_state_dict(ipadapter_model["image_proj"]) File "F:\Blender_ComfyUI\python_embeded\lib\site-packages\torch\nn\modules\module.py", line 2152, in load_state_dict raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
hi Can you help me figure out what the problem is?
@zqlong123 - when you boot up ComfyUI, errors occur. Check the logs and disable problematic extensions. I found other extensions that had nothing to do with this repo — once they were disabled, it worked.
Error occurred when executing ADE_AnimateDiffLoaderWithContext:
module 'comfy.ops' has no attribute 'Linear'
File "H:\Blender_ComfyUI\ComfyUI\execution.py", line 153, in recursive_execute output_data, output_ui = get_output_data(obj, input_data_all) File "H:\Blender_ComfyUI\ComfyUI\execution.py", line 83, in get_output_data return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True) File "H:\Blender_ComfyUI\ComfyUI\execution.py", line 76, in map_node_over_list results.append(getattr(obj, func)(*slice_dict(input_data_all, i))) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\nodes.py", line 204, in load_mm_and_inject_params mm = load_motion_module(model_name, motion_lora, model=model, motion_model_settings=motion_model_settings) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module.py", line 216, in load_motion_module motion_module = AnimDiffMotionWrapper(mm_state_dict=mm_state_dict, mm_hash=model_hash, mm_name=model_name, loras=loras) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 44, in init self.down_blocks.append(MotionModule(c, temporal_position_encoding_max_len=self.encoding_max_len, block_type=BlockType.DOWN)) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 97, in init get_motion_module(in_channels, temporal_position_encoding_max_len), File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 119, in get_motion_module return VanillaTemporalModule(in_channels=in_channels, temporal_position_encoding_max_len=temporal_position_encoding_max_len) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 137, in init self.temporal_transformer = TemporalTransformer3DModel( File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 199, in init [ File 
"H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 200, in TemporalTransformerBlock( File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 292, in init VersatileAttention( File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py", line 386, in init super().init(args, **kwargs) File "H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_utils.py", line 40, in init self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
TemporalTransformerBlock( 文件“H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py”,第 292 行,init VersatileAttention( 文件“H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_module_ad.py”,第 386 行,init super().init(*args, **kwargs) 文件“H:\Blender_ComfyUI\ComfyUI\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\motion_utils.py”,第 40 行,init self.to_q = 操作。线性(query_dim, inner_dim, bias=False, dtype=dtype, device=device)