Getting the error below.
Can somebody help?
I am running python i2v_test.py
with torch.no_grad(), torch.cuda.amp.autocast():
Traceback (most recent call last):
File "/home/ec2-user/SageMaker/DynamiCrafter/i2v_test.py", line 146, in
video_path = i2v.get_image('prompts/art.png','man fishing in a boat at sunset')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/i2v_test.py", line 65, in get_image
text_emb = model.get_learned_conditioning([prompt])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/models/ddpm3d.py", line 601, in get_learned_conditioning
c = self.cond_stage_model.encode(c)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 234, in encode
return self(text)
^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 211, in forward
z = self.encode_with_transformer(tokens.to(self.device))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 218, in encode_with_transformer
x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 230, in text_transformer_forward
x = r(x, attn_mask=attn_mask)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/open_clip/transformer.py", line 263, in forward
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/open_clip/transformer.py", line 250, in attention
return self.attn(
^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/activation.py", line 1368, in forward
attn_output, attn_output_weights = F.multi_head_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/functional.py", line 6131, in multi_head_attention_forward
raise RuntimeError(
RuntimeError: The shape of the 2D attn_mask is torch.Size([77, 77]), but should be (1, 1).
Getting the error below. Can somebody help? I am running python i2v_test.py
with torch.no_grad(), torch.cuda.amp.autocast(): Traceback (most recent call last): File "/home/ec2-user/SageMaker/DynamiCrafter/i2v_test.py", line 146, in
video_path = i2v.get_image('prompts/art.png','man fishing in a boat at sunset')
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/i2v_test.py", line 65, in get_image
text_emb = model.get_learned_conditioning([prompt])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/models/ddpm3d.py", line 601, in get_learned_conditioning
c = self.cond_stage_model.encode(c)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 234, in encode
return self(text)
^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 211, in forward
z = self.encode_with_transformer(tokens.to(self.device))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 218, in encode_with_transformer
x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/SageMaker/DynamiCrafter/lvdm/modules/encoders/condition.py", line 230, in text_transformer_forward
x = r(x, attn_mask=attn_mask)
^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/open_clip/transformer.py", line 263, in forward
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/open_clip/transformer.py", line 250, in attention
return self.attn(
^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1736, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/module.py", line 1747, in _call_impl
return forward_call(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/modules/activation.py", line 1368, in forward
attn_output, attn_output_weights = F.multi_head_attention_forward(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/ec2-user/anaconda3/lib/python3.12/site-packages/torch/nn/functional.py", line 6131, in multi_head_attention_forward
raise RuntimeError(
RuntimeError: The shape of the 2D attn_mask is torch.Size([77, 77]), but should be (1, 1).