Hi! I'm trying to use your model to generate an image, but when I set the width and height to anything larger than 512x512, the following error occurs:
Traceback (most recent call last):
  File "/home/joonjeon/LLM_layout_generator/generate.py", line 62, in <module>
    image = pipe(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/pipeline.py", line 689, in __call__
    noise_pred = self.unet(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/accelerate/hooks.py", line 166, in new_forward
    output = module._old_forward(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/diffusers/models/unet_2d_condition.py", line 958, in forward
    sample, res_samples = downsample_block(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/diffusers/models/unet_2d_blocks.py", line 1086, in forward
    hidden_states = attn(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/diffusers/models/transformer_2d.py", line 315, in forward
    hidden_states = block(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/diffusers/models/attention.py", line 232, in forward
    hidden_states = attn_layer(
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1518, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/joonjeon/miniconda3/envs/pytorch/lib/python3.9/site-packages/torch/nn/modules/module.py", line 1527, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/joonjeon/LLM_layout_generator/controlatt_net.py", line 70, in forward
    encoder_attention_mask = encoder_attention_mask[:, ::scale, ::scale, :].reshape(b, -1, l)
ValueError: slice step cannot be zero
Any ideas on why this is happening?
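For what it's worth, here is a minimal sketch of how I think the failing line ends up with a zero slice step, assuming `scale` is derived by integer division of the attention-mask grid size by the latent grid size (the variable names, the 64x64 mask size, and the division itself are my guesses, not necessarily what controlatt_net.py actually does):

```python
import torch

b, l = 1, 77                                   # batch size, text token length (guesses)
mask_res = 64                                  # grid the mask was built for: 512 / 8 (guess)
latent_res = 96                                # 768x768 request -> 96x96 latent, larger than mask_res
encoder_attention_mask = torch.ones(b, mask_res, mask_res, l)

# If scale comes from integer division, it collapses to 0 as soon as
# the latent grid is larger than the mask grid:
scale = mask_res // latent_res                 # 64 // 96 == 0

try:
    encoder_attention_mask[:, ::scale, ::scale, :].reshape(b, -1, l)
except ValueError as e:
    print(e)                                   # slice step cannot be zero (same as the traceback)
```

If that guess is right, anything above 512x512 would hit this, which matches what I'm seeing.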