huggingface / diffusers

🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch and FLAX.
https://huggingface.co/docs/diffusers
Apache License 2.0

OSX 13.4 M2 AssertionError: Torch not compiled with CUDA enabled #3699

Closed: elcolie closed this issue 1 year ago

elcolie commented 1 year ago

Describe the bug

I am trying to run an img2img AI pipeline on my M2 MBP 2023. The terminal raises an error saying CUDA is not enabled on my system.

Reproduction

Run this script.

import cv2
from PIL import Image
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
import numpy as np
from diffusers.utils import load_image

image = load_image("/Users/sarit/Desktop/c.jpeg")
image = np.array(image)

low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
    # "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16
)

pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
    # , torch_dtype=torch.float16
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Remove if you do not have xformers installed
# see https://huggingface.co/docs/diffusers/v0.13.0/en/optimization/xformers#installing-xformers
# for installation instructions
# pipe.enable_xformers_memory_efficient_attention()

pipe.enable_model_cpu_offload()

image = pipe("bird", image, num_inference_steps=20).images[0]

image.save('images/bird_canny_out.png')

Logs

❯ python main.py
You have disabled the safety checker for <class 'diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet.StableDiffusionControlNetPipeline'> by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .
Traceback (most recent call last):
  File "/Users/sarit/study/try_openai/try_image_to_image/main.py", line 38, in <module>
    image = pipe("bird", image, num_inference_steps=20).images[0]
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/sarit/anaconda3/envs/try_openai/lib/python3.11/site-packages/torch/utils/_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^
  File "/Users/sarit/anaconda3/envs/try_openai/lib/python3.11/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py", line 902, in __call__
    prompt_embeds = self._encode_prompt(
                    ^^^^^^^^^^^^^^^^^^^^
  File "/Users/sarit/anaconda3/envs/try_openai/lib/python3.11/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_controlnet.py", line 412, in _encode_prompt
    text_input_ids.to(device),
    ^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/sarit/anaconda3/envs/try_openai/lib/python3.11/site-packages/torch/cuda/__init__.py", line 239, in _lazy_init
    raise AssertionError("Torch not compiled with CUDA enabled")
AssertionError: Torch not compiled with CUDA enabled
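
For context, `pipe.enable_model_cpu_offload()` relies on accelerate offloading modules to a CUDA device, which is why `_encode_prompt` ends up calling into `torch.cuda` on a machine with no CUDA build. A minimal device-selection sketch for Apple Silicon follows (an illustration, not part of the original report; the `device` variable is introduced here):

import torch

# Prefer CUDA where available, then Apple's MPS backend, then plain CPU.
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"

# pipe.to(device) would then take the place of pipe.enable_model_cpu_offload(),
# which assumes a CUDA device to offload to.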

System Info

Python 3.11.3
OSX 13.4

#
# This file is autogenerated by pip-compile with Python 3.10
# by the following command:
#
#    pip-compile --output-file=requirements.txt requirements.in
#
accelerate==0.19.0
    # via -r requirements.in
aiofiles==23.1.0
    # via gradio
aiohttp==3.8.4
    # via
    #   datasets
    #   fsspec
    #   gradio
    #   openai
aiosignal==1.3.1
    # via aiohttp
altair==5.0.0
    # via gradio
ansiwrap==0.8.4
    # via papermill
anyio==3.6.2
    # via
    #   httpcore
    #   starlette
appnope==0.1.3
    # via
    #   ipykernel
    #   ipython
asttokens==2.2.1
    # via stack-data
async-timeout==4.0.2
    # via aiohttp
attrs==23.1.0
    # via
    #   aiohttp
    #   jsonschema
autopep8==2.0.2
    # via ploomber
backcall==0.2.0
    # via ipython
backoff==2.2.1
    # via posthog
beautifulsoup4==4.12.2
    # via nbconvert
bleach==6.0.0
    # via nbconvert
certifi==2023.5.7
    # via
    #   httpcore
    #   httpx
    #   requests
charset-normalizer==3.1.0
    # via
    #   aiohttp
    #   requests
click==8.1.3
    # via
    #   papermill
    #   ploomber
    #   ploomber-core
    #   ploomber-engine
    #   uvicorn
colorama==0.4.6
    # via debuglater
comm==0.1.3
    # via ipykernel
contourpy==1.0.7
    # via matplotlib
cycler==0.11.0
    # via matplotlib
datasets==2.12.0
    # via -r requirements.in
debuglater==1.4.4
    # via ploomber-engine
debugpy==1.6.7
    # via ipykernel
decorator==5.1.1
    # via
    #   ipdb
    #   ipython
defusedxml==0.7.1
    # via nbconvert
diffusers==0.16.1
    # via -r requirements.in
dill==0.3.6
    # via
    #   datasets
    #   multiprocess
entrypoints==0.4
    # via papermill
executing==1.2.0
    # via stack-data
fastapi==0.95.2
    # via gradio
fastjsonschema==2.17.1
    # via nbformat
ffmpy==0.3.0
    # via gradio
filelock==3.12.0
    # via
    #   diffusers
    #   huggingface-hub
    #   torch
    #   transformers
fonttools==4.39.4
    # via matplotlib
frozenlist==1.3.3
    # via
    #   aiohttp
    #   aiosignal
fsspec[http]==2023.5.0
    # via
    #   datasets
    #   gradio-client
    #   huggingface-hub
gradio==3.32.0
    # via -r requirements.in
gradio-client==0.2.5
    # via gradio
h11==0.14.0
    # via
    #   httpcore
    #   uvicorn
httpcore==0.17.2
    # via httpx
httpx==0.24.1
    # via
    #   gradio
    #   gradio-client
huggingface-hub==0.14.1
    # via
    #   datasets
    #   diffusers
    #   gradio
    #   gradio-client
    #   transformers
humanize==4.6.0
    # via ploomber
idna==3.4
    # via
    #   anyio
    #   httpx
    #   requests
    #   yarl
importlib-metadata==6.6.0
    # via diffusers
ipdb==0.13.13
    # via ploomber
ipykernel==6.23.1
    # via ploomber
ipython==8.13.2
    # via
    #   ipdb
    #   ipykernel
    #   ploomber
    #   ploomber-engine
jedi==0.18.2
    # via ipython
jinja2==3.1.2
    # via
    #   altair
    #   gradio
    #   nbconvert
    #   ploomber
    #   ploomber-scaffold
    #   torch
jsonschema==4.17.3
    # via
    #   altair
    #   nbformat
jupyter-client==8.2.0
    # via
    #   ipykernel
    #   nbclient
    #   ploomber
jupyter-core==5.3.0
    # via
    #   ipykernel
    #   jupyter-client
    #   nbclient
    #   nbconvert
    #   nbformat
jupyterlab-pygments==0.2.2
    # via nbconvert
jupytext==1.14.5
    # via ploomber
kiwisolver==1.4.4
    # via matplotlib
linkify-it-py==2.0.2
    # via markdown-it-py
markdown-it-py[linkify]==2.2.0
    # via
    #   gradio
    #   jupytext
    #   mdit-py-plugins
markupsafe==2.1.2
    # via
    #   gradio
    #   jinja2
    #   nbconvert
matplotlib==3.7.1
    # via gradio
matplotlib-inline==0.1.6
    # via
    #   ipykernel
    #   ipython
mdit-py-plugins==0.3.3
    # via
    #   gradio
    #   jupytext
mdurl==0.1.2
    # via markdown-it-py
mistune==2.0.5
    # via
    #   nbconvert
    #   ploomber
monotonic==1.6
    # via posthog
mpmath==1.3.0
    # via sympy
multidict==6.0.4
    # via
    #   aiohttp
    #   yarl
multiprocess==0.70.14
    # via datasets
mypy-extensions==1.0.0
    # via typing-inspect
nbclient==0.7.4
    # via
    #   nbconvert
    #   papermill
    #   ploomber-engine
nbconvert==7.4.0
    # via ploomber
nbformat==5.8.0
    # via
    #   jupytext
    #   nbclient
    #   nbconvert
    #   papermill
    #   ploomber
    #   ploomber-engine
nest-asyncio==1.5.6
    # via ipykernel
networkx==3.1
    # via
    #   ploomber
    #   torch
numpy==1.24.3
    # via
    #   accelerate
    #   altair
    #   contourpy
    #   datasets
    #   diffusers
    #   gradio
    #   matplotlib
    #   opencv-contrib-python
    #   pandas
    #   pyarrow
    #   torchvision
    #   transformers
    #   xformers
openai==0.27.7
    # via -r requirements.in
opencv-contrib-python==4.7.0.72
    # via -r requirements.in
orjson==3.8.13
    # via gradio
packaging==23.1
    # via
    #   accelerate
    #   datasets
    #   gradio-client
    #   huggingface-hub
    #   ipykernel
    #   matplotlib
    #   nbconvert
    #   transformers
pandas==2.0.1
    # via
    #   altair
    #   datasets
    #   gradio
pandocfilters==1.5.0
    # via nbconvert
papermill==2.4.0
    # via ploomber
parso==0.8.3
    # via
    #   jedi
    #   ploomber
    #   ploomber-engine
pexpect==4.8.0
    # via ipython
pickleshare==0.7.5
    # via ipython
pillow==9.5.0
    # via
    #   diffusers
    #   gradio
    #   matplotlib
    #   torchvision
platformdirs==3.5.1
    # via jupyter-core
ploomber==0.22.3
    # via -r requirements.in
ploomber-core==0.2.10
    # via
    #   ploomber
    #   ploomber-engine
ploomber-engine==0.0.28
    # via ploomber
ploomber-scaffold==0.3.1
    # via ploomber
posthog==3.0.1
    # via
    #   ploomber
    #   ploomber-core
prompt-toolkit==3.0.38
    # via ipython
psutil==5.9.5
    # via
    #   accelerate
    #   ipykernel
ptyprocess==0.7.0
    # via pexpect
pure-eval==0.2.2
    # via stack-data
pyarrow==12.0.0
    # via datasets
pycodestyle==2.10.0
    # via autopep8
pydantic==1.10.7
    # via
    #   fastapi
    #   gradio
    #   ploomber
pydub==0.25.1
    # via gradio
pyflakes==3.0.1
    # via ploomber
pygments==2.15.1
    # via
    #   gradio
    #   ipython
    #   nbconvert
    #   ploomber
pyparsing==3.0.9
    # via matplotlib
pypdf==3.9.0
    # via -r requirements.in
pyre-extensions==0.0.29
    # via xformers
pyrsistent==0.19.3
    # via jsonschema
python-dateutil==2.8.2
    # via
    #   jupyter-client
    #   matplotlib
    #   pandas
    #   posthog
python-dotenv==1.0.0
    # via -r requirements.in
python-multipart==0.0.6
    # via gradio
pytz==2023.3
    # via pandas
pyyaml==6.0
    # via
    #   accelerate
    #   datasets
    #   gradio
    #   huggingface-hub
    #   jupytext
    #   papermill
    #   ploomber
    #   ploomber-core
    #   transformers
pyzmq==25.0.2
    # via
    #   ipykernel
    #   jupyter-client
regex==2023.5.5
    # via
    #   diffusers
    #   tiktoken
    #   transformers
requests==2.30.0
    # via
    #   datasets
    #   diffusers
    #   fsspec
    #   gradio
    #   gradio-client
    #   huggingface-hub
    #   openai
    #   papermill
    #   posthog
    #   responses
    #   tiktoken
    #   torchvision
    #   transformers
responses==0.18.0
    # via datasets
semantic-version==2.10.0
    # via gradio
six==1.16.0
    # via
    #   asttokens
    #   bleach
    #   posthog
    #   python-dateutil
sniffio==1.3.0
    # via
    #   anyio
    #   httpcore
    #   httpx
soupsieve==2.4.1
    # via beautifulsoup4
sqlalchemy==2.0.15
    # via ploomber
sqlparse==0.4.4
    # via ploomber
stack-data==0.6.2
    # via ipython
starlette==0.27.0
    # via fastapi
sympy==1.12
    # via torch
tabulate==0.9.0
    # via ploomber
tenacity==8.2.2
    # via papermill
textwrap3==0.9.2
    # via ansiwrap
tiktoken==0.4.0
    # via -r requirements.in
tinycss2==1.2.1
    # via nbconvert
tokenizers==0.13.3
    # via transformers
toml==0.10.2
    # via jupytext
tomli==2.0.1
    # via
    #   autopep8
    #   ipdb
toolz==0.12.0
    # via altair
torch==2.0.1
    # via
    #   -r requirements.in
    #   accelerate
    #   torchvision
    #   xformers
torchvision==0.15.2
    # via -r requirements.in
tornado==6.3.2
    # via
    #   ipykernel
    #   jupyter-client
tqdm==4.65.0
    # via
    #   datasets
    #   huggingface-hub
    #   openai
    #   papermill
    #   ploomber
    #   ploomber-engine
    #   transformers
traitlets==5.9.0
    # via
    #   comm
    #   ipykernel
    #   ipython
    #   jupyter-client
    #   jupyter-core
    #   matplotlib-inline
    #   nbclient
    #   nbconvert
    #   nbformat
transformers==4.29.2
    # via -r requirements.in
typing-extensions==4.5.0
    # via
    #   altair
    #   gradio
    #   gradio-client
    #   huggingface-hub
    #   pydantic
    #   pyre-extensions
    #   sqlalchemy
    #   torch
    #   typing-inspect
typing-inspect==0.9.0
    # via pyre-extensions
tzdata==2023.3
    # via pandas
uc-micro-py==1.0.2
    # via linkify-it-py
urllib3==2.0.2
    # via
    #   requests
    #   responses
uvicorn==0.22.0
    # via gradio
wcwidth==0.2.6
    # via prompt-toolkit
webencodings==0.5.1
    # via
    #   bleach
    #   tinycss2
websockets==11.0.3
    # via
    #   gradio
    #   gradio-client
xformers==0.0.20
    # via -r requirements.in
xxhash==3.2.0
    # via datasets
yarl==1.9.2
    # via aiohttp
zipp==3.15.0
    # via importlib-metadata

elcolie commented 1 year ago

I can run the code by removing some arguments:

  1. Remove `torch_dtype=torch.float16`
  2. Remove `pipe.enable_model_cpu_offload()`

My runnable code on the M2 (CPU):
    
    """Run on CPU M2 not support MPS."""
    import cv2
    from PIL import Image
    from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
    import torch
    import numpy as np
    from diffusers.utils import load_image

image = load_image("control.png") image = np.array(image) control_image = Image.fromarray(image)

low_threshold = 100 high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) image = Image.fromarray(image)

controlnet = ControlNetModel.from_pretrained( "lllyasviel/control_v11p_sd15_canny", )

pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, )

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

generator = torch.manual_seed(31) prompt: str = "a blue paradise bird in the jungle" image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]

image.save('images/bird_canny_out.png')
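
If the MPS backend is preferred over plain CPU, the diffusers optimization docs suggest moving the pipeline to "mps" and enabling attention slicing; a hedged sketch of that variant (untested on this setup, and the one-step warmup pass is only recommended for older PyTorch versions on MPS):

pipe = pipe.to("mps")
pipe.enable_attention_slicing()  # reduces peak memory use on Apple Silicon

# Warmup pass, recommended for PyTorch < 2.0 on MPS
_ = pipe(prompt, num_inference_steps=1, image=control_image)

image = pipe(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]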