lshqqytiger / stable-diffusion-webui-amdgpu-forge

Forge for stable-diffusion-webui-amdgpu (formerly stable-diffusion-webui-directml)
GNU Affero General Public License v3.0

Error: Descriptors cannot be created directly. #20

Closed: Dwanvea closed this issue 2 months ago

Dwanvea commented 2 months ago
Traceback (most recent call last):
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\launch.py", line 51, in <module>
    main()
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\launch.py", line 47, in main
    start()
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\modules\launch_utils.py", line 658, in start
    import webui
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\webui.py", line 21, in <module>
    initialize.imports()
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\modules\initialize.py", line 16, in imports
    import pytorch_lightning  # noqa: F401
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\__init__.py", line 35, in <module>
    from pytorch_lightning.callbacks import Callback  # noqa: E402
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\callbacks\__init__.py", line 28, in <module>
    from pytorch_lightning.callbacks.pruning import ModelPruning
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\callbacks\pruning.py", line 31, in <module>
    from pytorch_lightning.core.module import LightningModule
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\core\__init__.py", line 16, in <module>
    from pytorch_lightning.core.module import LightningModule
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\core\module.py", line 48, in <module>
    from pytorch_lightning.trainer.connectors.logger_connector.fx_validator import _FxValidator
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\trainer\__init__.py", line 17, in <module>
    from pytorch_lightning.trainer.trainer import Trainer
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\trainer\trainer.py", line 58, in <module>
    from pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\__init__.py", line 15, in <module>
    from pytorch_lightning.loops.batch import TrainingBatchLoop  # noqa: F401
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\batch\__init__.py", line 15, in <module>
    from pytorch_lightning.loops.batch.training_batch_loop import TrainingBatchLoop  # noqa: F401
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\batch\training_batch_loop.py", line 20, in <module>
    from pytorch_lightning.loops.optimization.manual_loop import _OUTPUTS_TYPE as _MANUAL_LOOP_OUTPUTS_TYPE
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\optimization\__init__.py", line 15, in <module>
    from pytorch_lightning.loops.optimization.manual_loop import ManualOptimization  # noqa: F401
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\optimization\manual_loop.py", line 23, in <module>
    from pytorch_lightning.loops.utilities import _build_training_step_kwargs, _extract_hiddens
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\loops\utilities.py", line 29, in <module>
    from pytorch_lightning.strategies.parallel import ParallelStrategy
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\strategies\__init__.py", line 15, in <module>
    from pytorch_lightning.strategies.bagua import BaguaStrategy  # noqa: F401
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\strategies\bagua.py", line 29, in <module>
    from pytorch_lightning.plugins.precision import PrecisionPlugin
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\plugins\__init__.py", line 7, in <module>
    from pytorch_lightning.plugins.precision.apex_amp import ApexMixedPrecisionPlugin
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\plugins\precision\__init__.py", line 18, in <module>
    from pytorch_lightning.plugins.precision.fsdp_native_native_amp import FullyShardedNativeNativeMixedPrecisionPlugin
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\pytorch_lightning\plugins\precision\fsdp_native_native_amp.py", line 24, in <module>
    from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\fsdp\__init__.py", line 1, in <module>
    from ._flat_param import FlatParameter as FlatParameter
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\fsdp\_flat_param.py", line 30, in <module>
    from torch.distributed.fsdp._common_utils import (
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\fsdp\_common_utils.py", line 35, in <module>
    from torch.distributed.fsdp._fsdp_extensions import FSDPExtensions
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\fsdp\_fsdp_extensions.py", line 8, in <module>
    from torch.distributed._tensor import DeviceMesh, DTensor
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\_tensor\__init__.py", line 6, in <module>
    import torch.distributed._tensor.ops
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\_tensor\ops\__init__.py", line 2, in <module>
    from .embedding_ops import *  # noqa: F403
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\_tensor\ops\embedding_ops.py", line 8, in <module>
    import torch.distributed._functional_collectives as funcol
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\_functional_collectives.py", line 12, in <module>
    from . import _functional_collectives_impl as fun_col_impl
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\distributed\_functional_collectives_impl.py", line 36, in <module>
    from torch._dynamo import assume_constant_result
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\_dynamo\__init__.py", line 2, in <module>
    from . import convert_frame, eval_frame, resume_execution
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\_dynamo\convert_frame.py", line 40, in <module>
    from . import config, exc, trace_rules
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\_dynamo\trace_rules.py", line 50, in <module>
    from .variables import (
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\_dynamo\variables\__init__.py", line 34, in <module>
    from .higher_order_ops import (
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\_dynamo\variables\higher_order_ops.py", line 13, in <module>
    import torch.onnx.operators
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\onnx\__init__.py", line 59, in <module>
    from ._internal.onnxruntime import (
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\torch\onnx\_internal\onnxruntime.py", line 36, in <module>
    import onnx
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\onnx\__init__.py", line 6, in <module>
    from onnx.external_data_helper import load_external_data_for_model, write_external_data_tensors, convert_model_to_external_data
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\onnx\external_data_helper.py", line 9, in <module>
    from .onnx_pb import TensorProto, ModelProto, AttributeProto, GraphProto
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\onnx\onnx_pb.py", line 4, in <module>
    from .onnx_ml_pb2 import *  # noqa
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\onnx\onnx_ml_pb2.py", line 33, in <module>
    _descriptor.EnumValueDescriptor(
  File "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\lib\site-packages\google\protobuf\descriptor.py", line 789, in __new__
    _message.Message._CheckCalledFromGeneratedFile()
TypeError: Descriptors cannot be created directly.
If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0.
If you cannot immediately regenerate your protos, some other possible workarounds are:
 1. Downgrade the protobuf package to 3.20.x or lower.
 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower).

More information: https://developers.google.com/protocol-buffers/docs/news/2022-05-06#python-updates

What should I do?
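
The two workarounds listed at the end of the error message translate to commands along these lines, run from cmd with the webui's venv active (the protobuf version pin is illustrative; per the message, anything in the 3.20.x series or lower satisfies the check):

    :: Option 1: downgrade protobuf inside the webui venv (illustrative version pin)
    "E:\Stable Diffusion\stable-diffusion-webui-amdgpu-forge\venv\Scripts\activate.bat"
    pip install protobuf==3.20.3

    :: Option 2: fall back to the pure-Python protobuf implementation (avoids the descriptor check, but is much slower)
    set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python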

Dwanvea commented 2 months ago

running "pip cache purge" in cmd, fixed the issue.