Closed 14790897 closed 1 year ago
civitai下载的模型名字没自动修改,Length: 2400040320 (2.2G) [application/octet-stream] Saving to: ‘/content/Lora/sd_model/102222
好的,我有空会看看。
所以现在问题是解决了吗?和之前的问题有点像 https://github.com/WSH032/kohya-config-webui/issues/4 ,应该确实是没改扩展名的问题。
是在用预设的模型,还是自定义的url下载?
──────────────────── Traceback (most recent call last) ──────────────────────╮ │ /content/sd-scripts/train_network.py:873 in │
│ │
│ 870 │ args = parser.parse_args() │
│ 871 │ args = train_util.read_config_from_file(args, parser) │
│ 872 │ │
│ ❱ 873 │ train(args) │
│ 874 │
│ │
│ /content/sd-scripts/train_network.py:168 in train │
│ │
│ 165 │ weight_dtype, save_dtype = train_util.prepare_dtype(args) │
│ 166 │ │
│ 167 │ # モデルを読み込む │
│ ❱ 168 │ textencoder, vae, unet, = train_util.load_target_model(args, we │
│ 169 │ │
│ 170 │ # モデルに xformers とか memory efficient attention を組み込む │
│ 171 │ train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xfor │
│ │
│ /content/sd-scripts/library/train_util.py:3149 in load_target_model │
│ │
│ 3146 │ │ if pi == accelerator.state.local_process_index: │
│ 3147 │ │ │ print(f"loading model for process {accelerator.state.loca │
│ 3148 │ │ │ │
│ ❱ 3149 │ │ │ text_encoder, vae, unet, load_stable_diffusion_format = │
│ 3150 │ │ │ │ args, weight_dtype, accelerator.device if args.lowram │
│ 3151 │ │ │ ) │
│ 3152 │
│ │
│ /content/sd-scripts/library/train_util.py:3115 in _load_target_model │
│ │
│ 3112 │ load_stable_diffusion_format = os.path.isfile(name_or_path) # de │
│ 3113 │ if load_stable_diffusion_format: │
│ 3114 │ │ print(f"load StableDiffusion checkpoint: {name_or_path}") │
│ ❱ 3115 │ │ text_encoder, vae, unet = model_util.load_models_from_stable │
│ 3116 │ else: │
│ 3117 │ │ # Diffusers model is loaded to CPU │
│ 3118 │ │ print(f"load Diffusers pretrained models: {name_or_path}") │
│ │
│ /content/sd-scripts/library/model_util.py:856 in │
│ load_models_from_stable_diffusion_checkpoint │
│ │
│ 853 │
│ 854 # TODO dtype指定の動作が怪しいので確認する text_encoderを指定形式で作 │
│ 855 def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, devic │
│ ❱ 856 │ _, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt │
│ 857 │ │
│ 858 │ # Convert the UNet2DConditionModel model. │
│ 859 │ unet_config = create_unet_diffusers_config(v2, unet_use_linear_pr │
│ │
│ /content/sd-scripts/library/model_util.py:833 in │
│ load_checkpoint_with_text_encoder_conversion │
│ │
│ 830 │ │ checkpoint = None │
│ 831 │ │ state_dict = load_file(ckpt_path) # , device) # may causes e │
│ 832 │ else: │
│ ❱ 833 │ │ checkpoint = torch.load(ckpt_path, map_location=device) │
│ 834 │ │ if "state_dict" in checkpoint: │
│ 835 │ │ │ state_dict = checkpoint["state_dict"] │
│ 836 │ │ else: │
│ │
│ /usr/local/lib/python3.10/dist-packages/torch/serialization.py:815 in load │
│ │
│ 812 │ │ │ │ return _legacy_load(opened_file, map_location, _weigh │
│ 813 │ │ │ except RuntimeError as e: │
│ 814 │ │ │ │ raise pickle.UnpicklingError(UNSAFE_MESSAGE + str(e)) │
│ ❱ 815 │ │ return _legacy_load(opened_file, map_location, pickle_module, │
│ 816 │
│ 817 │
│ 818 # Register pickling support for layout instances such as │
│ │
│ /usr/local/lib/python3.10/dist-packages/torch/serialization.py:1033 in │
│ _legacy_load │
│ │
│ 1030 │ │ │ f"Received object of type \"{type(f)}\". Please update to │
│ 1031 │ │ │ "functionality.") │
│ 1032 │ │
│ ❱ 1033 │ magic_number = pickle_module.load(f, pickle_load_args) │
│ 1034 │ if magic_number != MAGIC_NUMBER: │
│ 1035 │ │ raise RuntimeError("Invalid magic number; corrupt file?") │
│ 1036 │ protocol_version = pickle_module.load(f, pickle_load_args) │
╰──────────────────────────────────────────────────────────────────────────────╯
UnpicklingError: invalid load key, '^'.
╭───────────────────── Traceback (most recent call last) ──────────────────────╮
│ /usr/local/bin/accelerate:8 in │
│ │
│ 5 from accelerate.commands.accelerate_cli import main │
│ 6 if __name__ == '__main__': │
│ 7 │ sys.argv[0] = re.sub(r'(-script.pyw|.exe)?$', '', sys.argv[0]) │
│ ❱ 8 │ sys.exit(main()) │
│ 9 │
│ │
│ /usr/local/lib/python3.10/dist-packages/accelerate/commands/accelerate_cli.p │
│ y:45 in main │
│ │
│ 42 │ │ exit(1) │
│ 43 │ │
│ 44 │ # Run │
│ ❱ 45 │ args.func(args) │
│ 46 │
│ 47 │
│ 48 if __name__ == "__main__": │
│ │
│ /usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py:1104 │
│ in launch_command │
│ │
│ 1101 │ elif defaults is not None and defaults.compute_environment == Com │
│ 1102 │ │ sagemaker_launcher(defaults, args) │
│ 1103 │ else: │
│ ❱ 1104 │ │ simple_launcher(args) │
│ 1105 │
│ 1106 │
│ 1107 def main(): │
│ │
│ /usr/local/lib/python3.10/dist-packages/accelerate/commands/launch.py:567 in │
│ simple_launcher │
│ │
│ 564 │ process = subprocess.Popen(cmd, env=current_env) │
│ 565 │ process.wait() │
│ 566 │ if process.returncode != 0: │
│ ❱ 567 │ │ raise subprocess.CalledProcessError(returncode=process.return │
│ 568 │
│ 569 │
│ 570 def multi_gpu_launcher(args): │
╰──────────────────────────────────────────────────────────────────────────────╯
CalledProcessError: Command '['/usr/bin/python3', 'train_network.py',
'--config_file=config_file.toml', '--sample_prompts=sample_prompts.txt']'
returned non-zero exit status 1.