Open me-fraud opened 1 year ago
Same issue here with CodeGen model
Has anybody solved this problem?
Ran into the same issue.
Same Issue. Appreciate any hint.
Same issue — has anybody solved this problem?
same issue
@whcjb, can you please share full repro details? Thanks!
Hello!
I've encountered an issue trying to run dreambooth training with deepspeed in kohya_ss.
I am running into the error, which seems to occur inside deepspeed stage_1_and_2.py, lines 508–509: lp_name = self.param_names[lp]; param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
Additionally, I've tried wrapping these lines in a try/except to see what happens, but ran into issues in the next parts of the code in engine.py (although I'm not sure whether that is related).
my configuration is: 1GPU RTX 3060 (12Gb VRAM) WSL2 Ubuntu 22.04 in Windows 11 Cuda 11.7 Python 3.10.6 Torch 2.0.1+cu117 Accelerate 0.19.0 deepspeed 0.8.3 (although the problem is the same with 0.9.3) in training settings precision is set to fp16
deepspeed configuration JSON: { "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "reduce_scatter": true, "reduce_bucket_size": 2e8, "overlap_comm": true, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto" }
console output:
─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /home/me/kohya_ss/train_db.py:482 in │
│ │
│ 479 │ args = parser.parse_args() │
│ 480 │ args = train_util.read_config_from_file(args, parser) │
│ 481 │ │
│ ❱ 482 │ train(args) │
│ 483 │
│ │
│ /home/me/kohya_ss/train_db.py:202 in train │
│ │
│ 199 │ │
│ 200 │ # acceleratorがなんかよろしくやってくれるらしい │
│ 201 │ if train_text_encoder: │
│ ❱ 202 │ │ unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prep │
│ 203 │ │ │ unet, text_encoder, optimizer, train_dataloader, lr_scheduler │
│ 204 │ │ ) │
│ 205 │ else: │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/accelerate/accelerator.py:1139 in prepare │
│ │
│ 1136 │ │ │ if self.device.type == "cpu" and self.state.ipex_plugin is not None: │
│ 1137 │ │ │ │ args = self._prepare_ipex(args) │
│ 1138 │ │ if self.distributed_type == DistributedType.DEEPSPEED: │
│ ❱ 1139 │ │ │ result = self._prepare_deepspeed(args) │
│ 1140 │ │ elif self.distributed_type == DistributedType.MEGATRON_LM: │
│ 1141 │ │ │ result = self._prepare_megatron_lm(*args) │
│ 1142 │ │ else: │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/accelerate/accelerator.py:1446 in │
│ _prepare_deepspeed │
│ │
│ 1443 │ │ │ │ │ │ if type(scheduler).__name__ in deepspeed.runtime.lr_schedules.VA │
│ 1444 │ │ │ │ │ │ │ kwargs["lr_scheduler"] = scheduler │
│ 1445 │ │ │ │
│ ❱ 1446 │ │ │ engine, optimizer, _, lr_scheduler = deepspeed.initialize(**kwargs) │
│ 1447 │ │ │ if optimizer is not None: │
│ 1448 │ │ │ │ optimizer = DeepSpeedOptimizerWrapper(optimizer) │
│ 1449 │ │ │ if scheduler is not None: │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/__init__.py:125 in initialize │
│ │
│ 122 │ assert model is not None, "deepspeed.initialize requires a model" │
│ 123 │ │
│ 124 │ if not isinstance(model, PipelineModule): │
│ ❱ 125 │ │ engine = DeepSpeedEngine(args=args, │
│ 126 │ │ │ │ │ │ │ │ model=model, │
│ 127 │ │ │ │ │ │ │ │ optimizer=optimizer, │
│ 128 │ │ │ │ │ │ │ │ model_parameters=model_parameters, │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py:340 in __init__ │
│ │
│ 337 │ │ │ model_parameters = list(model_parameters) │
│ 338 │ │ │
│ 339 │ │ if has_optimizer: │
│ ❱ 340 │ │ │ self._configure_optimizer(optimizer, model_parameters) │
│ 341 │ │ │ self._configure_lr_scheduler(lr_scheduler) │
│ 342 │ │ │ self._report_progress(0) │
│ 343 │ │ elif self.zero_optimization(): │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py:1298 in │
│ _configure_optimizer │
│ │
│ 1295 │ │ optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer) │
│ 1296 │ │ │
│ 1297 │ │ if optimizer_wrapper == ZERO_OPTIMIZATION: │
│ ❱ 1298 │ │ │ self.optimizer = self._configure_zero_optimizer(basic_optimizer) │
│ 1299 │ │ elif optimizer_wrapper == AMP: │
│ 1300 │ │ │ amp_params = self.amp_params() │
│ 1301 │ │ │ log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/runtime/engine.py:1547 in │
│ _configure_zero_optimizer │
│ │
│ 1544 │ │ │ │ │ │ "Pipeline parallelism does not support overlapped communication, │
│ 1545 │ │ │ │ │ ) │
│ 1546 │ │ │ │ │ overlap_comm = False │
│ ❱ 1547 │ │ │ optimizer = DeepSpeedZeroOptimizer( │
│ 1548 │ │ │ │ optimizer, │
│ 1549 │ │ │ │ self.param_names, │
│ 1550 │ │ │ │ timers=timers, │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:527 │
│ in __init__ │
│ │
│ 524 │ │ │
│ 525 │ │ self._link_all_hp_params() │
│ 526 │ │ self._enable_universal_checkpoint() │
│ ❱ 527 │ │ self._param_slice_mappings = self._create_param_mapping() │
│ 528 │ │
│ 529 │ def _enable_universal_checkpoint(self): │
│ 530 │ │ for lp_param_group in self.bit16_groups: │
│ │
│ /home/me/kohya_ss/venv/lib/python3.10/site-packages/deepspeed/runtime/zero/stage_1_and_2.py:539 │
│ in _create_param_mapping │
│ │
│ 536 │ │ │ param_mapping_per_group = OrderedDict() │
│ 537 │ │ │ for lp in self.bit16_groups[i]: │
│ 538 │ │ │ │ if lp._hp_mapping is not None: │
│ ❱ 539 │ │ │ │ │ lp_name = self.param_names[lp] │
│ 540 │ │ │ │ │ param_mapping_per_group[ │
│ 541 │ │ │ │ │ │ lp_name] = lp._hp_mapping.get_hp_fragment_address() │
│ 542 │ │ │ param_mapping.append(param_mapping_per_group) │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
KeyError: Parameter containing:
tensor([[[[-2.5410e-02, 2.5043e-02, 7.1978e-02],
[-1.3399e-02, -1.3034e-01, 1.1476e-01],
[-9.7030e-03, -1.3150e-02, 2.8044e-02]],
[02:21:28] ERROR failed (exitcode: 1) local_rank: 0 (pid: 129535) of binary: /home/me/kohya_ss/venv/bin/python3