NSTiwari / Fine-tune-IDEFICS-Vision-Language-Model

This repository demonstrates data preparation and fine-tuning of the IDEFICS Vision Language Model.
MIT License

TypeError: zip() takes no keyword arguments #2

Open · feisuo opened this issue 1 week ago

feisuo commented 1 week ago

Hi, I am getting the following error when executing `trainer.train()`:


```
TypeError                                 Traceback (most recent call last)
Cell In[13], line 1
----> 1 trainer.train()

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/transformers/trainer.py:2052, in Trainer.train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
   2050     hf_hub_utils.enable_progress_bars()
   2051 else:
-> 2052     return inner_training_loop(
   2053         args=args,
   2054         resume_from_checkpoint=resume_from_checkpoint,
   2055         trial=trial,
   2056         ignore_keys_for_eval=ignore_keys_for_eval,
   2057     )

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/transformers/trainer.py:2204, in Trainer._inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
   2202         model = self.accelerator.prepare(self.model)
   2203     else:
-> 2204         model, self.optimizer = self.accelerator.prepare(self.model, self.optimizer)
   2205 else:
   2206     # to handle cases wherein we pass "DummyScheduler" such as when it is specified in DeepSpeed config.
   2207     model, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
   2208         self.model, self.optimizer, self.lr_scheduler
   2209     )

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/accelerate/accelerator.py:1326, in Accelerator.prepare(self, device_placement, *args)
   1324     # MS-AMP will handle the device placement
   1325     device_placement = [False for _ in args]
-> 1326 result = tuple(
   1327     self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
   1328 )
   1329 result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
   1330 if tpu_should_fix_optimizer:
   1331     # 2. grabbing new model parameters

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/accelerate/accelerator.py:1327, in <genexpr>(.0)
   1324     # MS-AMP will handle the device placement
   1325     device_placement = [False for _ in args]
   1326 result = tuple(
-> 1327     self._prepare_one(obj, first_pass=True, device_placement=d) for obj, d in zip(args, device_placement)
   1328 )
   1329 result = tuple(self._prepare_one(obj, device_placement=d) for obj, d in zip(result, device_placement))
   1330 if tpu_should_fix_optimizer:
   1331     # 2. grabbing new model parameters

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/accelerate/accelerator.py:1202, in Accelerator._prepare_one(self, obj, first_pass, device_placement)
   1200     return self.prepare_model(obj, device_placement=device_placement)
   1201 elif isinstance(obj, torch.optim.Optimizer):
-> 1202     optimizer = self.prepare_optimizer(obj, device_placement=device_placement)
   1203     return optimizer
   1204 # Second pass of preparation: LR scheduler (which need the full list of optimizers)

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/accelerate/accelerator.py:2119, in Accelerator.prepare_optimizer(self, optimizer, device_placement)
   2117 if device_placement is None:
   2118     device_placement = self.device_placement
-> 2119 optimizer = AcceleratedOptimizer(optimizer, device_placement=device_placement, scaler=self.scaler)
   2120 self._optimizers.append(optimizer)
   2121 return optimizer

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/accelerate/optimizer.py:75, in AcceleratedOptimizer.__init__(self, optimizer, device_placement, scaler)
     73 else:
     74     state_dict = move_to_device(state_dict, self.accelerator_state.device)
---> 75 self.optimizer.load_state_dict(state_dict)

File ~/miniconda3/envs/vision_modle/lib/python3.9/site-packages/bitsandbytes/optim/optimizer.py:176, in Optimizer8bit.load_state_dict(self, state_dict, move_to_device)
    174 param_lens = (len(g["params"]) for g in groups)
    175 saved_lens = (len(g["params"]) for g in saved_groups)
--> 176 if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens, strict=True)):
    177     raise ValueError(
    178         "loaded state dict contains a parameter group that doesn't match the size of optimizer's group",
    179     )
    181 # Update the state

TypeError: zip() takes no keyword arguments
```
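
For context, `zip()` only accepts the `strict=` keyword on Python 3.10 and later (PEP 618), and the traceback above shows the environment is running Python 3.9, so the `zip(..., strict=True)` call in `bitsandbytes/optim/optimizer.py` fails with exactly this `TypeError`. A minimal standalone sketch (not code from this repo) that shows the version-dependent behavior:

```python
import sys

# Hypothetical stand-ins for the parameter-group length checks in bitsandbytes.
param_lens = (2, 2, 4)
saved_lens = (2, 2, 4)

try:
    # zip(..., strict=True) was added in Python 3.10 (PEP 618); on 3.9 the
    # keyword itself raises "TypeError: zip() takes no keyword arguments".
    mismatch = any(p != s for p, s in zip(param_lens, saved_lens, strict=True))
    print("param group sizes match:", not mismatch)
except TypeError as err:
    print(f"Python {sys.version_info.major}.{sys.version_info.minor}: {err}")
```

On Python 3.10+ the same call simply enforces that both iterables have equal length instead of raising.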

feisuo commented 1 week ago

Resolved it. Changing the bitsandbytes version to 0.43.2 fixed the error.
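
For anyone else hitting this, a small sanity check before calling `trainer.train()` may help; this is only a sketch, with the 0.43.2 pin taken from the comment above and the 3.10 threshold from PEP 618, so verify both against your own setup:

```python
import sys

import bitsandbytes as bnb

# Report the interpreter and bitsandbytes versions in the active environment.
print("Python:", sys.version.split()[0])
print("bitsandbytes:", bnb.__version__)

# zip(strict=...) requires Python >= 3.10, so on an older interpreter stay on a
# bitsandbytes release that does not use it (0.43.2 per the comment above).
if sys.version_info < (3, 10):
    print("On Python < 3.10, consider: pip install bitsandbytes==0.43.2")
```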