How do I solve this problem? The error is shown above, and my config is attached below.
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from peft import LoraConfig
from torch.optim import AdamW
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig, CLIPImageProcessor,
                          CLIPVisionModel)
from xtuner.dataset import LLaVADataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import llava_map_fn, template_map_fn_factory
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.hooks import DatasetInfoHook, EvaluateChatHook
from xtuner.engine.runner import TrainLoop
from xtuner.model import LLaVAModel
from xtuner.utils import PROMPT_TEMPLATE
#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
llm_name_or_path = ''
visual_encoder_name_or_path = ''

# Specify the pretrained pth
pretrained_pth = ''  # noqa: E501

# Data
data_root = ''
data_path = data_root + ''
image_folder = ''
prompt_template = PROMPT_TEMPLATE.internlm2_chat
max_length = int(2048 - (336 / 14)**2)
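# i.e. (336 / 14)**2 = 576 tokens are reserved for image patches (assuming the
# usual 336px, patch-14 CLIP vision encoder), leaving 2048 - 576 = 1472 tokens
# for the text part of each sample.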
# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 4
dataloader_num_workers = 4
max_epochs = 1
optim_type = AdamW
lr = 2e-5
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip
warmup_ratio = 0.03
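# With gradient accumulation, one optimizer step effectively covers
# batch_size * accumulative_counts = 1 * 4 = 4 samples per device
# (times the number of GPUs in a distributed run).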
# Save
save_steps = 50000
save_total_limit = 2  # Maximum checkpoints to keep (-1 means unlimited)
# Evaluate the generation performance during the training
evaluation_freq = 5000
SYSTEM = ''
evaluation_images = ''
evaluation_inputs = ['请用一段话生成报告', 'Please describe this picture']

#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=llm_name_or_path,
    trust_remote_code=True,
    padding_side='right')
image_processor = dict(
    type=CLIPImageProcessor.from_pretrained,
    pretrained_model_name_or_path=visual_encoder_name_or_path,
    trust_remote_code=True)
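# Both dicts above are mmengine-style lazy builders: when the config is built,
# the runner calls the callable given as `type` (AutoTokenizer.from_pretrained /
# CLIPImageProcessor.from_pretrained) with the remaining keys as keyword arguments.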
model = dict(
    type=LLaVAModel,
    freeze_llm=False,
    freeze_visual_encoder=True,
    pretrained_pth=pretrained_pth,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=llm_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float32),
    quantization_config=dict(
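# For reference only (not necessarily the values used in this config): in
# xtuner's stock QLoRA LLaVA configs, `quantization_config` sits inside the
# `llm` dict and is built from `transformers.BitsAndBytesConfig`, with keys
# such as `load_in_4bit`, `bnb_4bit_compute_dtype` and `bnb_4bit_quant_type`.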
#######################################################################
#                      PART 3  Dataset & Dataloader                   #
#######################################################################
llava_dataset = dict(
    type=LLaVADataset,
    data_path=data_path,
    image_folder=image_folder,
    tokenizer=tokenizer,
    image_processor=image_processor,
    dataset_map_fn=llava_map_fn,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    max_length=max_length,
    pad_image_to_square=True)
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    pin_memory=True,
    dataset=llava_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=default_collate_fn))
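# As I understand it, LengthGroupedSampler buckets samples with a similar
# 'modality_length' so that each per-device group of
# batch_size * accumulative_counts = 4 samples needs less padding.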
#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')
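# AmpOptimWrapper runs AdamW under float16 mixed precision with dynamic loss
# scaling, clips gradients at max_norm=1, and only steps the optimizer once
# every accumulative_counts=4 forward/backward passes.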
# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]
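# Read together: the learning rate warms up linearly from lr * 1e-5 to lr over
# the first warmup_ratio * max_epochs = 0.03 epochs (~3% of training), then
# follows cosine annealing from lr down to eta_min=0.0 until the end of training.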
# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)
#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        image_processor=image_processor,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        evaluation_images=evaluation_images,
        system=SYSTEM,
        prompt_template=prompt_template)
]
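# If I read these hooks correctly, DatasetInfoHook prints information about the
# processed dataset at startup, and EvaluateChatHook generates answers for
# `evaluation_images` / `evaluation_inputs` every evaluation_freq = 5000
# iterations so generation quality can be checked during training.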
# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)
# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)
# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable deterministic
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)