InternLM / Tutorial

LLM Tutorial

xtuner train fails on my own dataset #727

Open · cococoda opened this issue 1 month ago

cococoda commented 1 month ago

```
(xt) root@autodl-container-acc940bcfe-be30ce08:~/autodl-tmp/ft# cd config
(xt) root@autodl-container-acc940bcfe-be30ce08:~/autodl-tmp/ft/config# xtuner train internlm2_chat_7b_qlora_alpaca_e3_copy.py
[2024-05-30 18:04:29,376] [INFO] [real_accelerator.py:203:get_accelerator] Setting ds_accelerator to cuda (auto detect)
05/30 18:04:29 - mmengine - WARNING - WARNING: command error: '[Errno 2] No such file or directory: '/root/autodl-tmp/ft/data/cutlass/CHANGELOG.md''!
05/30 18:04:29 - mmengine - WARNING - Arguments received: ['xtuner', 'train', 'internlm2_chat_7b_qlora_alpaca_e3_copy.py']. xtuner commands use the following syntax:

    xtuner MODE MODE_ARGS ARGS

    Where   MODE (required) is one of ('list-cfg', 'copy-cfg', 'log-dataset', 'check-custom-dataset', 'train', 'test', 'chat', 'convert', 'preprocess', 'mmbench', 'eval_refcoco')
            MODE_ARG (optional) is the argument for specific mode
            ARGS (optional) are the arguments for specific command

Some usages for xtuner commands: (See more by using -h for specific command!)

    1. List all predefined configs:
        xtuner list-cfg
    2. Copy a predefined config to a given path:
        xtuner copy-cfg $CONFIG $SAVE_FILE
    3-1. Fine-tune LLMs by a single GPU:
        xtuner train $CONFIG
    3-2. Fine-tune LLMs by multiple GPUs:
        NPROC_PER_NODE=$NGPUS NNODES=$NNODES NODE_RANK=$NODE_RANK PORT=$PORT ADDR=$ADDR xtuner dist_train $CONFIG $GPUS
    4-1. Convert the pth model to HuggingFace's model:
        xtuner convert pth_to_hf $CONFIG $PATH_TO_PTH_MODEL $SAVE_PATH_TO_HF_MODEL
    4-2. Merge the HuggingFace's adapter to the pretrained base model:
        xtuner convert merge $LLM $ADAPTER $SAVE_PATH
        xtuner convert merge $CLIP $ADAPTER $SAVE_PATH --is-clip
    4-3. Split HuggingFace's LLM to the smallest sharded one:
        xtuner convert split $LLM $SAVE_PATH
    5-1. Chat with LLMs with HuggingFace's model and adapter:
        xtuner chat $LLM --adapter $ADAPTER --prompt-template $PROMPT_TEMPLATE --system-template $SYSTEM_TEMPLATE
    5-2. Chat with VLMs with HuggingFace's model and LLaVA:
        xtuner chat $LLM --llava $LLAVA --visual-encoder $VISUAL_ENCODER --image $IMAGE --prompt-template $PROMPT_TEMPLATE --system-template $SYSTEM_TEMPLATE
    6-1. Preprocess arxiv dataset:
        xtuner preprocess arxiv $SRC_FILE $DST_FILE --start-date $START_DATE --categories $CATEGORIES
    6-2. Preprocess refcoco dataset:
        xtuner preprocess refcoco --ann-path $RefCOCO_ANN_PATH --image-path $COCO_IMAGE_PATH --save-path $SAVE_PATH
    7-1. Log processed dataset:
        xtuner log-dataset $CONFIG
    7-2. Verify the correctness of the config file for the custom dataset:
        xtuner check-custom-dataset $CONFIG
    8. MMBench evaluation:
        xtuner mmbench $LLM --llava $LLAVA --visual-encoder $VISUAL_ENCODER --prompt-template $PROMPT_TEMPLATE --data-path $MMBENCH_DATA_PATH
    9. Refcoco evaluation:
        xtuner eval_refcoco $LLM --llava $LLAVA --visual-encoder $VISUAL_ENCODER --prompt-template $PROMPT_TEMPLATE --data-path $REFCOCO_DATA_PATH
    10. List all dataset formats which are supported in XTuner

Run special commands:

    xtuner help
    xtuner version

GitHub: https://github.com/InternLM/xtuner
```
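Not part of the original report, but one observation: both paths in the config below are relative (`./autodl-tmp/...`), while the command above was launched from `~/autodl-tmp/ft/config`. A minimal sketch for checking whether those paths actually resolve from the launch directory (path values copied from the config):

```python
import os

# Paths copied from internlm2_chat_7b_qlora_alpaca_e3_copy.py below; both are relative,
# so they only resolve if xtuner train is launched from the matching working directory.
pretrained_model_name_or_path = './autodl-tmp/RAG-langchain/models/internlm2-chat-7b'
alpaca_en_path = './autodl-tmp/ft/data/train_fold_1.json'

for name, path in [('model', pretrained_model_name_or_path),
                   ('dataset', alpaca_en_path)]:
    print(f'{name}: {path!r} exists={os.path.exists(path)} '
          f'(resolves to {os.path.abspath(path)})')
```

If either check prints `exists=False`, switching the config to absolute paths such as `/root/autodl-tmp/...` would be a straightforward thing to try.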

Config file:

```python
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from peft import LoraConfig
from torch.optim import AdamW
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig)

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import openai_map_fn, template_map_fn_factory
from xtuner.engine.hooks import (DatasetInfoHook, EvaluateChatHook,
                                 VarlenAttnArgsToMessageHubHook)
from xtuner.engine.runner import TrainLoop
from xtuner.model import SupervisedFinetune
from xtuner.parallel.sequence import SequenceParallelSampler
from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
pretrained_model_name_or_path = './autodl-tmp/RAG-langchain/models/internlm2-chat-7b'
use_varlen_attn = False

# Data
alpaca_en_path = './autodl-tmp/ft/data/train_fold_1.json'
prompt_template = PROMPT_TEMPLATE.internlm2_chat
max_length = 1024
pack_to_max_length = True

# parallel
sequence_parallel_size = 1

# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 16
accumulative_counts *= sequence_parallel_size
dataloader_num_workers = 0
max_epochs = 2
optim_type = AdamW
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 300
save_total_limit = 3  # Maximum checkpoints to keep (-1 means unlimited)

# Evaluate the generation performance during the training
evaluation_freq = 300
SYSTEM = ''
evaluation_inputs = [
    '判断以下新闻情绪,积极为1,消极为0',
    '判断该新闻正负面,正面为1,负面为0',
    '请判断以下新闻是积极还是消极,积极为1,消极为0,你的答案只有1或0'
]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    use_varlen_attn=use_varlen_attn,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16,
        quantization_config=dict(
            type=BitsAndBytesConfig,
            load_in_4bit=True,
            load_in_8bit=False,
            llm_int8_threshold=6.0,
            llm_int8_has_fp16_weight=False,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4')),
    lora=dict(
        type=LoraConfig,
        r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        bias='none',
        task_type='CAUSAL_LM'))

#######################################################################
#                      PART 3  Dataset & Dataloader                   #
#######################################################################
alpaca_en = dict(
    type=process_hf_dataset,
    dataset=dict(
        type=load_dataset, path='json',
        data_files=dict(train=alpaca_en_path)),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=openai_map_fn,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length,
    use_varlen_attn=use_varlen_attn)

sampler = SequenceParallelSampler \
    if sequence_parallel_size > 1 else DefaultSampler
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=alpaca_en,
    sampler=dict(type=sampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn, use_varlen_attn=use_varlen_attn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        system=SYSTEM,
        prompt_template=prompt_template)
]

if use_varlen_attn:
    custom_hooks += [dict(type=VarlenAttnArgsToMessageHubHook)]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable deterministic
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
```
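Since the config maps the custom dataset with `dataset_map_fn=openai_map_fn`, a quick standalone check (a sketch, not part of the original issue) is to load `train_fold_1.json` exactly the way PART 3 does and inspect the first record. If I read the xtuner dataset docs correctly, `openai_map_fn` expects each record to carry an OpenAI-style `messages` list; the record shown in the comment is only an illustration of that assumed shape, not the actual data.

```python
# Standalone sanity check: load the custom dataset the same way PART 3 of the config does
# and print the first record, to confirm the file is found and parsed as JSON.
from datasets import load_dataset

alpaca_en_path = './autodl-tmp/ft/data/train_fold_1.json'  # same value as in the config

ds = load_dataset('json', data_files=dict(train=alpaca_en_path))
print(ds)              # split names and sizes
print(ds['train'][0])  # first record; with openai_map_fn it should contain a 'messages'
                       # list, e.g. {"messages": [{"role": "user", "content": "..."},
                       #                          {"role": "assistant", "content": "1"}]}
                       # (illustrative shape only)
```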