open-mmlab / mmengine

OpenMMLab Foundational Library for Training Deep Learning Models
https://mmengine.readthedocs.io/
Apache License 2.0

[Bug] xtuner wandb #1342

Closed. vansin closed this issue 10 months ago.

vansin commented 1 year ago
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from bitsandbytes.optim import PagedAdamW32bit
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
from peft import LoraConfig
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig)

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import arxiv_map_fn, template_map_fn_factory
from xtuner.engine import DatasetInfoHook, EvaluateChatHook
from xtuner.model import SupervisedFinetune
from xtuner.utils import PROMPT_TEMPLATE

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
pretrained_model_name_or_path = 'internlm/internlm-7b'

# Data
# 1. Download data from https://kaggle.com/datasets/Cornell-University/arxiv
# 2. Process data by `xtuner preprocess arxiv ${DOWNLOADED_DATA} ./data/arxiv_data.json [optional arguments]`  # noqa: E501
data_path = './data/arxiv_data.json'
prompt_template = PROMPT_TEMPLATE.title
max_length = 2048
pack_to_max_length = True

# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 16
dataloader_num_workers = 0
max_epochs = 3
optim_type = PagedAdamW32bit
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip

# Evaluate the generation performance during training
evaluation_freq = 500
evaluation_inputs = [
    ('We present InternLM, a multilingual foundational language '
     'model with 104B parameters. InternLM is pre-trained on a large '
     'corpora with 1.6T tokens with a multi-phase progressive '
     'process, and then fine-tuned to align with human preferences. '
     'We also developed a training system called Uniscale-LLM for '
     'efficient large language model training. The evaluation on a '
     'number of benchmarks shows that InternLM achieves '
     'state-of-the-art performance in multiple aspects, including '
     'knowledge understanding, reading comprehension, mathematics, '
     'and coding. With such well-rounded capabilities, InternLM '
     'achieves outstanding performances on comprehensive exams, '
     'including MMLU, AGIEval, C-Eval and GAOKAO-Bench, without '
     'resorting to external tools. On these benchmarks, InternLM '
     'not only significantly outperforms open-source models, but '
     'also obtains superior performance compared to ChatGPT. Also, '
     'InternLM demonstrates excellent capability of understanding '
     'Chinese language and Chinese culture, which makes it a '
     'suitable foundation model to support Chinese-oriented language '
     'applications. This manuscript gives a detailed study of '
     'our results, with benchmarks and examples across a diverse '
     'set of knowledge domains and tasks.'),
    ('In this work, we develop and release Llama 2, a collection of '
     'pretrained and fine-tuned large language models (LLMs) ranging '
     'in scale from 7 billion to 70 billion parameters.\nOur '
     'fine-tuned LLMs, called LLAMA 2-CHAT, are optimized for '
     'dialogue use cases. Our models outperform open-source chat '
     'models on most benchmarks we tested, and based on our human '
     'evaluations for helpfulness and safety, may be a suitable '
     'substitute for closedsource models. We provide a detailed '
     'description of our approach to fine-tuning and safety '
     'improvements of LLAMA 2-CHAT in order to enable the community '
     'to build on our work and contribute to the responsible '
     'development of LLMs.')
]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16,
        quantization_config=dict(
            type=BitsAndBytesConfig,
            load_in_4bit=True,
            load_in_8bit=False,
            llm_int8_threshold=6.0,
            llm_int8_has_fp16_weight=False,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4')),
    lora=dict(
        type=LoraConfig,
        r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        bias='none',
        task_type='CAUSAL_LM'))

#######################################################################
#                      PART 3  Dataset & Dataloader                   #
#######################################################################
train_dataset = dict(
    type=process_hf_dataset,
    dataset=dict(
        type=load_dataset, path='json', data_files=dict(train=data_path)),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=arxiv_map_fn,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = dict(
    type=CosineAnnealingLR,
    eta_min=lr * 0.1,
    by_epoch=True,
    T_max=max_epochs,
    convert_to_iter_based=True)

# train, val, test setting
train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        instruction=prompt_template.INSTRUCTION_START)
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per epoch.
    checkpoint=dict(type=CheckpointHook, interval=1),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
from mmengine.visualization.vis_backend import WandbVisBackend
from mmengine.visualization.visualizer import Visualizer

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Use a random seed by default and disable `deterministic`
randomness = dict(seed=None, deterministic=False)

visualizer = dict(
    type=Visualizer,
    vis_backends=[
        dict(type=WandbVisBackend, init_kwargs=dict(project='LLM'))
    ])
(xtuner) ➜  xtuner git:(main) ✗ xtuner train xtuner/configs/internlm/internlm_7b/internlm_7b_qlora_arxiv_gentitle_e3.py
[2023-09-06 13:52:29,504] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2023-09-06 13:52:34,580] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
09/06 13:52:38 - mmengine - INFO - 
------------------------------------------------------------
System environment:
    sys.platform: linux
    Python: 3.10.12 (main, Jul  5 2023, 18:54:27) [GCC 11.2.0]
    CUDA available: True
    numpy_random_seed: 320188416
    GPU 0: NVIDIA A100-SXM4-80GB
    CUDA_HOME: /usr/local/cuda
    NVCC: Cuda compilation tools, release 11.1, V11.1.105
    GCC: gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0
    PyTorch: 2.0.1
    PyTorch compiling details: PyTorch built with:
  - GCC 9.3
  - C++ Version: 201703
  - Intel(R) oneAPI Math Kernel Library Version 2023.1-Product Build 20230303 for Intel(R) 64 architecture applications
  - Intel(R) MKL-DNN v2.7.3 (Git Hash 6dbeffbae1f23cbbeae17adb7b5b13f1f37c080e)
  - OpenMP 201511 (a.k.a. OpenMP 4.5)
  - LAPACK is enabled (usually provided by MKL)
  - NNPACK is enabled
  - CPU capability usage: AVX2
  - CUDA Runtime 11.7
  - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86;-gencode;arch=compute_37,code=compute_37
  - CuDNN 8.5
  - Magma 2.6.1
  - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.7, CUDNN_VERSION=8.5.0, CXX_COMPILER=/opt/rh/devtoolset-9/root/usr/bin/c++, CXX_FLAGS= -D_GLIBCXX_USE_CXX11_ABI=0 -fabi-version=11 -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_DISABLE_GPU_ASSERTS=ON, TORCH_VERSION=2.0.1, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, 

    TorchVision: 0.15.2
    OpenCV: 4.8.0
    MMEngine: 0.8.4

Runtime environment:
    cudnn_benchmark: False
    mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
    dist_cfg: {'backend': 'nccl'}
    seed: 320188416
    deterministic: False
    Distributed launcher: none
    Distributed training: False
    GPU number: 1
------------------------------------------------------------

09/06 13:52:38 - mmengine - INFO - Config:
accumulative_counts = 16
batch_size = 1
betas = (
    0.9,
    0.999,
)
custom_hooks = [
    dict(
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='internlm/internlm-7b',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.engine.DatasetInfoHook'),
    dict(
        evaluation_inputs=[
            'We present InternLM, a multilingual foundational language model with 104B parameters. InternLM is pre-trained on a large corpora with 1.6T tokens with a multi-phase progressive process, and then fine-tuned to align with human preferences. We also developed a training system called Uniscale-LLM for efficient large language model training. The evaluation on a number of benchmarks shows that InternLM achieves state-of-the-art performance in multiple aspects, including knowledge understanding, reading comprehension, mathematics, and coding. With such well-rounded capabilities, InternLM achieves outstanding performances on comprehensive exams, including MMLU, AGIEval, C-Eval and GAOKAO-Bench, without resorting to external tools. On these benchmarks, InternLM not only significantly outperforms open-source models, but also obtains superior performance compared to ChatGPT. Also, InternLM demonstrates excellent capability of understanding Chinese language and Chinese culture, which makes it a suitable foundation model to support Chinese-oriented language applications. This manuscript gives a detailed study of our results, with benchmarks and examples across a diverse set of knowledge domains and tasks.',
            'In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters.\nOur fine-tuned LLMs, called LLAMA 2-CHAT, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closedsource models. We provide a detailed description of our approach to fine-tuning and safety improvements of LLAMA 2-CHAT in order to enable the community to build on our work and contribute to the responsible development of LLMs.',
        ],
        every_n_iters=500,
        instruction='xtuner.utils.PROMPT_TEMPLATE.title.INSTRUCTION_START',
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='internlm/internlm-7b',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.engine.EvaluateChatHook'),
]
data_path = './data/arxiv_data.json'
dataloader_num_workers = 0
default_hooks = dict(
    checkpoint=dict(interval=1, type='mmengine.hooks.CheckpointHook'),
    logger=dict(interval=10, type='mmengine.hooks.LoggerHook'),
    param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
    sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
    timer=dict(type='mmengine.hooks.IterTimerHook'))
env_cfg = dict(
    cudnn_benchmark=False,
    dist_cfg=dict(backend='nccl'),
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
evaluation_freq = 500
evaluation_inputs = [
    'We present InternLM, a multilingual foundational language model with 104B parameters. InternLM is pre-trained on a large corpora with 1.6T tokens with a multi-phase progressive process, and then fine-tuned to align with human preferences. We also developed a training system called Uniscale-LLM for efficient large language model training. The evaluation on a number of benchmarks shows that InternLM achieves state-of-the-art performance in multiple aspects, including knowledge understanding, reading comprehension, mathematics, and coding. With such well-rounded capabilities, InternLM achieves outstanding performances on comprehensive exams, including MMLU, AGIEval, C-Eval and GAOKAO-Bench, without resorting to external tools. On these benchmarks, InternLM not only significantly outperforms open-source models, but also obtains superior performance compared to ChatGPT. Also, InternLM demonstrates excellent capability of understanding Chinese language and Chinese culture, which makes it a suitable foundation model to support Chinese-oriented language applications. This manuscript gives a detailed study of our results, with benchmarks and examples across a diverse set of knowledge domains and tasks.',
    'In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters.\nOur fine-tuned LLMs, called LLAMA 2-CHAT, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closedsource models. We provide a detailed description of our approach to fine-tuning and safety improvements of LLAMA 2-CHAT in order to enable the community to build on our work and contribute to the responsible development of LLMs.',
]
launcher = 'none'
load_from = None
log_level = 'INFO'
lr = 0.0002
max_epochs = 3
max_length = 2048
max_norm = 1
model = dict(
    llm=dict(
        pretrained_model_name_or_path='internlm/internlm-7b',
        quantization_config=dict(
            bnb_4bit_compute_dtype='torch.float16',
            bnb_4bit_quant_type='nf4',
            bnb_4bit_use_double_quant=True,
            llm_int8_has_fp16_weight=False,
            llm_int8_threshold=6.0,
            load_in_4bit=True,
            load_in_8bit=False,
            type='transformers.BitsAndBytesConfig'),
        torch_dtype='torch.float16',
        trust_remote_code=True,
        type='transformers.AutoModelForCausalLM.from_pretrained'),
    lora=dict(
        bias='none',
        lora_alpha=16,
        lora_dropout=0.1,
        r=64,
        task_type='CAUSAL_LM',
        type='peft.LoraConfig'),
    type='xtuner.model.SupervisedFinetune')
optim_type = 'bitsandbytes.optim.PagedAdamW32bit'
optim_wrapper = dict(
    accumulative_counts=16,
    clip_grad=dict(error_if_nonfinite=False, max_norm=1),
    dtype='float16',
    loss_scale='dynamic',
    optimizer=dict(
        betas=(
            0.9,
            0.999,
        ),
        lr=0.0002,
        type='bitsandbytes.optim.PagedAdamW32bit',
        weight_decay=0),
    type='mmengine.optim.AmpOptimWrapper')
pack_to_max_length = True
param_scheduler = dict(
    T_max=3,
    by_epoch=True,
    convert_to_iter_based=True,
    eta_min=2e-05,
    type='mmengine.optim.CosineAnnealingLR')
pretrained_model_name_or_path = 'internlm/internlm-7b'
prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.title'
randomness = dict(deterministic=False, seed=None)
resume = False
tokenizer = dict(
    padding_side='right',
    pretrained_model_name_or_path='internlm/internlm-7b',
    trust_remote_code=True,
    type='transformers.AutoTokenizer.from_pretrained')
train_cfg = dict(by_epoch=True, max_epochs=3, val_interval=1)
train_dataloader = dict(
    batch_size=1,
    collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
    dataset=dict(
        dataset=dict(
            data_files=dict(train='./data/arxiv_data.json'),
            path='json',
            type='datasets.load_dataset'),
        dataset_map_fn='xtuner.dataset.map_fns.arxiv_map_fn',
        max_length=2048,
        pack_to_max_length=True,
        remove_unused_columns=True,
        shuffle_before_pack=True,
        template_map_fn=dict(
            template='xtuner.utils.PROMPT_TEMPLATE.title',
            type='xtuner.dataset.map_fns.template_map_fn_factory'),
        tokenizer=dict(
            padding_side='right',
            pretrained_model_name_or_path='internlm/internlm-7b',
            trust_remote_code=True,
            type='transformers.AutoTokenizer.from_pretrained'),
        type='xtuner.dataset.process_hf_dataset'),
    num_workers=0,
    sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))
train_dataset = dict(
    dataset=dict(
        data_files=dict(train='./data/arxiv_data.json'),
        path='json',
        type='datasets.load_dataset'),
    dataset_map_fn='xtuner.dataset.map_fns.arxiv_map_fn',
    max_length=2048,
    pack_to_max_length=True,
    remove_unused_columns=True,
    shuffle_before_pack=True,
    template_map_fn=dict(
        template='xtuner.utils.PROMPT_TEMPLATE.title',
        type='xtuner.dataset.map_fns.template_map_fn_factory'),
    tokenizer=dict(
        padding_side='right',
        pretrained_model_name_or_path='internlm/internlm-7b',
        trust_remote_code=True,
        type='transformers.AutoTokenizer.from_pretrained'),
    type='xtuner.dataset.process_hf_dataset')
visualizer = dict(
    type='mmengine.visualization.visualizer.Visualizer',
    vis_backends=[
        dict(
            init_kwargs=dict(project='LLM'),
            type='mmengine.visualization.vis_backend.WandbVisBackend'),
    ])
weight_decay = 0
work_dir = './work_dirs/internlm_7b_qlora_arxiv_gentitle_e3'

wandb: Currently logged in as: vansin. Use `wandb login --relogin` to force relogin
wandb: Tracking run with wandb version 0.15.9
wandb: Run data is saved locally in /cpfs01/user/huwenxing/xtuner/work_dirs/internlm_7b_qlora_arxiv_gentitle_e3/20230906_135237/vis_data/wandb/run-20230906_135240-dbgddmbq
wandb: Run `wandb offline` to turn off syncing.
wandb: Syncing run leafy-wind-2
wandb: ⭐️ View project at https://wandb.ai/vansin/LLM
wandb: 🚀 View run at https://wandb.ai/vansin/LLM/runs/dbgddmbq
Traceback (most recent call last):
  File "/cpfs01/user/huwenxing/xtuner/xtuner/tools/train.py", line 225, in <module>
    main()
  File "/cpfs01/user/huwenxing/xtuner/xtuner/tools/train.py", line 214, in main
    runner = Runner.from_cfg(cfg)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/runner/runner.py", line 445, in from_cfg
    runner = cls(
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/runner/runner.py", line 401, in __init__
    self.visualizer.add_config(self.cfg)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/dist/utils.py", line 401, in wrapper
    return func(*args, **kwargs)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/visualization/visualizer.py", line 1071, in add_config
    vis_backend.add_config(config, **kwargs)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/visualization/vis_backend.py", line 59, in wrapper
    return old_func(obj, *args, **kwargs)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/mmengine/visualization/vis_backend.py", line 439, in add_config
    self._wandb.config.update(
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/sdk/wandb_config.py", line 186, in update
    self._callback(data=sanitized)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/sdk/wandb_run.py", line 341, in wrapper_fn
    return func(self, *args, **kwargs)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/sdk/wandb_run.py", line 1284, in _config_callback
    self._backend.interface.publish_config(key=key, val=val, data=data)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/sdk/interface/interface.py", line 205, in publish_config
    cfg = self._make_config(data=data, key=key, val=val)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/sdk/interface/interface.py", line 160, in _make_config
    update.value_json = json_dumps_safer(json_friendly(v)[0])
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/util.py", line 820, in json_dumps_safer
    return dumps(obj, cls=WandBJSONEncoder, **kwargs)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/json/__init__.py", line 238, in dumps
    **kw).encode(obj)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/json/encoder.py", line 199, in encode
    chunks = self.iterencode(o, _one_shot=True)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/json/encoder.py", line 257, in iterencode
    return _iterencode(o, 0)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/site-packages/wandb/util.py", line 771, in default
    return json.JSONEncoder.default(self, obj)
  File "/cpfs01/user/huwenxing/miniconda/envs/xtuner/lib/python3.10/json/encoder.py", line 179, in default
    raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type module is not JSON serializable
wandb: Waiting for W&B process to finish... (failed 1). Press Control-C to abort syncing.
wandb: 🚀 View run leafy-wind-2 at: https://wandb.ai/vansin/LLM/runs/dbgddmbq
wandb: ️⚡ View job at https://wandb.ai/vansin/LLM/jobs/QXJ0aWZhY3RDb2xsZWN0aW9uOjk1MzkwNzM5/version_details/v0
wandb: Synced 6 W&B file(s), 0 media file(s), 0 artifact file(s) and 0 other file(s)
wandb: Find logs at: ./work_dirs/internlm_7b_qlora_arxiv_gentitle_e3/20230906_135237/vis_data/wandb/run-20230906_135240-dbgddmbq/logs
(xtuner) ➜  xtuner git:(main) ✗ pyd train xtuner/configs/internlm/internlm_7b/internlm_7b_qlora_arxiv_gentitle_e3.py

Reproduces the problem - command or script

1

Reproduces the problem - error message

1

Additional information

1

LZHgrla commented 1 year ago

It seems that this bug is caused by dumping a lazy config to JSON: the lazy config still holds live Python objects (modules, classes, functions), which `json.dumps` cannot encode, hence the `TypeError: Object of type module is not JSON serializable` in the traceback.
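
For readers hitting the same error: the traceback shows `WandbVisBackend.add_config` handing the runner config to `self._wandb.config.update`, which tries to JSON-encode it; with a pure-Python (lazy) config the values are still live objects such as imported modules. Below is a minimal sketch, independent of mmengine/xtuner, that reproduces the failure and illustrates one possible sanitization step. The dict keys and the `make_json_safe` helper are illustrative only, not an mmengine or wandb API.

import json

import torch  # any imported module reproduces the failure

# In a pure-Python (lazy) config, values like modules, classes and functions
# remain live Python objects; this dict stands in for such a config fragment.
cfg_like = {'optim_type': torch, 'dtype': torch.float16}

try:
    json.dumps(cfg_like)
except TypeError as e:
    print(e)  # -> Object of type module is not JSON serializable


def make_json_safe(obj):
    """Hypothetical helper: replace anything json cannot encode with repr(obj)."""
    if isinstance(obj, dict):
        return {k: make_json_safe(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [make_json_safe(v) for v in obj]
    try:
        json.dumps(obj)
        return obj
    except TypeError:
        return repr(obj)


print(make_json_safe(cfg_like))
# -> {'optim_type': "<module 'torch' from ...>", 'dtype': 'torch.float16'}

A real fix would presumably apply this kind of conversion to plain built-ins inside `WandbVisBackend.add_config` before calling `self._wandb.config.update`.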