Closed · francoishernandez closed this 3 months ago
These are minimal changes to improve the readability of the dumped config.
Example with llama3 convert:
Before
{ "n_sample": 0, "training": { "accum_count": [ 32 ], "w_bit": 0, "valid_batch_size": 256, "batch_size": 896, "model_dtype": "fp16", "batch_size_multiple": 1, "normalization": "tokens", "quant_layers": [], "quant_type": "", "group_size": 0, "accum_steps": [ 0 ], "optim": "fusedadam", "batch_type": "tokens" }, "decoder_start_token": "<s>", "src_vocab": null, "tgt_vocab": null, "skip_empty_level": "silent", "model": { "add_ffnbias": false, "architecture": "transformer_lm", "add_qkvbias": false, "num_experts": 0, "heads": 32, "embeddings": { "src_word_vec_size": 4096, "tgt_word_vec_size": 4096 }, "mlp_activation_fn": "gated-silu", "rotary_interleave": false, "left_pad": true, "model_type": "text", "rotary_theta": 500000, "sliding_window": 0, "layer_norm": "rms", "layers": 32, "rotary_dim": 0, "num_experts_per_tok": 0, "parallel_residual": false, "decoder": { "decoder_type": "transformer_lm", "tgt_word_vec_size": 4096 }, "shared_layer_norm": false, "heads_kv": 8, "hidden_size": 4096, "transformer_ff": 14336, "norm_eps": 1e-05, "max_relative_positions": -1 }, "transforms_configs": { "filtertoolong": { "src_seq_length": 512, "tgt_seq_length": 512 } }, "vocab_size_multiple": 8, "share_vocab": true, "save_data": null, "data": null, "src_vocab_size": 128256, "transforms": [ "filtertoolong" ], "data_task": "lm", "tgt_vocab_size": 128256 }
After
{ "transforms": [ "filtertoolong" ], "src_vocab_size": 128256, "decoder_start_token": "<s>", "share_vocab": true, "vocab_size_multiple": 8, "save_data": null, "skip_empty_level": "silent", "data": null, "tgt_vocab": null, "tgt_vocab_size": 128256, "n_sample": 0, "src_vocab": null, "training": { "group_size": 0, "w_bit": 0, "quant_layers": [], "valid_batch_size": 256, "accum_steps": [ 0 ], "normalization": "tokens", "model_dtype": "fp16", "batch_size": 896, "quant_type": "", "batch_type": "tokens", "accum_count": [ 32 ], "batch_size_multiple": 1, "optim": "fusedadam" }, "transforms_configs": { "filtertoolong": { "tgt_seq_length": 512, "src_seq_length": 512 } }, "model": { "heads_kv": 8, "architecture": "transformer_lm", "rotary_theta": 500000, "mlp_activation_fn": "gated-silu", "left_pad": true, "parallel_residual": false, "hidden_size": 4096, "layer_norm": "rms", "norm_eps": 1e-05, "add_ffnbias": false, "sliding_window": 0, "transformer_ff": 14336, "layers": 32, "rotary_dim": 0, "rotary_interleave": false, "shared_layer_norm": false, "heads": 32, "num_experts": 0, "add_qkvbias": false, "num_experts_per_tok": 0, "max_relative_positions": -1, "embeddings": { "src_word_vec_size": 4096, "tgt_word_vec_size": 4096 }, "decoder": { "decoder_type": "transformer_lm", "tgt_word_vec_size": 4096 } } }