qxpBlog closed this issue 6 months ago.
@qxpBlog Thanks for your interest in our work! It seems that you have modified the original code to use a custom model from another library, LLMPruner. Can you first confirm that the forward pass (or the `model.generate` function) works well for the model itself?
Yes, my model from LLMPruner works with the `model.generate()` function:
```python
tokenizer = LlamaTokenizer.from_pretrained(args.base_model)
model = LlamaForCausalLM.from_pretrained(
    args.base_model,
    low_cpu_mem_usage=True if args.torch_version >= 1.9 else False
)
if args.device != "cpu":
    model.half()
model.to(args.device)

if args.test_before_train:
    logger.log("\n==================Generation Results before Pruning================\n")
    model.eval()
    with torch.no_grad():
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt")['input_ids'].to(args.device)

            generation_output = model.generate(
                input_ids=input_ids,
                do_sample=True,
                top_k=50,
                max_length=args.max_seq_len,
                top_p=args.top_p,
                temperature=args.temperature,
            )

            result = tokenizer.decode(generation_output[0])
            logger.log(result)

    ppl = PPLMetric(model, tokenizer, ['wikitext2', 'ptb'], args.max_seq_len, device=args.device)
    logger.log("PPL before pruning: {}".format(ppl))
```
The example generate function does not use the same hyper-parameter settings as the flan zero-shot inference in your code. Also, since you commented out the lorahub part in your code, I do not think that is what breaks it. You may want to experiment more with the model's generate function to see whether the exact settings work well.
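For example, a minimal check along those lines (a sketch, not from the thread) is to run the pruned model through the same kind of batched, padded `generate` call that the flan zero-shot evaluation makes (`max_new_tokens=5`, as shown in the traceback later in the thread). The prompts and the left-padding choice here are assumptions.

```python
# Sketch: mirror the lorahub-style zero-shot generate call on the pruned model in isolation.
# Assumptions: `model` and `tokenizer` are the pruned model / tokenizer already in memory;
# the prompts are placeholders; left padding is assumed for the decoder-only LLaMA model.
tokenizer.pad_token_id = 0          # same pad id the pruning script assigns to the config
tokenizer.padding_side = "left"

model.eval()
with torch.no_grad():
    inputs = tokenizer(
        ["I believe the meaning of life is",
         "Simply put, the theory of relativity states that"],
        return_tensors="pt",
        padding=True,
    ).to(model.device)
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=5)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```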
Thanks for the answer, but I think the generate config is not the key problem. When I finish pruning my model, I directly call the function `evaluate_flan_results_zero_shot` instead of saving the model with `torch.save` and then loading it for use, so there should not be any errors from that step:
```python
import os
import gc
import sys
import time
import json
import copy
import random
import argparse
from typing import Tuple

import torch
import numpy as np
from transformers import LlamaTokenizer, GenerationConfig, LlamaConfig, AutoConfig

from LLMPruner.models.hf_llama.modeling_llama import LlamaForCausalLM, LlamaRMSNorm, LlamaAttention, LlamaMLP
import LLMPruner.torch_pruning as tp
from LLMPruner.pruner import hf_llama_pruner as llama_pruner
from LLMPruner.utils.logger import LoggerWithDepth
from LLMPruner.evaluator.ppl import PPLMetric
from LLMPruner.datasets.example_samples import get_examples
from LLMPruner.templates.prompts import prompts

import lorahub1.bbh


def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

def main(args):
    set_random_seed(args.seed)

    logger = LoggerWithDepth(
        env_name="{}".format(args.save_ckpt_log_name),
        config=args.__dict__,
        root_dir='prune_log',
        setup_sublogger=True
    )

    tokenizer = LlamaTokenizer.from_pretrained(args.base_model)
    model = LlamaForCausalLM.from_pretrained(
        args.base_model,
        low_cpu_mem_usage=True if args.torch_version >= 1.9 else False
    )
    if args.device != "cpu":
        model.half()
    model.to(args.device)

    pruner_type = args.pruner_type.lower()
    assert pruner_type in ['random', 'l2', 'l1', 'taylor']

    for param in model.parameters():
        param.requires_grad_(True)
    before_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

    forward_prompts = torch.tensor([
        [1, 306, 4658, 278, 6593, 310, 2834, 338],
        [1, 3439, 17632, 1925, 29892, 278, 6368, 310],
    ]).to(args.device)  # Only for building the dependency graph. Any input will be fine since the computation results are not taken into consideration.

    if pruner_type == 'random':
        imp = tp.importance.RandomImportance()
    elif pruner_type == 'l1':
        imp = llama_pruner.MagnitudeImportance(p=1)
    elif pruner_type == 'l2':
        imp = llama_pruner.MagnitudeImportance(p=2)
    elif pruner_type == 'taylor':
        imp = llama_pruner.TaylorImportance(group_reduction=args.grouping_strategy, taylor=args.taylor)
    else:
        raise NotImplementedError

    logger.log("Use {} pruner...".format(pruner_type))

    # Block-wise pruning
    if args.block_wise:
        logger.log("Start Pruning")
        if len(args.pruning_ratio) == 1 and args.pruning_ratio[0] != 0:
            args.pruning_ratio *= 32
        for i, ratio in enumerate(args.pruning_ratio):
            kwargs = {
                "importance": imp,
                "global_pruning": args.global_pruning,
                "iterative_steps": args.iterative_steps,
                "ch_sparsity": ratio,
                "ignored_layers": [],
                "channel_groups": {
                },
                "consecutive_groups": {
                    layer.self_attn.q_proj: layer.self_attn.head_dim for layer in model.model.layers
                },
                "customized_pruners": {
                    LlamaRMSNorm: llama_pruner.hf_rmsnorm_pruner,
                },
                "root_module_types": None,
                "root_instances": [model.model.layers[i].self_attn.q_proj] +
                                  [model.model.layers[i].mlp.gate_proj]
            }
            logger.log("Pruning Attention Layer = {}".format(i))
            logger.log("Pruning MLP Layer = {}".format(i))

            pruner = tp.pruner.MetaPruner(
                model,
                forward_prompts,
                **kwargs
            )
            model.zero_grad()

            logger.log("Pruning_ratio:{}".format(ratio))
            for i in range(args.iterative_steps):
                if pruner_type in ['taylor']:
                    example_prompts = get_examples('bookcorpus', tokenizer, args.num_examples, seq_len=64).to(args.device)
                    logger.log("Start Backwarding in iterative steps = {}...".format(i))
                    if args.taylor in ['param_mix', 'param_second']:
                        for j in range(args.num_examples):
                            batch_input = example_prompts[j].unsqueeze(0)
                            loss = model(batch_input, labels=batch_input).loss
                            logger.log("Loss = {}".format(loss))
                            loss.backward()

                            for module_param in model.parameters():
                                module_param.grad = module_param.grad * module_param.grad / args.num_examples
                                if hasattr(module_param, 'acc_grad'):
                                    module_param.acc_grad += module_param.grad
                                else:
                                    module_param.acc_grad = copy.deepcopy(module_param.grad)
                            model.zero_grad()
                            del loss.grad

                    loss = model(example_prompts, labels=example_prompts).loss
                    logger.log("Loss = {}".format(loss))
                    loss.backward()

                # Pruning entry point
                pruner.step()

                after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
                logger.log("After Iter {}/{}, #parameters: {}".format(i + 1, args.iterative_steps, after_pruning_parameters))

            # modify inference-related attributes
            for layer in model.model.layers:
                layer.self_attn.num_heads = layer.self_attn.q_proj.weight.data.shape[0] // layer.self_attn.head_dim

            # Clean the gradient in the model
            model.zero_grad()
            for name, module in model.named_parameters():
                if 'weight' in name:
                    module.grad = None

            del pruner

    elif args.channel_wise:
        kwargs = {
            "importance": imp,
            "global_pruning": args.global_pruning,
            "iterative_steps": args.iterative_steps,
            "ch_sparsity": args.pruning_ratio[0],  # remove 50% channels, ResNet18 = {64, 128, 256, 512} => ResNet18_Half = {32, 64, 128, 256}
            "ignored_layers": [],
            #"round_to": model.config.num_attention_heads * 2,
            "channel_groups": {
                #layer.self_attn: layer.self_attn.num_heads for layer in model.model.layers
            },
            "customized_pruners": {
                LlamaRMSNorm: llama_pruner.hf_rmsnorm_pruner,
                #LlamaAttention: llama_pruner.hf_attention_pruner,
            },
            "root_module_types": [LlamaRMSNorm, LlamaAttention],
        }

        pruner = tp.pruner.MetaPruner(
            model,
            forward_prompts,
            **kwargs
        )
        model.zero_grad()

        logger.log("Start Pruning")
        for i in range(args.iterative_steps):
            if pruner_type in ['taylor']:
                example_prompts = get_examples('bookcorpus', tokenizer, 10, seq_len=64)
                logger.log("Start Backwarding in iterative steps = {}...".format(i))
                loss = model(example_prompts, labels=example_prompts).loss
                logger.log("Loss = {}".format(loss))
                loss.backward()

            pruner.step()

            after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
            logger.log("After Iter {}/{}, #parameters: {}".format(i + 1, args.iterative_steps, after_pruning_parameters))

        # Clean the gradient in the model
        model.zero_grad()
        for name, module in model.named_parameters():
            if 'weight' in name:
                module.grad = None

        # modify inference-related attributes
        model.config.hidden_size = model.model.embed_tokens.weight.shape[1]

        model.zero_grad()
        del pruner

    elif args.layer_wise:
        model.model.layers = model.model.layers[:args.layer]
        after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

    else:
        raise NotImplementedError

    logger.log("#Param before: {}, #Param after: {}, Ratio = {:.4f}%".format(before_pruning_parameters, after_pruning_parameters, 100.0 * after_pruning_parameters / before_pruning_parameters))

    gc.collect()
    torch.cuda.empty_cache()

    if args.save_model:
        model.half()
        torch.save({
            'model': model,
            'tokenizer': tokenizer,
        }, logger.best_checkpoint_path)

    if args.eval_device != "cuda":
        model.half()
    model.to(args.eval_device)

    model.config.pad_token_id = tokenizer.pad_token_id = 0
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2

    if args.test_after_train:
        logger.log("\n==================Generation Results After Pruning================\n")
        model.eval()
        with torch.no_grad():
            lorahub1.bbh.evaluate_flan_results_zero_shot("data_bbh", model, tokenizer)

    logger.log("\n==================Finish================\n")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Pruning LLaMA (huggingface version)')

    # argument for parsing
    parser.add_argument('--base_model', type=str, default="/home/iotsc01/xinpengq/LLM-Pruner-main/llama-7b-hf", help='base model name')
    parser.add_argument('--save_ckpt_log_name', type=str, default="llama_prune", help='the path for save the checkpoint and the log. The final path would be log/{your_name_here}_{pruner_type}_{pruning_ratio}')
    parser.add_argument('--pruning_ratio', nargs='+', type=float, default=[], help='pruning ratio list')
    parser.add_argument('--pruner_type', type=str, default='l2', help='pruner type')

    # argument for generation
    parser.add_argument('--temperature', type=float, default=1.0, help='temperature')
    parser.add_argument('--top_p', type=float, default=0.95, help='top p')
    parser.add_argument('--max_seq_len', type=int, default=128, help='max sequence length')

    # argument for layer-wise pruning/column-wise pruning
    parser.add_argument('--channel_wise', action='store_true', help='channel wise')
    parser.add_argument('--block_wise', action='store_true', help='block wise')
    parser.add_argument('--layer_wise', action='store_true', help='layer wise')
    parser.add_argument('--layer', type=int, default=12, help='remain the previous n layers')

    parser.add_argument('--block_attention_layer_start', type=int, help='start layer of block attention layers', default=3)
    parser.add_argument('--block_attention_layer_end', type=int, help='end layer of block attention layers', default=31)
    parser.add_argument('--block_mlp_layer_start', type=int, help='start layer of block mlp layers', default=3)
    parser.add_argument('--block_mlp_layer_end', type=int, help='end layer of block mlp layers', default=31)

    parser.add_argument('--iterative_steps', type=int, default=1, help="Iteration step for pruning. Default=1")
    parser.add_argument('--grouping_strategy', type=str, default='sum', help='Reduce method for grouping')
    parser.add_argument('--global_pruning', action='store_true', help='whether global pruning')
    parser.add_argument('--taylor', type=str, default='param_first', help='choose from [vectorize, param_second, param_first, param_mix]')
    parser.add_argument('--num_examples', type=int, default=10)

    # general argument
    parser.add_argument('--device', type=str, default="cuda", help='device')
    parser.add_argument('--test_before_train', action='store_true', help='whether test before train')
    parser.add_argument('--eval_device', type=str, default="cuda", help='eval device')
    parser.add_argument('--test_after_train', action='store_true', help='whether test after train')
    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument('--save_model', action='store_true', help='if save model')

    args = parser.parse_args()

    torch_version = float('.'.join(torch.__version__.split('.')[:2]))
    args.torch_version = torch_version

    main(args)
```
@mavenlin @SivilTaram @P2333 @chenxwh @Boyu-Mi I want to evaluate my pruned LLaMA-7B model (saved by `torch.save`), but the following error happened:

```text
Traceback (most recent call last)

/home/iotsc01/xinpengq/LLM-Pruner-main/lorahub-main/reproduce_bbh.py:181
    178         # unzip
    179         os.system("unzip data_bbh.zip")
    180     # evaluate the model
❱   181     evaluate_flan_results_zero_shot("data_bbh", args.ckpt)
    182     # # five shot for flan models
    183     # evaluate_flan_results_few_shot("data_bbh", "google/flan-t5-large")
    184     # # five shot for lorahub models

/home/iotsc01/xinpengq/LLM-Pruner-main/lorahub-main/reproduce_bbh.py:63 in evaluate_flan_results_zero_shot
     60                 return_tensors="pt",
     61                 padding=True,
     62             ).to(model.device)
❱    63             outputs = model.generate(
     64                 input_ids=inputs["input_ids"], max_new_tokens=5
     65             )
     66             outputs = tokenizer.batch_decode(

/home/iotsc01/anaconda3/envs/xinpengq_env/lib/python3.10/site-packages/torch/utils/_contextlib.py:115 in decorate_context
    112     @functools.wraps(func)
    113     def decorate_context(*args, **kwargs):
    114         with ctx_factory():
❱   115             return func(*args, **kwargs)
    116
    117     return decorate_context
    118

/home/iotsc01/anaconda3/envs/xinpengq_env/lib/python3.10/site-packages/transformers/generation/utils.py:1456 in generate
   1453
   1454         # if we don't pass `past_key_values` and a cache_implementation is specified
   1455
❱  1456         if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING an
   1457             "past_key_values", False
   1458         ):
   1459             cache_cls = NEED_SETUP_CACHE_CLASSES_MAPPING[generation_config.cache_impleme

AttributeError: 'GenerationConfig' object has no attribute 'cache_implementation'
```

How can I solve it?
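For reference, one possible workaround (a sketch, not a confirmed fix from the thread): this AttributeError typically appears when a model whose `GenerationConfig` was pickled by `torch.save` under an older transformers release is loaded under a newer release whose `generate()` reads fields such as `cache_implementation`. Rebuilding the generation config after loading, or pinning transformers to the version that was installed when the checkpoint was saved, may help. The checkpoint path below is a placeholder.

```python
# Sketch of a possible workaround. Assumptions: the checkpoint was produced by the script's
# --save_model branch, i.e. torch.save({'model': ..., 'tokenizer': ...}); the path is a placeholder.
import torch
from transformers import GenerationConfig

ckpt = torch.load("prune_log/llama_prune/pytorch_model.bin", map_location="cpu")
model, tokenizer = ckpt['model'], ckpt['tokenizer']

# Re-create the generation config from the (pruned) model config so it carries the attributes
# that the currently installed transformers generate() expects, e.g. cache_implementation.
model.generation_config = GenerationConfig.from_model_config(model.config)

# Alternative: keep the transformers version identical between the environment that ran
# torch.save and the environment that calls model.generate.
```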
The following is my code: