import argparse
import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from importlib.metadata import version
from lib.prune import prune_wanda, prune_magnitude, prune_sparsegpt, prune_ablate, check_sparsity, find_layers
from lib.eval import eval_ppl, eval_zero_shot
# My additions: PEFT / bitsandbytes imports for 4-bit loading and optional LoRA
from peft import PeftModel, PeftConfig, LoraConfig, TaskType
from peft import get_peft_config, get_peft_model, prepare_model_for_kbit_training
from transformers import BitsAndBytesConfig
print('torch', version('torch'))
print('transformers', version('transformers'))
print('accelerate', version('accelerate'))
print('# of gpus: ', torch.cuda.device_count())
def get_llm(model_name, cache_dir="llm_weights"):
    # Original full-precision loading path (replaced below by 4-bit loading):
    # model = AutoModelForCausalLM.from_pretrained(
    #     model_name,
    #     torch_dtype=torch.float16,
    #     cache_dir=cache_dir,
    #     low_cpu_mem_usage=True,
    #     device_map="auto"
    # )

    # My additions: load the model in 4-bit instead.
    # LoRA config (PEFT attaches adapters to the Q and V projections by default);
    # unused unless the get_peft_model call below is re-enabled.
    peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,                     # load the weights in 4-bit precision
        bnb_4bit_use_double_quant=True,        # double quantization (quantize the quantization constants as well)
        bnb_4bit_quant_type="nf4",             # NormalFloat (NF4) quantization data type
        bnb_4bit_compute_dtype=torch.bfloat16  # compute in bfloat16 where higher precision is needed
    )

    # Load the quantized model from the Hugging Face Hub (or the local cache)
model = AutoModelForCausalLM.from_pretrained(
model_name,
quantization_config=bnb_config,
cache_dir=cache_dir,
device_map="auto",
low_cpu_mem_usage=True,
return_dict=True
)
    # model = get_peft_model(model, peft_config)  # optional: attach LoRA adapters (see the sketch below)
model.seqlen = model.config.max_position_embeddings
return model
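
# A minimal sketch (my assumption, not part of the original pruning pipeline) of how the
# 4-bit model returned by get_llm could be wrapped with the LoRA adapters configured above,
# using the standard peft API. The helper name get_lora_llm is hypothetical.
def get_lora_llm(model_name, cache_dir="llm_weights"):
    model = get_llm(model_name, cache_dir)
    # Prepare the k-bit model for training (casts norms, enables input grads / gradient checkpointing)
    model = prepare_model_for_kbit_training(model)
    peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False,
                             r=8, lora_alpha=32, lora_dropout=0.1)
    # Insert LoRA adapters (q_proj / v_proj by default for LLaMA-style models)
    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()
    return model
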
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='LLaMA model')
parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
parser.add_argument('--nsamples', type=int, default=128, help='Number of calibration samples.')
parser.add_argument('--sparsity_ratio', type=float, default=0, help='Sparsity level')
parser.add_argument("--sparsity_type", type=str, choices=["unstructured", "4:8", "2:4"])
parser.add_argument("--prune_method", type=str, choices=["magnitude", "wanda", "sparsegpt",
"ablate_mag_seq", "ablate_wanda_seq", "ablate_mag_iter", "ablate_wanda_iter", "search"])
parser.add_argument("--cache_dir", default="llm_weights", type=str )
parser.add_argument('--use_variant', action="store_true", help="whether to use the wanda variant described in the appendix")
parser.add_argument('--save', type=str, default=None, help='Path to save results.')
parser.add_argument('--save_model', type=str, default=None, help='Path to save the pruned model.')
parser.add_argument("--eval_zero_shot", action="store_true")
args = parser.parse_args()
# Setting seeds for reproducibility
np.random.seed(args.seed)
torch.random.manual_seed(args.seed)
# Handling n:m sparsity
prune_n, prune_m = 0, 0
if args.sparsity_type != "unstructured":
assert args.sparsity_ratio == 0.5, "sparsity ratio must be 0.5 for structured N:M sparsity"
prune_n, prune_m = map(int, args.sparsity_type.split(":"))
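        # e.g. --sparsity_type 2:4 gives prune_n=2, prune_m=4: 2 weights are zeroed in every
        # contiguous block of 4, the pattern required for hardware-accelerated N:M sparsity.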
model_name = args.model.split("/")[-1]
print(f"loading llm model {args.model}")
model = get_llm(args.model, args.cache_dir)
model.eval()
    # tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=False)
    # My additions: use the fast tokenizer and pad on the right
    tokenizer = AutoTokenizer.from_pretrained(
        args.model,
        cache_dir=args.cache_dir,
        padding_side="right",
        use_fast=True,
    )
    # LLaMA tokenizers ship without a pad token, so reuse EOS for padding
    tokenizer.pad_token = tokenizer.eos_token
device = torch.device("cuda:0")
if "30b" in args.model or "65b" in args.model: # for 30b and 65b we use device_map to load onto multiple A6000 GPUs, thus the processing here.
device = model.hf_device_map["lm_head"]
print("use device ", device)
if args.sparsity_ratio != 0:
print("pruning starts")
if args.prune_method == "wanda":
prune_wanda(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
elif args.prune_method == "magnitude":
prune_magnitude(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
elif args.prune_method == "sparsegpt":
prune_sparsegpt(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
elif "ablate" in args.prune_method:
prune_ablate(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
################################################################
print("*"*30)
sparsity_ratio = check_sparsity(model)
print(f"sparsity sanity check {sparsity_ratio:.4f}")
print("*"*30)
################################################################
ppl_test = eval_ppl(args, model, tokenizer, device)
print(f"wikitext perplexity {ppl_test}")
    # Write a small log with the measured sparsity and perplexity (only if --save was given)
    if args.save:
        if not os.path.exists(args.save):
            os.makedirs(args.save)
        save_filepath = os.path.join(args.save, f"log_{args.prune_method}.txt")
        with open(save_filepath, "w") as f:
            print("method\tactual_sparsity\tppl_test", file=f, flush=True)
            print(f"{args.prune_method}\t{sparsity_ratio:.4f}\t{ppl_test:.4f}", file=f, flush=True)
if args.eval_zero_shot:
accelerate=False
if "30b" in args.model or "65b" in args.model or "70b" in args.model:
accelerate=True
task_list = ["boolq", "rte","hellaswag","winogrande", "arc_easy","arc_challenge", "openbookqa"]
num_shot = 0
results = eval_zero_shot(args.model, model, tokenizer, task_list, num_shot, accelerate)
print("********************************")
print("zero_shot evaluation results")
print(results)
if args.save_model:
model.save_pretrained(args.save_model)
tokenizer.save_pretrained(args.save_model)
if __name__ == '__main__':
main()
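
# Example invocation (model id and output directory are illustrative; adjust to your setup):
#   python main.py --model meta-llama/Llama-2-7b-hf --prune_method wanda \
#       --sparsity_ratio 0.5 --sparsity_type unstructured --save out/llama2_7b/unstructured/wanda/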