# Yuan2-M32-hf-int4 / HF-inference.py
import sys, os
import torch

# Make the repo root importable so the custom Yuan model class below resolves.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

from transformers import LlamaTokenizer, TextGenerationPipeline, AutoModelForCausalLM
from yuan_moe_hf_model import YuanForCausalLM
#from optimum.gptq import GPTQQuantizer, load_quantized_model
#from accelerate import init_empty_weights
device = "cuda"
quantized_model_dir = "/temp_data/LLM_test/MOE/Yuan2-M32-int4-hf"
tokenizer = LlamaTokenizer.from_pretrained('/temp_data/LLM_test/MOE/Yuan2-M32-int4-hf/', add_eos_token=False, add_bos_token=False, eos_token='<eod>')
tokenizer.add_tokens(['<sep>', '<pad>', '<mask>', '<predict>', '<FIM_SUFFIX>', '<FIM_PREFIX>', '<FIM_MIDDLE>','<commit_before>','<commit_msg>','<commit_after>','<jupyter_start>','<jupyter_text>','<jupyter_code>','<jupyter_output>','<empty_output>'], special_tokens=True)
model = YuanForCausalLM.from_pretrained(quantized_model_dir, trust_remote_code=True, use_safetensors=True).to(device)
print(tokenizer.decode(model.generate(**tokenizer("北京是中国的", return_tensors="pt").to(device), max_new_tokens=256)[0]))
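
# Optional sampling variant (a sketch with assumed decoding settings, not part of
# the original script); do_sample/temperature/top_p are standard generate() kwargs.
#sample_ids = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.9)
#print(tokenizer.decode(sample_ids[0]))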
# Alternative: run the same prompt through a transformers text-generation pipeline.
#pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
#print(pipeline("北京是中国的")[0]["generated_text"])
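
# Alternative load path (a sketch, assuming the checkpoint's config.json maps the
# custom class via auto_map; uses the AutoModelForCausalLM imported above).
#model = AutoModelForCausalLM.from_pretrained(quantized_model_dir, trust_remote_code=True, use_safetensors=True).to(device)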