import sys
import os

# Make the repository root importable so yuan_moe_hf_model can be found.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))

import torch
from transformers import LlamaTokenizer, TextGenerationPipeline
from yuan_moe_hf_model import YuanForCausalLM
#from optimum.gptq import GPTQQuantizer, load_quantized_model
#from accelerate import init_empty_weights

device = "cuda"
quantized_model_dir = "/temp_data/LLM_test/MOE/Yuan2-M32-int4-hf"

# Load the tokenizer; '<eod>' is Yuan2's end-of-document token, and the
# special tokens below are the ones Yuan2 registers on top of the Llama vocab.
tokenizer = LlamaTokenizer.from_pretrained(
    quantized_model_dir,
    add_eos_token=False,
    add_bos_token=False,
    eos_token='<eod>',
)
tokenizer.add_tokens(
    ['<sep>', '<pad>', '<mask>', '<predict>', '<FIM_SUFFIX>', '<FIM_PREFIX>',
     '<FIM_MIDDLE>', '<commit_before>', '<commit_msg>', '<commit_after>',
     '<jupyter_start>', '<jupyter_text>', '<jupyter_code>', '<jupyter_output>',
     '<empty_output>'],
    special_tokens=True,
)

# Load the int4-quantized Yuan2-M32 checkpoint from safetensors and move it to the GPU.
model = YuanForCausalLM.from_pretrained(
    quantized_model_dir,
    trust_remote_code=True,
    use_safetensors=True,
).to(device)

# Greedy decoding from a short Chinese prompt ("Beijing is China's ...").
inputs = tokenizer("北京是中国的", return_tensors="pt").to(device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=256)[0]))

#pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
#print(pipeline("北京是中国的")[0]["generated_text"])
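
# A minimal usage sketch: the same generate() call with sampling enabled and
# special tokens stripped from the decoded output. The do_sample/top_p/
# temperature values are illustrative assumptions, not part of the original.
with torch.inference_mode():
    sampled = model.generate(
        **tokenizer("北京是中国的", return_tensors="pt").to(device),
        max_new_tokens=256,
        do_sample=True,    # sample instead of greedy decoding
        top_p=0.9,         # nucleus-sampling cutoff (assumed value)
        temperature=0.7,   # softens the token distribution (assumed value)
    )
print(tokenizer.decode(sampled[0], skip_special_tokens=True))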