import spaces  # Import spaces first so the ZeroGPU decorator is available
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Set the device to use the GPU
device = "cuda"

# Initialize the base model, the LoRA adapter, and the tokenizer
peft_model_id = "CMLM/ZhongJing-2-1_8b"
base_model_id = "Qwen/Qwen1.5-1.8B-Chat"
model = AutoModelForCausalLM.from_pretrained(base_model_id, device_map={"": 0})  # place the whole model on GPU 0
model.load_adapter(peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(
    "CMLM/ZhongJing-2-1_8b",
    padding_side="right",
    trust_remote_code=True,
    pad_token=''
)

@spaces.GPU(duration=120)  # Request a GPU for up to 120 seconds per call
def get_model_response(question):
    # Create the prompt without additional context
    prompt = f"Question: {question}"
    messages = [
        {"role": "system", "content": "You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来 of Fudan University."},
        {"role": "user", "content": prompt}
    ]

    # Prepare the input using the chat template
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    # Generate the response
    generated_ids = model.generate(
        model_inputs.input_ids,
        max_new_tokens=512
    )
    # Strip the prompt tokens so only the newly generated tokens remain
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    # Decode the response
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response

iface = gr.Interface(
    fn=get_model_response,  # Use the GPU-decorated function directly
    inputs=["text"],
    outputs="text",
    title="仲景GPT-V2-1.8B",
    description="博极医源,精勤不倦。Unlocking the Wisdom of Traditional Chinese Medicine with AI."
)

# Launch the interface with sharing enabled
iface.launch(share=True)