import spaces  # Hugging Face Spaces SDK; provides the GPU decorator used below
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Run inference on the GPU
device = "cuda"

# Initialize the base model, load the fine-tuned adapter, and set up the tokenizer
peft_model_id = "CMLM/ZhongJing-2-1_8b"
base_model_id = "Qwen/Qwen1.5-1.8B-Chat"
# device_map={"": 0} places the entire model on GPU 0; {"cuda": 0} is not a
# valid accelerate device map (keys are module names, values are devices).
model = AutoModelForCausalLM.from_pretrained(base_model_id, device_map={"": 0})
model.load_adapter(peft_model_id)  # requires the `peft` package
tokenizer = AutoTokenizer.from_pretrained(
    peft_model_id,  # same repo as the adapter
    padding_side="right",
    trust_remote_code=True,
    pad_token="<|endoftext|>"  # Qwen1.5's pad token; the original empty string would break padding
)
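
# Not in the original script, but standard practice for inference-only use:
# disable dropout and other training-time behavior.
model.eval()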

@spaces.GPU(duration=120)  # request the GPU for up to 120 seconds per call
def get_model_response(question):
    # Create the prompt without context
    prompt = f"Question: {question}"
    messages = [
        {"role": "system", "content": "You are a helpful TCM medical assistant named 仲景中医大语言模型, created by 医哲未来 of Fudan University."},
        {"role": "user", "content": prompt}
    ]

    # Prepare the input
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
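    # For illustration: Qwen1.5 uses the ChatML format, so `text` now looks like
    #   <|im_start|>system\n...<|im_end|>\n<|im_start|>user\nQuestion: ...<|im_end|>\n<|im_start|>assistant\n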
    model_inputs = tokenizer([text], return_tensors="pt").to(device)

    # Generate the response (greedy decoding by default)
    generated_ids = model.generate(
        model_inputs.input_ids,
        attention_mask=model_inputs.attention_mask,  # explicit mask avoids pad-token ambiguity warnings
        max_new_tokens=512
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    # Decode the response
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
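
# Hypothetical local smoke test, left commented out so nothing runs at import
# time (ZeroGPU only attaches a GPU inside the decorated call):
# print(get_model_response("What herbs are traditionally used for a common cold?"))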

iface = gr.Interface(
    fn=get_model_response,  # Directly use the decorated function
    inputs=["text"],
    outputs="text",
    title="仲景GPT-V2-1.8B",
    description="博极医源,精勤不倦。Unlocking the Wisdom of Traditional Chinese Medicine with AI."
)

# Launch the interface. share=True only matters for local runs; on Hugging Face
# Spaces the app is already served publicly and this flag is ignored.
iface.launch(share=True)