# Mistral-lab / app.py
import os
import copy

import gradio as gr
import spaces
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
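# Optional Hub token; only needed for gated or private repos.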
HF_TOKEN = os.environ.get("HF_TOKEN", None)
MODEL_ID = "google/gemma-2-27b-it"
REPO_ID = "bartowski/gemma-2-27b-it-GGUF"
MODEL_NAME = MODEL_ID.split("/")[-1]
MODEL_FILE = "gemma-2-27b-it-Q4_K_M.gguf"
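# Faster downloads via the Rust hf_transfer backend (requires the hf_transfer package).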
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
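# Download the quantized GGUF weights and load them with llama.cpp:
# n_gpu_layers=-1 offloads all layers to the GPU, and chat_format="gemma"
# applies Gemma's chat template inside create_chat_completion.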
llm = Llama(
    model_path=hf_hub_download(
        repo_id=REPO_ID,
        filename=MODEL_FILE,
    ),
    n_ctx=4096,
    n_gpu_layers=-1,
    chat_format="gemma",
)
TITLE = "<h1><center>Chatbox</center></h1>"
DESCRIPTION = f"""
<h3>MODEL: <a href="https://hf.co/{MODEL_ID}">{MODEL_NAME}</a></h3>
<center>
<p>Gemma is a large language model built by Google.
<br>
Feel free to test it without logging in.
</p>
</center>
"""
CSS = """
.duplicate-button {
margin: auto !important;
color: white !important;
background: black !important;
border-radius: 100vh !important;
}
h3 {
text-align: center;
}
"""
@spaces.GPU(duration=90)
def stream_chat(message: str, history: list, temperature: float, max_new_tokens: int, top_p: float, top_k: int, penalty: float):
print(f'message is - {message}')
print(f'history is - {history}')
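    # Rebuild the conversation as OpenAI-style messages from Gradio's (user, assistant) pairs.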
conversation = []
for prompt, answer in history:
conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
conversation.append({"role": "user", "content": message})
print(f"Conversation is -\n{conversation}")
    output = llm.create_chat_completion(
        messages=conversation,
        top_k=top_k,
        top_p=top_p,
        repeat_penalty=penalty,
        max_tokens=max_new_tokens,
        stream=True,
        temperature=temperature,
    )
    temp = ""
    for out in output:
        stream = copy.deepcopy(out)
        # The first chunk only carries the role, so default missing content to "".
        temp += stream["choices"][0]["delta"].get("content", "")
        yield temp
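# Hypothetical local smoke test (assumes the GGUF file is already cached; outside
# Spaces, @spaces.GPU is a pass-through):
#   for partial in stream_chat("Hi!", [], 0.8, 256, 0.8, 20, 1.0):
#       print(partial)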
chatbot = gr.Chatbot(height=600)
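# Assemble the UI: title, description, a duplicate button, and a ChatInterface
# that streams from stream_chat with the sampling controls below.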
with gr.Blocks(css=CSS, theme="soft") as demo:
gr.HTML(TITLE)
gr.HTML(DESCRIPTION)
gr.DuplicateButton(value="Duplicate Space for private use", elem_classes="duplicate-button")
gr.ChatInterface(
fn=stream_chat,
chatbot=chatbot,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(
minimum=0,
maximum=1,
step=0.1,
value=0.8,
label="Temperature",
render=False,
),
gr.Slider(
minimum=128,
maximum=2048,
step=1,
value=1024,
label="Max Tokens",
render=False,
),
gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.1,
value=0.8,
label="top_p",
render=False,
),
gr.Slider(
minimum=1,
maximum=20,
step=1,
value=20,
label="top_k",
render=False,
),
gr.Slider(
minimum=0.0,
maximum=2.0,
step=0.1,
value=1.0,
label="Repetition penalty",
render=False,
),
],
examples=[
["Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."],
["What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."],
["Tell me a random fun fact about the Roman Empire."],
["Show me a code snippet of a website's sticky header in CSS and JavaScript."],
],
cache_examples=False,
)
if __name__ == "__main__":
demo.launch()