# Hugging Face Space app.py by nroggendorff
# (commit ae77a0a "Update app.py", 441 bytes — viewer chrome preserved as a comment)
import gradio as gr
from llama_cpp import Llama
# Download the quantized Gemma 7B instruct model (GGUF) from the Hugging Face
# Hub and load it through the llama.cpp bindings. Happens once at startup;
# presumably runs on CPU with llama-cpp-python's defaults — confirm on the host.
model = Llama.from_pretrained(repo_id="google/gemma-7b-it-GGUF", filename="gemma-7b-it.gguf")
def chat(input_text):
    """Run one user turn through the Gemma instruct model and return its reply.

    Args:
        input_text: Raw user message (plain text, no template markup).

    Returns:
        The generated model text for this single turn.
    """
    # Manually apply Gemma's instruct chat template around the user message.
    prompt = (
        f"<bos><start_of_turn>user\n{input_text}<end_of_turn>\n"
        "<start_of_turn>model\n"
    )
    # Fixes vs. the original:
    #  - stop on "<end_of_turn>" (Gemma's turn terminator) rather than "\n",
    #    which truncated every multi-line answer at its first newline;
    #    "<start_of_turn>" also stops the model if it hallucinates a new turn.
    #  - explicit max_tokens: llama-cpp-python's default is only 16 tokens,
    #    which silently clipped replies.
    output = model(prompt, max_tokens=512, stop=["<end_of_turn>", "<start_of_turn>"])
    return output["choices"][0]["text"]
# Expose chat() through a minimal text-in / text-out Gradio web UI.
interface = gr.Interface(fn=chat, inputs="text", outputs="text")
interface.launch()