import os
from threading import Thread
from typing import Iterator
import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
import huggingface_hub
import time
# Authenticate with the Hugging Face Hub (required for the gated Llama 2 weights)
token = os.environ.get("HUGGINGFACE_HUB_TOKEN", None)
if token:
    huggingface_hub.login(token=token)
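# Generation limits; MAX_INPUT_TOKEN_LENGTH can be overridden via an environment variable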
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
DESCRIPTION = """\
# Llama-2 13B Chat
This Space demonstrates model [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta, a Llama 2 model with 13B parameters fine-tuned for chat instructions. Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
For more details on the Llama 2 family of models and how to use them with `transformers`, take a look [at our blog post](https://huggingface.co/blog/llama2).
Looking for an even more powerful model? Check out the demo of the big [**70B**](https://huggingface.co/spaces/ysharma/Explore_llamav2_with_TGI) model!
For a smaller model that you can run on many GPUs, check out our [7B model demo](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat).
"""
LICENSE = """
<p/>
---
As a derivative work of [Llama-2-13b-chat](https://huggingface.co/meta-llama/Llama-2-13b-chat) by Meta,
this demo is governed by the [original license](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/LICENSE.txt) and the [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-13b-chat/blob/main/USE_POLICY.md).
"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU. This demo does not work on CPU.</p>"
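# The model and tokenizer are created lazily by load_model() inside a GPU context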
model = None
tokenizer = None
@spaces.GPU
def load_model():
    global model, tokenizer
    model_id = "meta-llama/Llama-2-13b-chat-hf"
    # 4-bit quantization requires the bitsandbytes package
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
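# A minimal alternative sketch, assuming a recent transformers release where passing
# load_in_4bit directly to from_pretrained is deprecated in favor of an explicit
# BitsAndBytesConfig:
#
#     from transformers import BitsAndBytesConfig
#
#     model = AutoModelForCausalLM.from_pretrained(
#         model_id,
#         device_map="auto",
#         quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#     )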
@spaces.GPU(duration=90)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    global model, tokenizer
    # Lazily load the model and tokenizer on first call
    if model is None or tokenizer is None:
        load_model()
    # Rebuild the conversation in the chat-template format expected by the tokenizer
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt")
    # Keep only the most recent tokens if the prompt exceeds the input limit
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)
    # Stream tokens as they are produced; generation runs on a background thread
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        top_k=top_k,
        temperature=temperature,
        num_beams=1,
        repetition_penalty=repetition_penalty,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    # Yield the accumulated text so the UI can render partial output as it streams in
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
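# A minimal usage sketch (illustrative values only, not part of the Space's UI flow):
# `generate` is a generator, and each yielded string is the full response so far.
#
#     for partial in generate("Hello!", chat_history=[], system_prompt="You are a helpful assistant."):
#         print(partial)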
# Optionally pre-load the model at startup when running as a Gradio app
if os.environ.get("GRADIO_APP") == "True":
    load_model()
chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.9,
        ),
        gr.Slider(
            label="Top-k",
            minimum=1,
            maximum=1000,
            step=1,
            value=50,
        ),
        gr.Slider(
            label="Repetition penalty",
            minimum=1.0,
            maximum=2.0,
            step=0.05,
            value=1.2,
        ),
    ],
    cache_examples=False,
)
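# Note: additional_inputs are passed positionally after (message, chat_history), so their
# order must match the remaining parameters of `generate`.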
with gr.Blocks(css="style.css", fill_height=True) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(LICENSE)
if __name__ == "__main__":
    demo.queue(max_size=20).launch()