from huggingface_hub import InferenceClient
import gradio as gr
import os

# Hugging Face Inference API endpoints for the supported models
API_URL = {
    "Mistral": "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.3",
    "Mixtral": "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1",
    "Mathstral": "https://api-inference.huggingface.co/models/mistralai/mathstral-7B-v0.1",
}

# Both secrets are read from the environment (e.g. Space secrets)
HF_TOKEN = os.environ["HF_TOKEN"]
Hinglish_Prompt = os.environ["Hinglish_Prompt"]

# One authenticated client per model
mistralClient = InferenceClient(
    model=API_URL["Mistral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
mixtralClient = InferenceClient(
    model=API_URL["Mixtral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
mathstralClient = InferenceClient(
    model=API_URL["Mathstral"],
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)

def format_prompt(message, history, enable_hinglish=False):
    prompt = ""
    # Prepend the Hinglish system prompt once, unless it is already present in the history
    if enable_hinglish and not any(
        "You are a Hinglish LLM." in user_prompt for user_prompt, bot_response in history
    ):
        prompt += Hinglish_Prompt
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response} "
    prompt += f"[INST] {message} [/INST]"
    return prompt
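# For reference (illustration only), format_prompt builds an [INST]-tagged prompt:
#   format_prompt("How are you?", [("Hi", "Hello!")])
#   -> '[INST] Hi [/INST] Hello! [INST] How are you? [/INST]'
# With enable_hinglish=True, the Hinglish system prompt is prepended once.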
def generate(prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95,
             repetition_penalty=1.0, model="Mistral", enable_hinglish=False):
    if model == "Mathstral" and enable_hinglish:
        raise gr.Error("The Mathstral model doesn't support Hinglish")

    # Selecting the client for the requested model
    if model == "Mistral":
        client = mistralClient
    elif model == "Mixtral":
        client = mixtralClient
    elif model == "Mathstral":
        client = mathstralClient

    # Generation arguments
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history, enable_hinglish)

    # Stream tokens back to the UI as they arrive
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output

additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
    gr.Dropdown(
        choices=["Mistral", "Mixtral", "Mathstral"],
        value="Mistral",
        label="Model to be used",
        interactive=True,
        info="Mistral: for general-purpose conversations; "
        "Mixtral: for faster, more accurate conversations; "
        "Mathstral: for mathematical and scientific reasoning",
    ),
    gr.Checkbox(
        label="Hinglish",
        value=False,
        interactive=True,
        info="Enables MistralTalk to reply in Hinglish (a mix of Hindi and English)",
    ),
]

css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css) as demo:
    gr.HTML("<center><h1>MistralTalk🗣️</h1></center>")

    gr.HTML("<center><h3>In this demo, you can chat with Mistral AI's advanced models 💬</h3></center>")

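    # Note: gr.ChatInterface (below) passes additional_inputs to generate()
    # positionally, after (message, history), in the order they appear in the
    # additional_inputs list defined above.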
    gr.HTML("<center><p>Learn more about the model here. 📚</p></center>")

    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
        theme=gr.themes.Soft(),
        examples=[
            ["What is the secret to life?"],
            ["How does the universe work?"],
            ["What is quantum mechanics?"],
            ["Do you believe in an afterlife?"],
            ["Albert likes to surf every week. Each surfing session lasts for 4 hours and costs $20 per hour. How much would Albert spend in 5 weeks?"],
        ],
    )

demo.queue(max_size=100).launch(debug=True)