import gradio as gr
from ctransformers import AutoModelForCausalLM
from transformers import AutoTokenizer, pipeline
import torch
import re
import random

### FEEDBACKS UPDATE IN PERSISTENT MEMORY
from pathlib import Path
from huggingface_hub import CommitScheduler
import json

JSON_DATASET_DIR = Path("json_dataset")
JSON_DATASET_DIR.mkdir(parents=True, exist_ok=True)
JSON_DATASET_PATH = JSON_DATASET_DIR / "feedbacks.json"

# Periodically commit the local feedback file to the Hub dataset repo
scheduler = CommitScheduler(
    repo_id="bmi-labmedinfo/feedbacks",
    repo_type="dataset",
    folder_path=JSON_DATASET_DIR,
    path_in_repo="data",
)


def save_json(last_state: dict, pos_or_neg: str) -> None:
    """Append the last generation (plus its feedback label) as a JSON line."""
    last_state["feedback"] = pos_or_neg
    with scheduler.lock:
        with JSON_DATASET_PATH.open("a") as f:
            json.dump(last_state, f)
            f.write("\n")
### /FEEDBACKS

# Initialize the model (GGUF quantized weights loaded through ctransformers,
# wrapped with hf=True so they can be used in a transformers pipeline)
model = AutoModelForCausalLM.from_pretrained(
    "bmi-labmedinfo/Igea-1B-instruct-GGUF",
    model_file="unsloth.Q4_K_M.gguf",
    model_type="mistral",
    hf=True,
)
tokenizer = AutoTokenizer.from_pretrained("bmi-labmedinfo/Igea-1B-instruct")

gen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

# Pool of Italian system prompts describing the assistant's medical role;
# one is picked at random when the user does not supply a custom system prompt
system_med_msgs = [
    "Sei un assistente medico virtuale. Offri supporto per la gestione delle richieste mediche e fornisci informazioni mediche.",
    "Sei un assistente medico virtuale. Offri supporto per questioni mediche.",
    "Sei un assistente virtuale sanitario. Offri supporto e informazioni su problemi di salute.",
    "Sei un assistente virtuale per la salute. Fornisci supporto per richieste riguardanti la salute.",
    "Sei un assistente digitale per la salute. Fornisci supporto su questioni mediche e sanitarie.",
    "Sei un assistente virtuale per informazioni sanitarie. Fornisci supporto su problemi di salute e benessere.",
    "Sei un assistente digitale per la gestione delle questioni sanitarie. Rispondi a richieste mediche e fornisci informazioni sanitarie.",
    "Sei un assistente sanitario digitale. Rispondi a richieste di natura medica e fornisci informazioni sanitarie.",
    "Sei un assistente sanitario virtuale. Aiuti a rispondere a richieste mediche e fornisci informazioni sanitarie.",
]

# Alpaca-style instruction template: system prompt, instruction, empty response slot
alpaca_instruct_prompt = """{}

### Istruzione:
{}

### Risposta:
{}"""


# Define the function to generate text
def generate_text(input_text, max_new_tokens=512, temperature=1, system_prompt=""):
    if len(system_prompt) > 0:
        system_str = system_prompt
    else:
        system_str = random.choice(system_med_msgs)
    prompt = alpaca_instruct_prompt.format(system_str, input_text, "")
    output = gen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        return_full_text=False,
        forced_eos_token_id=tokenizer.encode("[...]")[1],
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_text = output[0]['generated_text']
    generated_text_color = 'blue'
    # Keep only the text after the response marker; if the model echoed a new
    # instruction instead of answering, show a fallback message in red
    split_tentative = generated_text.split("### Risposta:")
    if len(split_tentative) > 1:
        generated_text = split_tentative[1]
    elif '### Istruzione:' in split_tentative[0]:
        generated_text = "Spiacente, non sono in grado di rispondere."
        generated_text_color = 'red'
    # Apply the color to the generated portion so it is visible in the HTML output
    return (
        f"{input_text}<span style='color:{generated_text_color}'>{generated_text}</span>",
        {
            "input_prompt": prompt,
            "generated_text_raw": output[0]['generated_text'],
            "generated_text_displayed": generated_text,
        },
    )


def positive_feedback(last_generated_text):
    save_json(last_generated_text, "positive")
    gr.Info("Feedback collected. Thanks!")


def negative_feedback(last_generated_text):
    save_json(last_generated_text, "negative")
    gr.Info("Feedback collected. Thanks!")


# Create the Gradio interface
input_text = gr.Textbox(lines=2, placeholder="Enter your request here...", label="Input Text")
system_prompt = gr.Textbox(lines=2, placeholder="Enter custom system prompt...", label="Custom System Prompt")
max_new_tokens = gr.Slider(minimum=1, maximum=200, value=100, step=1, label="Max New Tokens")
temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")

with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
    # Holds the last prompt/generation pair so the feedback buttons can log it
    last_generated_text = gr.State({"input_prompt": "", "generated_text_raw": "", "generated_text_displayed": ""})
    gr.Markdown("# Igea Instruct Interface ⚕️🩺")
    gr.Markdown("🐢💬 To guarantee a reasonable throughput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space. Quantized models may suffer significant performance degradation and are therefore not representative of the original model's capabilities.")
    gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")
    input_text.render()
    with gr.Accordion("Advanced Options", open=False):
        max_new_tokens.render()
        temperature.render()
        system_prompt.render()
    output = gr.HTML(label="Generated Text", elem_id="outbox")
    btn = gr.Button("Generate")
    btn.click(generate_text, [input_text, max_new_tokens, temperature, system_prompt], outputs=[output, last_generated_text])
    with gr.Row():
        btn_p = gr.Button("👍")
        btn_n = gr.Button("👎")
    btn_p.click(positive_feedback, inputs=[last_generated_text], outputs=None)
    btn_n.click(negative_feedback, inputs=[last_generated_text], outputs=None)

# Launch the interface
if __name__ == "__main__":
    iface.launch(inline=True)