import requests.exceptions
import gradio as gr
from transformers import pipeline
from huggingface_hub import hf_hub_download
from huggingface_hub.repocard import metadata_load

app = gr.Blocks()


def load_agent(model_id_1, model_id_2):
    """
    Load the card metadata for both models and return their reported accuracy.
    """
    # Load the metadata and extract the accuracy for the first model
    metadata_1 = get_metadata(model_id_1)
    results_1 = parse_metrics_accuracy(metadata_1)
    # Load the metadata and extract the accuracy for the second model
    metadata_2 = get_metadata(model_id_2)
    results_2 = parse_metrics_accuracy(metadata_2)
    return model_id_1, results_1, model_id_2, results_2


def parse_metrics_accuracy(meta):
    """
    Return the first metric value reported in the model-index section of the
    card metadata, or None if the metadata is missing or has no model-index.
    """
    if meta is None or "model-index" not in meta:
        return None
    result = meta["model-index"][0]["results"]
    metrics = result[0]["metrics"]
    accuracy = metrics[0]["value"]
    return accuracy


def get_metadata(model_id):
    """
    Download the model card (README.md) of a repo and return its metadata.

    :param model_id: repo id on the Hugging Face Hub
    :return: parsed card metadata, or None if the card cannot be downloaded
    """
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        metadata = metadata_load(readme_path)
        print(metadata)
        return metadata
    except requests.exceptions.HTTPError:
        return None


classifier = pipeline("text-classification", model="juliensimon/distilbert-amazon-shoe-reviews")


def predict(review):
    """
    Classify a review and return a star rating with the model's confidence.
    """
    prediction = classifier(review)
    print(prediction)
    # Labels are LABEL_0 ... LABEL_4, i.e. 1 to 5 stars
    stars = int(prediction[0]["label"].split("_")[1]) + 1
    score = 100 * prediction[0]["score"]
    return "{} {:.0f}%".format("\U00002B50" * stars, score)


with app:
    gr.Markdown(
        """
        # Compare Sentiment Analysis Models

        Type some text to predict its sentiment.
        """)
    with gr.Row():
        inp = gr.Textbox(label="Type text here.", placeholder="The customer service was satisfactory.")
        out = gr.Textbox(label="Prediction")
    btn = gr.Button("Run")
    btn.click(fn=predict, inputs=inp, outputs=out)

    gr.Markdown(
        """
        Type the ids of the two models you want to compare, or use the examples below.
        """)
    with gr.Row():
        model1_input = gr.Textbox(label="Model 1")
        model2_input = gr.Textbox(label="Model 2")
    with gr.Row():
        app_button = gr.Button("Compare models")
    with gr.Row():
        with gr.Column():
            model1_name = gr.Markdown()
            model1_score_output = gr.Textbox(label="Sentiment")
        with gr.Column():
            model2_name = gr.Markdown()
            model2_score_output = gr.Textbox(label="Sentiment")

    app_button.click(load_agent,
                     inputs=[model1_input, model2_input],
                     outputs=[model1_name, model1_score_output, model2_name, model2_score_output])

    examples = gr.Examples(examples=[["distilbert-base-uncased-finetuned-sst-2-english", "distilbert-base-uncased-finetuned-sst-2-english"],
                                     ["distilbert-base-uncased-finetuned-sst-2-english", "distilbert-base-uncased-finetuned-sst-2-english"]],
                           inputs=[model1_input, model2_input])

app.launch()