# https://huggingface.co/spaces/CK42/sentiment-model-comparison/blob/main/app.py
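# A small Gradio Blocks app that runs sentiment-analysis models on a piece of
# review text and reports the predicted star rating with a confidence score,
# so two models can be compared side by side.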

import gradio as gr
import requests.exceptions
from transformers import pipeline
from huggingface_hub import hf_hub_download
from huggingface_hub.repocard import metadata_load


app = gr.Blocks()

# Both slots currently point at the same model; swap in a different
# model ID for model_id_2 to compare two distinct models.
model_id_1 = "juliensimon/distilbert-amazon-shoe-reviews"
model_id_2 = "juliensimon/distilbert-amazon-shoe-reviews"

def load_agent(review, model_id):
    """
    Load the model's card metadata and return its prediction for the review.
    """
    # Load the metrics from the model card (currently only printed)
    metadata = get_metadata(model_id)

    # Get the prediction for the review text
    prediction = predict(review, model_id)

    return prediction


def get_metadata(model_id):
    """
    Get the metadata of the model repo
    :param model_id:
    :return: metadata
    """
    try:
        readme_path = hf_hub_download(model_id, filename="README.md")
        metadata = metadata_load(readme_path)
        print(metadata)
        return metadata
    except requests.exceptions.HTTPError:
        return None

def predict(review, model_id):
    """
    Classify a review with the given model and format the result as a
    star rating followed by the confidence score.
    """
    # Note: the pipeline is rebuilt on every call; caching it per model
    # would avoid reloading the weights for each prediction.
    classifier = pipeline("text-classification", model=model_id)
    prediction = classifier(review)
    print(prediction)
    # The label encodes the class index (e.g. "LABEL_4"); index + 1 gives the star count
    stars = int(prediction[0]['label'].split('_')[1]) + 1
    score = 100 * prediction[0]['score']
    return "{} {:.0f}%".format("\U00002B50" * stars, score)
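# For example, a 5-star prediction at 93% confidence is rendered as
# "⭐⭐⭐⭐⭐ 93%".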

with app:
    gr.Markdown(
    """
    # Compare Sentiment Analysis Models

    Type a review below, then use the buttons to get each model's prediction.
    """)
    with gr.Row():
        inp_1 = gr.Textbox(label="Type text here.", placeholder="The customer service was satisfactory.")
        out_2 = gr.Textbox(label="Prediction")

    with gr.Row():
        model1_input = gr.Textbox(label="Model 1", value=model_id_1)
    with gr.Row():
        btn_1 = gr.Button("Prediction for Model 1")
    # Pass a callable, not the result of a call, so the review text from
    # inp_1 is fed to the handler when the button is clicked.
    btn_1.click(fn=lambda review: load_agent(review, model_id_1), inputs=inp_1, outputs=out_2)

    with gr.Row():
        model2_input = gr.Textbox(label="Model 2", value=model_id_2)
    with gr.Row():
        btn_2 = gr.Button("Prediction for Model 2")
    btn_2.click(fn=lambda review: predict(review, model_id_2), inputs=inp_1, outputs=out_2)

    
app.launch()