Michielo committed
Commit c35485e
1 Parent(s): e93f034

Add information text

Files changed (1)
  1. app.py +17 -2
app.py CHANGED
@@ -126,9 +126,24 @@ def predict_toxicity(text, model, tokenizer, device, model_name):
     return prediction, inference_time
 
 def main():
-    st.set_page_config(page_title="Multi-Model Toxicity Detector", layout="wide")
-    st.title("Multi-Model Toxicity Detector")
+    st.set_page_config(page_title="Toxicity Detector Model Comparison", layout="wide")
+    st.title("Toxicity Detector Model Comparison")
 
+    # Explanation text
+    st.markdown("""
+    ### How It Works
+    This application compares various toxicity detection models to classify whether a given text is toxic or not. The models being compared include:
+
+    - **Tiny-Toxic-Detector**: A new 2M parameter model being released soon.
+    - [**Toxic-BERT**](https://huggingface.co/unitary/toxic-bert): A 109M parameter BERT-based model.
+    - [**RoBERTa-Toxicity-Classifier**](https://huggingface.co/s-nlp/roberta_toxicity_classifier): A 124M parameter RoBERTa-based model.
+    - [**Toxic-Comment-Model**](https://huggingface.co/martin-ha/toxic-comment-model): A 67M parameter DistilBERT-based model.
+    - [**ToxicChat-T5**](https://huggingface.co/lmsys/toxicchat-t5-large-v1.0): A 738M parameter T5-based model.
+
+    Simply enter the text you want to classify, and the app will provide the predictions from each model, along with the inference time.
+    Please note these models are (mostly) English-only.
+    """)
+
     # Load models
     hf_token = os.getenv('AT')
     models, tokenizers, device = load_models_and_tokenizers(hf_token)
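For context, this commit only adds the explanatory UI text; the actual `predict_toxicity` and `load_models_and_tokenizers` implementations live elsewhere in app.py and are not shown in the diff. Below is a rough, hedged sketch of how one of the listed models (Toxic-BERT) could be classified with inference timing using the standard `transformers` API; the function body, threshold, and example text are illustrative assumptions, not the app's actual code.

```python
# Illustrative sketch only -- not the app's predict_toxicity implementation.
import time

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "unitary/toxic-bert"  # one of the models listed above
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

def predict_toxicity(text: str):
    """Return a toxic/non-toxic label and the inference time in seconds."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True).to(device)
    start = time.time()
    with torch.no_grad():
        logits = model(**inputs).logits
    inference_time = time.time() - start
    # toxic-bert is multi-label; flag the text if any label exceeds an
    # illustrative 0.5 threshold.
    probs = torch.sigmoid(logits)[0]
    prediction = "Toxic" if (probs > 0.5).any() else "Not Toxic"
    return prediction, inference_time

print(predict_toxicity("You are a wonderful person."))
```

The app repeats this kind of call once per loaded model so the predictions and timings can be compared side by side.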