xshubhamx committed
Commit
7529c15
1 Parent(s): 43dac43

Upload 3 files

Files changed (3)
  1. app.py +6 -0
  2. multiclass_sensitivity_macro.py +134 -0
  3. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,6 @@
+ import evaluate
+ from evaluate.utils import launch_gradio_widget
+
+
+ module = evaluate.load("xshubhamx/multiclass_sensitivity_macro")
+ launch_gradio_widget(module)
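
app.py only launches the hosted Gradio widget. For a quick local sanity check, the same module can be exercised directly through `compute`; a minimal sketch, assuming the Space ID above resolves and the `evaluate` dependency from requirements.txt is installed:

import evaluate

# Load the metric by the same ID used in app.py.
module = evaluate.load("xshubhamx/multiclass_sensitivity_macro")

# Class 0 is recovered once out of twice, classes 1 and 2 perfectly,
# so the macro average should come out to (0.5 + 1.0 + 1.0) / 3 ≈ 0.833.
results = module.compute(references=[0, 0, 1, 2], predictions=[0, 1, 1, 2])
print(results)  # expected: {'macro_sensitivity': 0.833...}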
multiclass_sensitivity_macro.py ADDED
@@ -0,0 +1,134 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Macro-averaged multiclass sensitivity (recall) metric."""
+
+ from collections import defaultdict
+
+ import evaluate
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ _CITATION = """\
+ @InProceedings{huggingface:module,
+ title = {A great new module},
+ authors={huggingface, Inc.},
+ year={2020}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Computes the sensitivity (recall) of multiclass predictions for each class and
+ returns the macro average, i.e. the unweighted mean of the per-class sensitivities.
+ """
+
+
+ _KWARGS_DESCRIPTION = """
+ Calculates the macro-averaged sensitivity (recall) of predictions against references.
+ Args:
+     predictions: list of predicted class labels (integers).
+     references: list of true class labels (integers), one per prediction.
+ Returns:
+     macro_sensitivity: unweighted mean of the per-class sensitivities.
+ Examples:
+     >>> module = evaluate.load("xshubhamx/multiclass_sensitivity_macro")
+     >>> results = module.compute(references=[0, 1], predictions=[0, 1])
+     >>> print(results)
+     {'macro_sensitivity': 1.0}
+ """
+
+
+ @evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class multiclass_sensitivity_macro(evaluate.Metric):
+     """Macro-averaged multiclass sensitivity (recall)."""
+
+     def _info(self):
+         return evaluate.MetricInfo(
+             # This is the description that will appear on the modules page.
+             module_type="metric",
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             # This defines the format of each prediction and reference.
+             features=datasets.Features({
+                 'predictions': datasets.Value('int64'),
+                 'references': datasets.Value('int64'),
+             }),
+             # Homepage of the module for documentation
+             homepage="http://module.homepage",
+             # Additional links to the codebase or references
+             codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
+             reference_urls=["http://path.to.reference.url/new_module"]
+         )
+
+     def _download_and_prepare(self, dl_manager):
+         """Optional: download external resources needed to compute the scores."""
+         pass
+
+     def _compute(self, predictions, references):
+         """Calculate the macro-averaged multiclass sensitivity (recall).
+
+         Args:
+             references (list): True class labels.
+             predictions (list): Predicted class labels.
+
+         Returns:
+             dict: {"macro_sensitivity": unweighted mean of the per-class sensitivities}.
+         """
+         # Count true positives and false negatives for each true class.
+         tp_counts = defaultdict(int)
+         fn_counts = defaultdict(int)
+
+         for true_label, pred_label in zip(references, predictions):
+             if true_label == pred_label:
+                 tp_counts[true_label] += 1
+             else:
+                 fn_counts[true_label] += 1
+
+         # Class-wise sensitivity: TP / (TP + FN).
+         class_sensitivities = {}
+         for class_label in set(references):
+             tp = tp_counts[class_label]
+             fn = fn_counts[class_label]
+             class_sensitivities[class_label] = tp / (tp + fn) if (tp + fn) > 0 else 0
+
+         # Macro average: unweighted mean over the classes present in the references.
+         macro_avg_sensitivity = (
+             sum(class_sensitivities.values()) / len(class_sensitivities)
+             if class_sensitivities else 0
+         )
+         return {
+             "macro_sensitivity": macro_avg_sensitivity,
+         }
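
Because `_compute` is a hand-rolled recall implementation, it is worth cross-checking it against an established one. A minimal sketch, assuming scikit-learn is installed locally (it is not listed in requirements.txt); the `macro_sensitivity` helper below is an illustrative re-implementation of the logic above, not part of the uploaded module:

from collections import defaultdict

from sklearn.metrics import recall_score

def macro_sensitivity(references, predictions):
    # Same logic as _compute: per-class recall, then an unweighted mean.
    tp = defaultdict(int)
    total = defaultdict(int)
    for true_label, pred_label in zip(references, predictions):
        total[true_label] += 1
        tp[true_label] += int(true_label == pred_label)
    per_class = {c: tp[c] / total[c] for c in total}
    return sum(per_class.values()) / len(per_class)

refs = [0, 0, 1, 2, 2, 2]
preds = [0, 1, 1, 2, 0, 2]
print(macro_sensitivity(refs, preds))              # ≈ 0.7222
print(recall_score(refs, preds, average="macro"))  # should agree here

One caveat: `_compute` averages only over classes that appear in the references, while scikit-learn's macro recall also includes labels that occur only in the predictions (scoring them 0), so the two can diverge on such inputs.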
requirements.txt ADDED
@@ -0,0 +1 @@
+ git+https://github.com/huggingface/evaluate@main