from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # Task(benchmark key in the results JSON, metric key in the results JSON, column name shown in the leaderboard)
    task0 = Task("category_mean", "history", "History")
    task1 = Task("category_mean", "grammar", "Grammar")
    task2 = Task("category_mean", "logic", "Logic")
    task3 = Task("category_mean", "sayings", "Sayings")
    task4 = Task("category_mean", "spelling", "Spelling")
    task5 = Task("category_mean", "Vocabulary", "Vocabulary")


NUM_FEWSHOT = 0  # Number of few-shot examples used in the evaluation
# ---------------------------------------------------
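

# Illustration only (not part of the template): a sketch of how the keys in Tasks
# above map onto a results JSON, assuming each score lives at
# results[task.benchmark][task.metric]. Adjust the lookup to the real result files.
def extract_scores(results: dict) -> dict:
    """Return {display column name: score} for every task, under the assumed layout."""
    return {
        task.value.col_name: results.get(task.value.benchmark, {}).get(task.value.metric)
        for task in Tasks
    }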


# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">HunBench leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard tracks model performance on the HunBench benchmark, which measures how well models handle tasks requiring a solid understanding of the Hungarian language. The benchmark has two key parts: the first captures a model's language-understanding capabilities, while the second focuses on its knowledge. It is divided into several tasks, each evaluating a different aspect of performance, and the leaderboard is sorted by the average score across all tasks.
"""

# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## How it works
TODO
## Reproducibility
TODO

"""

EVALUATION_QUEUE_TEXT = """
TODO
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""