from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("category_mean", "history", "History")
    task1 = Task("category_mean", "grammar", "Grammar")
    task2 = Task("category_mean", "logic", "Logic")
    task3 = Task("category_mean", "sayings", "Sayings")
    task4 = Task("category_mean", "spelling", "Spelling")
    task5 = Task("category_mean", "Vocabulary", "Vocabulary")
NUM_FEWSHOT = 0  # Number of few-shot examples used in the evaluation
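

# Illustrative sketch only (not part of the leaderboard configuration): assuming each
# model's parsed result file stores scores as results[benchmark][metric], e.g.
# results["category_mean"]["history"], the Tasks enum can map scores to display
# columns. The helper name and results layout are assumptions, not defined here.
def _task_scores_from_results(results: dict) -> dict:
    """Map each task's display column to its score pulled from a parsed results dict."""
    return {
        task.value.col_name: results[task.value.benchmark][task.value.metric]
        for task in Tasks
    }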
# ---------------------------------------------------
# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">HunBench leaderboard</h1>"""
# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard tracks model performance on the HunBench benchmark, which evaluates tasks that require a solid understanding of the Hungarian language. The benchmark has two key parts: the first captures a model's language understanding capabilities, while the second focuses on its knowledge. It is divided into several tasks, each probing a different aspect of performance, and the leaderboard is sorted by the model's average score across all tasks.
"""
# Which evaluations are you running? how can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
## How it works
TODO
## Reproducibility
TODO
"""
EVALUATION_QUEUE_TEXT = """
TODO
"""
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""