# Leaderboard / src/about.py
from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")


NUM_FEWSHOT = 0  # Change this to match your few-shot setting
# ---------------------------------------------------
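
# Illustrative sketch (assumption, not part of the original template): the app
# code typically iterates over `Tasks` to pull each benchmark's metric out of a
# per-model results dict and key it by the display column name. The helper below
# is a hypothetical example of that lookup, not the actual leaderboard logic.
def extract_task_scores(results: dict) -> dict:
    """Map each Task's (benchmark, metric) pair in `results` to its col_name."""
    scores = {}
    for task in Tasks:
        benchmark_results = results.get(task.value.benchmark, {})
        scores[task.value.col_name] = benchmark_results.get(task.value.metric)
    return scores
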

# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">MJ-Bench</h1>"""

MJB_LOGO = '<img src="" alt="Logo" style="width: 30%; display: block; margin: auto;">'

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
# Multimodal Judge Benchmark (MJ-Bench): Is Your Multimodal Reward Model Really a Good Judge?
### Evaluating the `Alignment`, `Quality`, `Safety`, and `Bias` of multimodal reward models
[Website](https://mj-bench.github.io) | [Code](https://github.com/MJ-Bench/MJ-Bench) | [Eval. Dataset](https://huggingface.co/datasets/MJ-Bench/MJ-Bench) | [Results](https://huggingface.co/datasets/MJ-Bench/MJ-Bench-Results) | [Refined Model via RMs](https://huggingface.co/collections/MJ-Bench/aligned-diffusion-model-via-dpo-667f8b71f35c3ff47acafd43) | [Paper](https://arxiv.org) | Total models: {}
"""
# Which evaluations are you running? How can people reproduce your results?
LLM_BENCHMARKS_TEXT = """
"""

EVALUATION_QUEUE_TEXT = """
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""

ABOUT_TEXT = """
"""