from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Select your tasks here
# ---------------------------------------------------
class Tasks(Enum):
    # Each Task is (task_key in the results json, metric_key in the results json, column name displayed in the leaderboard).
    task0 = Task("anli_r1", "acc", "ANLI")
    task1 = Task("logiqa", "acc_norm", "LogiQA")

NUM_FEWSHOT = 0  # Change to match your few-shot setting
# ---------------------------------------------------
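# Illustrative sketch (an assumption, not part of the original file): the
# leaderboard app typically iterates over Tasks to derive its display columns
# and the keys used to read scores from the results json, roughly like:
#
#     display_columns = [task.value.col_name for task in Tasks]            # ["ANLI", "LogiQA"]
#     metric_keys = {task.value.benchmark: task.value.metric for task in Tasks}
#
# Adding a new Task entry above is then enough for it to appear as a column.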



# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">MJ-Bench</h1>"""

MJB_LOGO = '<img src="" alt="Logo" style="width: 30%; display: block; margin: auto;">'

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
# Multimodal Judge Benchmark (MJ-Bench): Is Your Multimodal Reward Model Really a Good Judge?
### Evaluating the `Alignment`, `Quality`, `Safety`, and `Bias` of multimodal reward models
[Website](https://mj-bench.github.io) | [Code](https://github.com/MJ-Bench/MJ-Bench) | [Eval. Dataset](https://huggingface.co/datasets/MJ-Bench/MJ-Bench) | [Results](https://huggingface.co/datasets/MJ-Bench/MJ-Bench-Results) | [Refined Model via RMs](https://huggingface.co/collections/MJ-Bench/aligned-diffusion-model-via-dpo-667f8b71f35c3ff47acafd43) | [Paper](https://arxiv.org) | Total models: {}
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = f"""
"""

EVALUATION_QUEUE_TEXT = """
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""


ABOUT_TEXT = """


"""