import copy
import json
from typing import Dict, List, Optional

from transformers import LlamaTokenizer

# from rewardbench.generative import process_judgement

# Pairwise LLM-as-judge conversation template: a fixed system instruction plus a
# user turn with {input}/{response_a}/{response_b} placeholders.
SELF_TAUGHT_WITH_SYSTEM_PROMPT = [
    {
        "role": "system",
        "content": 'Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user question displayed below. You should choose the assistant that follows the user\'s instructions and answers the user\'s question better. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of their responses. Begin your evaluation by comparing the two responses and provide a short explanation. Avoid any position biases and ensure that the order in which the responses were presented does not influence your decision. Do not allow the length of the responses to influence your evaluation. Do not favor certain names of the assistants. Be as objective as possible. After providing your explanation, output your final verdict by strictly following this format: \\"[[A]]\\" if assistant A is better, \\"[[B]]\\" if assistant B is better.',
    },
    {
        "role": "user",
        "content": """[User Question]
{input}

[The Start of Assistant A's Answer]
{response_a}
[The End of Assistant A's Answer]

[The Start of Assistant B's Answer]
{response_b}
[The End of Assistant B's Answer]
""",
    },
]


def load_from_jsonl(file_name: str) -> List[dict]:
    """Load a JSONL file, pointing at the offending line on parse failure."""

    def load_json_line(line: str, i: int, file_name: str):
        try:
            return json.loads(line)
        except json.JSONDecodeError as e:
            raise ValueError(f"Error in line {i + 1}\n{line} of {file_name}") from e

    with open(file_name, "r", encoding="UTF-8") as f:
        data = [load_json_line(line, i, file_name) for i, line in enumerate(f)]
    return data


def save_to_jsonl(data: List[Dict], filename: str, write_mode: str = "w") -> None:
    """Write a list of dicts to `filename`, one JSON object per line."""
    with open(filename, write_mode, encoding="UTF-8") as file:
        for item in data:
            file.write(json.dumps(item) + "\n")


def prepare_vllm_input(
    input: str, response_a: str, response_b: str, tokenizer: LlamaTokenizer
) -> str:
    """Fill the judge template and render it as a single prompt string for vLLM."""
    # Deep-copy the template: formatting it in place would overwrite the
    # placeholders in the module-level prompt and break every call after the first.
    conversation = copy.deepcopy(SELF_TAUGHT_WITH_SYSTEM_PROMPT)
    conversation[-1]["content"] = conversation[-1]["content"].format(
        input=input,
        response_a=response_a,
        response_b=response_b,
    )
    return tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, tokenize=False
    )


def parse_judgement(generation: str) -> Optional[str]:
    """Map a judge generation to "model_a", "model_b", "tie", or None if unparseable."""
    labels_dict = {
        "[[A]]": "model_a",
        "[[B]]": "model_b",
        "[[C]]": "tie",
    }
    # A generation containing both verdict markers is ambiguous.
    if "[[A]]" in generation and "[[B]]" in generation:
        return None
    for kw, label in labels_dict.items():
        if kw in generation:
            return label
    return None
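

# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal end-to-end flow under assumed settings: build the judge prompt,
# generate a verdict with vLLM, and parse it. The checkpoint name, sampling
# parameters, and example texts are hypothetical placeholders, not values
# taken from this repository.
if __name__ == "__main__":
    from vllm import LLM, SamplingParams

    model_name = "meta-llama/Llama-2-7b-chat-hf"  # hypothetical judge checkpoint
    tokenizer = LlamaTokenizer.from_pretrained(model_name)
    llm = LLM(model=model_name)

    prompt = prepare_vllm_input(
        input="What is 2 + 2?",
        response_a="2 + 2 = 4.",
        response_b="2 + 2 = 22.",
        tokenizer=tokenizer,
    )
    outputs = llm.generate([prompt], SamplingParams(temperature=0.0, max_tokens=1024))
    # Expected verdict: "model_a", "model_b", "tie", or None if ambiguous.
    print(parse_judgement(outputs[0].outputs[0].text))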