import gradio as gr
import json
from datetime import datetime
from theme import TufteInspired
import uuid
from huggingface_hub import InferenceClient
from openai import OpenAI
from huggingface_hub import get_token, login
from prompts import detailed_genre_description_prompt, basic_prompt
import random
import os

# Ensure you're logged in to Hugging Face
login(get_token())

# Models sampled at random for each generation so votes cover several LLMs.
MODELS = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
]
# Remembers which model produced the latest blurb so the vote log can attribute it.
CHOSEN_MODEL = None


def get_random_model():
    """Pick a random model id from MODELS, record it in CHOSEN_MODEL, and return it."""
    global CHOSEN_MODEL
    model = random.choice(MODELS)
    CHOSEN_MODEL = model
    return model


def create_client(model_id):
    """Return an OpenAI-compatible client pointed at the HF Inference API for `model_id`."""
    return OpenAI(
        base_url=f"https://api-inference.huggingface.co/models/{model_id}/v1",
        api_key=get_token(),
    )


# NOTE(review): this module-level client is never used — generate_blurb builds a
# per-model client via create_client(). Kept so any external code importing
# `client` from this module keeps working; confirm before removing.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct/v1",
    api_key=get_token(),
)


def generate_prompt():
    """Return one of the two prompt variants with equal probability."""
    if random.choice([True, False]):
        return detailed_genre_description_prompt()
    else:
        return basic_prompt()


def get_and_store_prompt():
    """Generate a prompt, echo it to stdout, and return it (stored in gr.State)."""
    prompt = generate_prompt()
    print(prompt)  # Keep this for debugging
    return prompt


def generate_blurb(prompt):
    """Stream a book blurb for `prompt` from a randomly chosen model.

    Yields the accumulated text after each streamed chunk so the Gradio
    Markdown output updates progressively.
    """
    model_id = get_random_model()
    client = create_client(model_id)
    # Vary the permitted length so blurbs differ in size between runs.
    max_tokens = random.randint(100, 1000)
    chat_completion = client.chat.completions.create(
        model="tgi",
        messages=[
            {"role": "user", "content": prompt},
        ],
        stream=True,
        max_tokens=max_tokens,
    )
    full_text = ""
    for message in chat_completion:
        # BUG FIX: delta.content is None on role-only and final stop chunks of an
        # OpenAI-compatible stream; concatenating None raised TypeError.
        full_text += message.choices[0].delta.content or ""
        yield full_text


# Function to log blurb and vote
def log_blurb_and_vote(prompt, blurb, vote, user_info: gr.OAuthProfile | None, *args):
    """Append one JSON line (prompt, blurb, vote, user, model) to blurb_log.jsonl.

    Anonymous visitors (no OAuth profile) get a random UUID as their user id.
    Returns a short status string shown in the vote-status textbox.
    """
    user_id = user_info.username if user_info is not None else str(uuid.uuid4())
    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "prompt": prompt,
        "blurb": blurb,
        "vote": vote,
        "user_id": user_id,
        "model": CHOSEN_MODEL,
    }
    with open("blurb_log.jsonl", "a") as f:
        f.write(json.dumps(log_entry) + "\n")
    gr.Info("Thank you for voting!")
    return f"Logged: {vote} by user {user_id}"


# Create custom theme
tufte_theme = TufteInspired()

# Create Gradio interface
with gr.Blocks(theme=tufte_theme) as demo:
    # NOTE(review): the original heading markup was mangled in this file; a
    # centered <h1> is the most likely original — confirm against the deployed app.
    gr.Markdown("<h1 style='text-align: center;'>Would you read this book?</h1>")
    gr.Markdown(
        """
Looking for your next summer read? Would you read a book based on this LLM generated blurb?
Your vote will be added to this Hugging Face dataset
"""
    )

    # Add the login button
    login_btn = gr.LoginButton()

    with gr.Row():
        generate_btn = gr.Button("Create a book", variant="primary")

    prompt_state = gr.State()
    blurb_output = gr.Markdown(label="Book blurb")

    # Voting controls stay hidden until a blurb has finished streaming.
    with gr.Row(visible=False) as voting_row:
        upvote_btn = gr.Button("👍 would read")
        downvote_btn = gr.Button("👎 wouldn't read")

    vote_output = gr.Textbox(label="Vote Status", interactive=False, visible=False)

    def generate_and_show(prompt):
        """Show a placeholder and hide the voting row while a blurb streams."""
        # BUG FIX: gr.Markdown.update() is the Gradio 3 API and was removed in
        # Gradio 4; returning the component constructor is the update idiom the
        # rest of this file already uses (see gr.Row(visible=...) below).
        return gr.Markdown(value="Generating..."), gr.Row(visible=False)

    def show_voting_buttons(blurb):
        """Pass the finished blurb through and reveal the voting row."""
        return blurb, gr.Row(visible=True)

    generate_btn.click(get_and_store_prompt, outputs=prompt_state).then(
        generate_and_show, inputs=prompt_state, outputs=[blurb_output, voting_row]
    ).then(generate_blurb, inputs=prompt_state, outputs=blurb_output).then(
        show_voting_buttons, inputs=blurb_output, outputs=[blurb_output, voting_row]
    )

    upvote_btn.click(
        log_blurb_and_vote,
        inputs=[
            prompt_state,
            blurb_output,
            gr.Textbox(value="upvote", visible=False),
            login_btn,
        ],
        outputs=vote_output,
    )

    downvote_btn.click(
        log_blurb_and_vote,
        inputs=[
            prompt_state,
            blurb_output,
            gr.Textbox(value="downvote", visible=False),
            login_btn,
        ],
        outputs=vote_output,
    )

if __name__ == "__main__":
    demo.launch(debug=True)