import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

access_token = os.environ["GATED_ACCESS_TOKEN"]

# Load the tokenizer and model
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)

# Quantize the model to 4-bit at load time (handled by bitsandbytes under the hood)
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=access_token,
    quantization_config=quantization_config,
    device_map="auto",
)

# Function to generate text using the model
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=20)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Create the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, label="Input Prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Mixtral 8x7B Text Generation",
    description="Use this interface to generate text using the Mixtral 8x7B Instruct language model.",
)

# Launch the Gradio interface
iface.launch()
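
# A minimal sketch of an alternative prompt path (an addition, not part of the
# original script): Mixtral-Instruct checkpoints are trained on the
# [INST] ... [/INST] chat format, so applying the tokenizer's chat template
# usually yields better completions than a raw prompt. To try it, move this
# definition above the gr.Interface call and pass fn=generate_text_chat.
def generate_text_chat(prompt):
    # Wrap the prompt as a single-turn conversation and let the tokenizer
    # render the model's chat template, appending the generation prompt.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=20)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)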