AnishHF committed on
Commit
8b8d45c
1 Parent(s): 538b5d4

Update app.py

Files changed (1): app.py +4 -3
app.py CHANGED
@@ -9,12 +9,13 @@ access_token = os.environ["GATED_ACCESS_TOKEN"]
  # Load the tokenizer and model
  model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
  tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
- model = AutoModelForCausalLM.from_pretrained(model_id, token=access_token)
+ model = AutoModelForCausalLM.from_pretrained(model_id, token=access_token, load_in_4bit=True)
+ #model = AutoModelForCausalLM.from_pretrained(model_id, token=access_token)
  # Initialize the quantizer
- quantizer = bnb.GemmQuantizer(act_bits=8, weight_bits=8)
+ #quantizer = bnb.GemmQuantizer(act_bits=8, weight_bits=8)
 
  # Quantize the model
- model = quantizer(model)
+ #model = quantizer(model)
 
  # Function to generate text using the model
  def generate_text(prompt):
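
The commit replaces the bitsandbytes quantizer calls with 4-bit loading directly in from_pretrained. A minimal sketch of the same idea, assuming bitsandbytes and accelerate are installed and using transformers' BitsAndBytesConfig instead of the bare load_in_4bit flag (the NF4/compute-dtype settings below are illustrative choices, not part of the commit):

import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

access_token = os.environ["GATED_ACCESS_TOKEN"]
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Quantization settings: 4-bit NF4 weights with bfloat16 compute (assumed values).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=access_token,
    quantization_config=bnb_config,
    device_map="auto",  # spread layers across available GPUs/CPU
)

Passing a quantization_config object is equivalent in effect to load_in_4bit=True here, but keeps the quantization settings explicit and adjustable.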