AnishHF committed on
Commit
feeba9e
1 Parent(s): 4fe8153

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -11
app.py CHANGED
@@ -12,17 +12,6 @@ quantization_config = QuantoConfig(
12
  model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=quantization_config, device_map="auto", token=access_token)
13
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
14
 
15
- # Load the tokenizer and model
16
- #model_id = "mistralai/Mixtral-8x7B-v0.1"
17
- #tokenizer = AutoTokenizer.from_pretrained(model_id, token=access_token)
18
- #model = AutoModelForCausalLM.from_pretrained(model_id, token=access_token, load_in_4bit=True)
19
- #model = AutoModelForCausalLM.from_pretrained(model_id, token=access_token)
20
- # Initialize the quantizer
21
- #quantizer = bnb.GemmQuantizer(act_bits=8, weight_bits=8)
22
-
23
- # Quantize the model
24
- #model = quantizer(model)
25
-
26
  # Function to generate text using the model
27
  def generate_text(prompt):
28
  text = prompt
 
12
  model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=quantization_config, device_map="auto", token=access_token)
13
  tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
14
 
 
 
 
 
 
 
 
 
 
 
 
15
  # Function to generate text using the model
16
  def generate_text(prompt):
17
  text = prompt