AnishHF committed on
Commit
a1b3c51
1 Parent(s): 0b7787a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -6,15 +6,14 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
6
 
7
  access_token = os.environ["GATED_ACCESS_TOKEN"]
8
 
9
- # specify how to quantize the model
10
  quantization_config = BitsAndBytesConfig(
11
  load_in_4bit=True,
12
  bnb_4bit_quant_type="nf4",
13
  bnb_4bit_compute_dtype="torch.float16",
14
  )
15
 
16
- model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1", quantization_config=True, device_map="auto")
17
- tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
18
 
19
  # Load the tokenizer and model
20
  #model_id = "mistralai/Mixtral-8x7B-v0.1"
 
6
 
7
  access_token = os.environ["GATED_ACCESS_TOKEN"]
8
 
 
9
  quantization_config = BitsAndBytesConfig(
10
  load_in_4bit=True,
11
  bnb_4bit_quant_type="nf4",
12
  bnb_4bit_compute_dtype="torch.float16",
13
  )
14
 
15
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=True, device_map="auto")
16
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
17
 
18
  # Load the tokenizer and model
19
  #model_id = "mistralai/Mixtral-8x7B-v0.1"