mobicham committed on
Commit
ac3c4e8
1 Parent(s): 26c76e9

Update README.md

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -19,7 +19,7 @@ model = HQQModelForCausalLM.from_quantized(model_id)
 HQQLinear.set_backend(HQQBackend.PYTORCH_COMPILE) #Optional
 
 #Text Generation
-prompt = "<s> [INST] Who is Elon Musk? [/INST]"
+prompt = "<s> [INST] How do I build a car? [/INST] "
 
 inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
 outputs = model.generate(**(inputs.to('cuda')), max_new_tokens=1000)
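
For context, a minimal self-contained sketch of how the README snippet around the changed prompt line would run end to end. The import paths, the `model_id` value, and the final decode step are assumptions based on typical HQQ example usage and are not part of this diff; only `HQQModelForCausalLM.from_quantized`, the backend call, and the generation lines appear in the changed file.

```python
# Sketch only: import paths and model_id below are assumptions, not from this diff.
from transformers import AutoTokenizer
from hqq.engine.hf import HQQModelForCausalLM          # assumed hqq import path
from hqq.core.quantize import HQQLinear, HQQBackend    # assumed hqq import path

model_id = "<quantized-model-repo-id>"  # placeholder, not specified in the diff
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = HQQModelForCausalLM.from_quantized(model_id)

HQQLinear.set_backend(HQQBackend.PYTORCH_COMPILE)  # Optional

# Text Generation (prompt string as updated by this commit)
prompt = "<s> [INST] How do I build a car? [/INST] "

inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
outputs = model.generate(**(inputs.to('cuda')), max_new_tokens=1000)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # assumed decode step
```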