PeterZentai committed on
Commit
cb64c73
1 Parent(s): a11eb6d

Update README.md

Files changed (1)
  1. README.md +6 -1
README.md CHANGED
@@ -45,6 +45,8 @@ Cost for inference.
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+model_id = "WeeRobots/phi-2-chat-v05"
+
 model = AutoModelForCausalLM.from_pretrained(model_id, device_map={"": 0}, trust_remote_code=True)
 tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True)
 
@@ -63,4 +65,7 @@ with torch.no_grad():
 # without an explicit eos_token_id the model might continue generating irrelevant text; this way it stops at the right place
 model_response = model.generate(**model_input, max_new_tokens=512, eos_token_id=tokenizer.eos_token_id)
 print(tokenizer.decode(model_response[0], skip_special_tokens=False))
-```
+```
+
+# Non production quality
+Be aware that this model tuning wasn't thoroughly tested, and isn't meant to be used in production, only for experimentation or hobby projects.
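
For context, the two hunks above show only fragments of the README's usage example; the prompt-building lines between them are not part of this diff. The sketch below fills that gap with an assumed chat-template step (the `messages` list and the `apply_chat_template` call are illustrative stand-ins, not taken from the README) so the snippet runs end to end:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "WeeRobots/phi-2-chat-v05"

model = AutoModelForCausalLM.from_pretrained(model_id, device_map={"": 0}, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True, trust_remote_code=True)

# Assumed prompt construction -- the README's actual lines between the two
# hunks are not shown in this diff, so a generic chat-template call stands in.
# If the tokenizer ships no chat template, format the prompt string manually.
messages = [{"role": "user", "content": "What is the capital of France?"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
model_input = tokenizer(prompt, return_tensors="pt").to(model.device)

with torch.no_grad():
    # pass eos_token_id explicitly so generation stops at the end-of-sequence
    # token instead of running on with irrelevant text
    model_response = model.generate(**model_input, max_new_tokens=512, eos_token_id=tokenizer.eos_token_id)

print(tokenizer.decode(model_response[0], skip_special_tokens=False))
```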