TheBloke committed
Commit
39ee852
1 Parent(s): 6465c31

Upload README.md

Files changed (1)
  1. README.md +6 -25
README.md CHANGED
@@ -8,15 +8,8 @@ license: apache-2.0
  model_creator: MonsterAPI
  model_name: Mistral 7B Norobots
  model_type: mistral
- prompt_template: '<|im_start|>system
-
-   {system_message}<|im_end|>
-
-   <|im_start|>user
-
-   {prompt}<|im_end|>
-
-   <|im_start|>assistant
+ prompt_template: '<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}}
+   </s>

  '
  quantized_by: TheBloke
@@ -69,14 +62,10 @@ These files were quantised using hardware kindly provided by [Massed Compute](ht
  <!-- repositories-available end -->

  <!-- prompt-template start -->
- ## Prompt template: ChatML
+ ## Prompt template: NoRobots

  ```
- <|im_start|>system
- {system_message}<|im_end|>
- <|im_start|>user
- {prompt}<|im_end|>
- <|im_start|>assistant
+ <|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>

  ```

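For clarity, a minimal sketch of how the new NoRobots template might be filled in Python. Unlike the old ChatML template, the new format carries no {system_message} placeholder, and the {{response}} slot is what the model itself completes; the prompt value is illustrative.

```python
# Sketch: building the new NoRobots-style prompt string.
# The {{response}} slot is generated by the model, so it is omitted here.
prompt = "Tell me about AI"
prompt_template = f"<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

print(prompt_template)
# -> <|system|> </s> <|user|> Tell me about AI </s> <|assistant|>
```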
@@ -246,11 +235,7 @@ from huggingface_hub import InferenceClient
  endpoint_url = "https://your-endpoint-url-here"

  prompt = "Tell me about AI"
- prompt_template=f'''<|im_start|>system
- {system_message}<|im_end|>
- <|im_start|>user
- {prompt}<|im_end|>
- <|im_start|>assistant
+ prompt_template=f'''<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
  '''

  client = InferenceClient(endpoint_url)
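Putting the hunk above in context: a runnable sketch of the surrounding README code, assuming a Text Generation Inference endpoint. endpoint_url is the README's placeholder and the generation parameters are illustrative, not prescribed by the README.

```python
from huggingface_hub import InferenceClient

endpoint_url = "https://your-endpoint-url-here"  # placeholder, as in the README
client = InferenceClient(endpoint_url)

prompt = "Tell me about AI"
prompt_template = f"<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

# text_generation() sends the formatted prompt to the TGI endpoint.
response = client.text_generation(prompt_template, max_new_tokens=128, temperature=0.7)
print(response)
```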
@@ -306,11 +291,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

  prompt = "Tell me about AI"
- prompt_template=f'''<|im_start|>system
- {system_message}<|im_end|>
- <|im_start|>user
- {prompt}<|im_end|>
- <|im_start|>assistant
+ prompt_template=f'''<|system|> </s> <|user|> {prompt} </s> <|assistant|> {{response}} </s>
  '''

  print("\n\n*** Generate:")
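Likewise for the Transformers hunk: a self-contained sketch of local generation with the new template. The model_name_or_path value is an assumption (the hunk does not show the real repo id), and the sampling parameters are illustrative.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id for illustration; the hunk does not show the real value.
model_name_or_path = "TheBloke/mistral_7b_norobots-GPTQ"

model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)

prompt = "Tell me about AI"
prompt_template = f"<|system|> </s> <|user|> {prompt} </s> <|assistant|>"

input_ids = tokenizer(prompt_template, return_tensors="pt").input_ids.to(model.device)
output = model.generate(input_ids, do_sample=True, temperature=0.7, max_new_tokens=128)
print(tokenizer.decode(output[0]))
```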
 