mishig committed
Commit e94cf4d
Parents: 9099850, d48b731

Show all specified generation configs in code snippets (#34)

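The gist of the change: instead of hardcoding `temperature` and `max_tokens` into every generated snippet, two small helpers, `formattedMessages` and `formattedConfig`, serialize every entry of `conversation.config`. Below is a minimal standalone sketch of the `formattedConfig` pattern; the `GenerationConfig` shape and the sample values are simplified assumptions for illustration, not the component's actual types.

// Sketch of the formattedConfig pattern from this commit: render every
// key/value pair of the generation config, with configurable separators
// so one helper can serve the JavaScript, Python, and bash snippets alike.
// (GenerationConfig is an assumed simplification of conversation.config.)
type GenerationConfig = Record<string, number | boolean>;

interface FormatOptions {
  sep: string;   // separator between entries
  start: string; // prefix before the first entry
  end: string;   // suffix after the last entry
}

const formattedConfig = (config: GenerationConfig, { sep, start, end }: FormatOptions): string =>
  start +
  Object.entries(config)
    .map(([key, val]) => `${key}: ${val}`)
    .join(sep) +
  end;

// Example with an assumed two-option config.
const config: GenerationConfig = { temperature: 0.7, max_tokens: 500 };
// Prints: temperature: 0.7, max_tokens: 500
console.log(formattedConfig(config, { sep: ', ', start: '', end: '' }));
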
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -31,7 +31,7 @@
     bash: getBashSnippets(conversation)
   };
 
-  let selectedLanguage: Language = 'javascript';
+  let selectedLanguage: Language = 'bash';
 
   function getMessages() {
     const placeholder = [{ role: 'user', content: 'Tell me a story' }];
@@ -47,10 +47,20 @@
   }
 
   function getJavascriptSnippets(conversation: Conversation) {
-    let messages = getMessages()
-      .map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
-      .join(',\n ');
-    messages = `[\n ${messages}\n ]`;
+    const formattedMessages = ({ sep, start, end }) =>
+      start +
+      getMessages()
+        .map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
+        .join(sep) +
+      end;
+
+    const formattedConfig = ({ sep, start, end }) =>
+      start +
+      Object.entries(conversation.config)
+        .map(([key, val]) => `${key}: ${val}`)
+        .join(sep) +
+      end;
+
     const snippets: Snippet[] = [];
     snippets.push({
       label: 'Install @huggingface/inference',
@@ -70,9 +80,8 @@ let out = "";
 
 for await (const chunk of inference.chatCompletionStream({
   model: "${conversation.model.id}",
-  messages: ${messages},
-  temperature: ${conversation.config.temperature},
-  max_tokens: ${conversation.config.maxTokens},
+  messages: ${formattedMessages({ sep: '\n ', start: '[\n ', end: '\n ]' })},
+  ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
   seed: 0,
 })) {
   if (chunk.choices && chunk.choices.length > 0) {
@@ -93,9 +102,8 @@ const inference = new HfInference("your access token")
 
 const out = await inference.chatCompletion({
   model: "${conversation.model.id}",
-  messages: ${messages},
-  temperature: ${conversation.config.temperature},
-  max_tokens: ${conversation.config.maxTokens},
+  messages: ${formattedMessages({ sep: '\n ', start: '[\n ', end: '\n ]' })},
+  ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
   seed: 0,
 });
 
@@ -107,10 +115,20 @@ console.log(out.choices[0].message);`
   }
 
   function getPythonSnippets(conversation: Conversation) {
-    let messages = getMessages()
-      .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
-      .join(',\n ');
-    messages = `[\n ${messages}\n]`;
+    const formattedMessages = ({ sep, start, end }) =>
+      start +
+      getMessages()
+        .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
+        .join(sep) +
+      end;
+
+    const formattedConfig = ({ sep, start, end }) =>
+      start +
+      Object.entries(conversation.config)
+        .map(([key, val]) => `${key}: ${val}`)
+        .join(sep) +
+      end;
+
     const snippets: Snippet[] = [];
     snippets.push({
       label: 'Install huggingface_hub',
@@ -128,9 +146,9 @@ inference_client = InferenceClient(model_id, token=hf_token)
 
 output = ""
 
-messages = ${messages}
+messages = ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })}
 
-for token in client.chat_completion(messages, stream=True, temperature=${conversation.config.temperature}, max_tokens=${conversation.config.maxTokens}):
+for token in client.chat_completion(messages, stream=True, ${formattedConfig({ sep: ', ', start: '', end: '' })}):
   new_content = token.choices[0].delta.content
   print(new_content, end="")
   output += new_content`
@@ -145,9 +163,9 @@ model_id="${conversation.model.id}"
 hf_token = "your HF token"
 inference_client = InferenceClient(model_id, token=hf_token)
 
-messages = ${messages}
+messages = ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })}
 
-output = inference_client.chat_completion(messages, temperature=${conversation.config.temperature}, max_tokens=${conversation.config.maxTokens})
+output = inference_client.chat_completion(messages, ${formattedConfig({ sep: ', ', start: '', end: '' })})
 
 print(output.choices[0].message)`
 });
@@ -157,10 +175,20 @@ print(output.choices[0].message)`
   }
 
   function getBashSnippets(conversation: Conversation) {
-    let messages = getMessages()
-      .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
-      .join(',\n ');
-    messages = `[\n ${messages}\n]`;
+    const formattedMessages = ({ sep, start, end }) =>
+      start +
+      getMessages()
+        .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
+        .join(sep) +
+      end;
+
+    const formattedConfig = ({ sep, start, end }) =>
+      start +
+      Object.entries(conversation.config)
+        .map(([key, val]) => `${key}: ${val}`)
+        .join(sep) +
+      end;
+
     const snippets: Snippet[] = [];
 
     if (conversation.streaming) {
@@ -171,9 +199,8 @@ print(output.choices[0].message)`
 --header 'Content-Type: application/json' \
 --data '{
   "model": "meta-llama/Meta-Llama-3-8B-Instruct",
-  "messages": ${messages},
-  "temperature": ${conversation.config.temperature},
-  "max_tokens": ${conversation.config.maxTokens},
+  "messages": ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })},
+  ${formattedConfig({ sep: ',\n ', start: '', end: '' })},
   "stream": true
 }'`
 });
@@ -186,9 +213,8 @@ print(output.choices[0].message)`
 --header 'Content-Type: application/json' \
 --data '{
   "model": "meta-llama/Meta-Llama-3-8B-Instruct",
-  "messages": ${messages},
-  "temperature": ${conversation.config.temperature},
-  "max_tokens": ${conversation.config.maxTokens}
+  "messages": ${formattedMessages({ sep: ',\n ', start: `[\n `, end: `\n]` })},
+  ${formattedConfig({ sep: ',\n ', start: '', end: '' })}
 }'`
 });
 }
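
For completeness, the same join/wrap pattern applied to the message list: a sketch under the same simplified assumptions, with the placeholder mirroring getMessages() in the diff.

// Companion sketch: serialize chat messages into the array literal that
// gets embedded in the Python and bash snippets. Message and FormatOptions
// are assumed simplifications, not the component's actual types.
interface Message {
  role: string;
  content: string;
}

interface FormatOptions {
  sep: string;
  start: string;
  end: string;
}

const formattedMessages = (messages: Message[], { sep, start, end }: FormatOptions): string =>
  start +
  messages
    .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
    .join(sep) +
  end;

const placeholder: Message[] = [{ role: 'user', content: 'Tell me a story' }];

// Prints:
// [
//   { "role": "user", "content": "Tell me a story" }
// ]
console.log(formattedMessages(placeholder, { sep: ',\n  ', start: '[\n  ', end: '\n]' }));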