mishig HF staff committed on
Commit
7660429
1 Parent(s): 75c1d95

Improve python snippet

Browse files
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -98,14 +98,12 @@ let out = "";
98
  for await (const chunk of inference.chatCompletionStream({
99
  model: "${conversation.model.id}",
100
  messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
101
- ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
102
- seed: 0,
103
  })) {
104
  if (chunk.choices && chunk.choices.length > 0) {
105
  const newContent = chunk.choices[0].delta.content;
106
  out += newContent;
107
- console.clear();
108
- console.log(out);
109
  }
110
  }`,
111
  });
@@ -120,8 +118,7 @@ const inference = new HfInference("your access token")
120
  const out = await inference.chatCompletion({
121
  model: "${conversation.model.id}",
122
  messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
123
- ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
124
- seed: 0,
125
  });
126
 
127
  console.log(out.choices[0].message);`,
@@ -139,36 +136,37 @@ console.log(out.choices[0].message);`,
139
  .join(sep) +
140
  end;
141
 
142
- const formattedConfig = ({ sep, start, end }: MessagesJoiner) =>
143
  start +
144
  Object.entries(conversation.config)
145
- .map(([key, val]) => `${key}: ${val}`)
146
  .join(sep) +
147
  end;
148
 
149
  const snippets: Snippet[] = [];
150
  snippets.push({
151
- label: "Install huggingface_hub",
152
  language: "http",
153
- code: `pip install huggingface_hub`,
154
  });
155
  if (conversation.streaming) {
156
  snippets.push({
157
  label: "Streaming API",
158
  code: `from huggingface_hub import InferenceClient
159
 
160
- model_id="${conversation.model.id}"
161
- hf_token = "your HF token"
162
- inference_client = InferenceClient(model_id, token=hf_token)
163
 
164
- output = ""
165
 
166
- messages = ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })}
 
 
 
 
 
167
 
168
- for token in client.chat_completion(messages, stream=True, ${formattedConfig({ sep: ", ", start: "", end: "" })}):
169
- new_content = token.choices[0].delta.content
170
- print(new_content, end="")
171
- output += new_content`,
172
  });
173
  } else {
174
  // non-streaming
@@ -177,12 +175,15 @@ for token in client.chat_completion(messages, stream=True, ${formattedConfig({ s
177
  code: `from huggingface_hub import InferenceClient
178
 
179
  model_id="${conversation.model.id}"
180
- hf_token = "your HF token"
181
- inference_client = InferenceClient(model_id, token=hf_token)
182
 
183
- messages = ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })}
184
 
185
- output = inference_client.chat_completion(messages, ${formattedConfig({ sep: ", ", start: "", end: "" })})
 
 
 
 
186
 
187
  print(output.choices[0].message)`,
188
  });
 
98
  for await (const chunk of inference.chatCompletionStream({
99
  model: "${conversation.model.id}",
100
  messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
101
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })}
 
102
  })) {
103
  if (chunk.choices && chunk.choices.length > 0) {
104
  const newContent = chunk.choices[0].delta.content;
105
  out += newContent;
106
+ console.log(newContent);
 
107
  }
108
  }`,
109
  });
 
118
  const out = await inference.chatCompletion({
119
  model: "${conversation.model.id}",
120
  messages: ${formattedMessages({ sep: ",\n ", start: "[\n ", end: "\n ]" })},
121
+ ${formattedConfig({ sep: ",\n ", start: "", end: "" })}
 
122
  });
123
 
124
  console.log(out.choices[0].message);`,
 
136
  .join(sep) +
137
  end;
138
 
139
+ const formattedConfig = ({ sep, start, end, connector }: MessagesJoiner & { connector: string }) =>
140
  start +
141
  Object.entries(conversation.config)
142
+ .map(([key, val]) => `${key}${connector}${val}`)
143
  .join(sep) +
144
  end;
145
 
146
  const snippets: Snippet[] = [];
147
  snippets.push({
148
+ label: "Install the latest huggingface_hub",
149
  language: "http",
150
+ code: `pip install huggingface_hub --upgrade`,
151
  });
152
  if (conversation.streaming) {
153
  snippets.push({
154
  label: "Streaming API",
155
  code: `from huggingface_hub import InferenceClient
156
 
157
+ client = InferenceClient(api_key="your HF token")
 
 
158
 
159
+ messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
160
 
161
+ output = client.chat.completions.create(
162
+ model="${conversation.model.id}",
163
+ messages=messages,
164
+ stream=True,
165
+ ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
166
+ )
167
 
168
+ for chunk in output:
169
+ print(chunk.choices[0].delta.content)`,
 
 
170
  });
171
  } else {
172
  // non-streaming
 
175
  code: `from huggingface_hub import InferenceClient
176
 
177
  model_id="${conversation.model.id}"
178
+ client = InferenceClient(api_key="your HF token")
 
179
 
180
+ messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
181
 
182
+ output = client.chat.completions.create(
183
+ model="${conversation.model.id}",
184
+ messages=messages,
185
+ ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
186
+ )
187
 
188
  print(output.choices[0].message)`,
189
  });