prince-canuma committed on
Commit
2d8252f
1 Parent(s): eb91808

fix model name in examples

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -40,7 +40,7 @@ response = generate(model, tokenizer, prompt="hello", verbose=True)
40
  ```python
41
  from mlx_lm import load, generate
42
 
43
- model, tokenizer = load("mlx-community/c4ai-command-r-v01-4bit")
44
 
45
  # Format message with the command-r tool use template
46
  conversation = [
@@ -138,7 +138,7 @@ Action:```json
138
  ```python
139
  from mlx_lm import load, generate
140
 
141
- model, tokenizer = load("mlx-community/c4ai-command-r-v01-4bit")
142
 
143
  # Format message with the command-r tool use template
144
  conversation = [
 
40
  ```python
41
  from mlx_lm import load, generate
42
 
43
+ model, tokenizer = load("mlx-community/c4ai-command-r-plus-4bit")
44
 
45
  # Format message with the command-r tool use template
46
  conversation = [
 
138
  ```python
139
  from mlx_lm import load, generate
140
 
141
+ model, tokenizer = load("mlx-community/c4ai-command-r-plus-4bit")
142
 
143
  # Format message with the command-r tool use template
144
  conversation = [