smcleod commited on
Commit
f70a7e1
1 Parent(s): 91c8dde

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +97 -0
README.md CHANGED
@@ -20,6 +20,103 @@ tags:
20
  This model was converted to GGUF format from [`Qwen/Qwen2.5-Coder-7B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
21
  Refer to the [original model card](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) for more details on the model.
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  ## Use with llama.cpp
24
  Install llama.cpp through brew (works on Mac and Linux)
25
 
 
20
  This model was converted to GGUF format from [`Qwen/Qwen2.5-Coder-7B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
21
  Refer to the [original model card](https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct) for more details on the model.
22
 
23
+
24
+ ## Ollama Modelfile (draft/beta!)
25
+
26
+ ```
27
+
28
+ # ollama create qwen2.5-coder-7b-instruct:q8_0 -f modelfiles/Modelfile-qwen2.5-coder
29
+
30
+ FROM ../qwen2.5-coder-7b-instruct-q8_0.gguf
31
+
32
+ # This is Sam's hacked up template 2024-09-19
33
+ TEMPLATE """
34
+ {{- $fim_prefix := .FIMPrefix -}}
35
+ {{- $fim_suffix := .FIMSuffix -}}
36
+ {{- $repo_name := .RepoName -}}
37
+ {{- $files := .Files -}}
38
+ {{- $has_tools := gt (len .Tools) 0 -}}
39
+ {{- if $fim_prefix -}}
40
+ <|fim_prefix|>{{ $fim_prefix }}<|fim_suffix|>{{ $fim_suffix }}<|fim_middle|>
41
+ {{- else if $repo_name -}}
42
+ <|repo_name|>{{ $repo_name }}
43
+ {{- range $files }}
44
+ <|file_sep|>{{ .Path }}
45
+ {{ .Content }}
46
+ {{- end }}
47
+ {{- else -}}
48
+ {{- if or .System $has_tools -}}
49
+ <|im_start|>system
50
+ {{- if .System }}
51
+ {{ .System }}
52
+ {{- end }}
53
+ {{- if $has_tools }}
54
+
55
+ # Tools
56
+
57
+ You may call one or more functions to assist with the user query.
58
+
59
+ You are provided with function signatures within <tools></tools> XML tags:
60
+ <tools>
61
+ {{- range .Tools }}
62
+ {"type": "function", "function": {{ .Function }}}
63
+ {{- end }}
64
+ </tools>
65
+
66
+ For each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:
67
+ <tool_call>
68
+ {"name": <function-name>, "arguments": <args-json-object>}
69
+ </tool_call>
70
+ {{- end }}
71
+ <|im_end|>
72
+ {{- end }}
73
+ {{- if .Messages }}
74
+ {{- range $i, $message := .Messages }}
75
+ {{- if eq .Role "user" }}<|im_start|>user
76
+ {{ .Content }}<|im_end|>
77
+ {{- else if eq .Role "assistant" }}<|im_start|>assistant
78
+ {{- if .Content }}{{ .Content }}
79
+ {{- else if .ToolCalls }}<tool_call>
80
+ {{- range .ToolCalls }}
81
+ {"name": "{{ .Function.Name }}", "arguments": {{ .Function.Arguments }}}
82
+ {{- end }}
83
+ </tool_call>
84
+ {{- end }}<|im_end|>
85
+ {{- else if eq .Role "tool" }}<|im_start|>user
86
+ <tool_response>
87
+ {{ .Content }}
88
+ </tool_response><|im_end|>
89
+ {{- end }}
90
+ {{- end }}
91
+ {{- else if .Prompt -}}
92
+ <|im_start|>user
93
+ {{ .Prompt }}<|im_end|>
94
+ {{- end -}}
95
+ <|im_start|>assistant
96
+ {{ .Response }}
97
+ {{- end -}}
98
+ """
99
+
100
+ PARAMETER stop "<|im_start|>"
101
+ PARAMETER stop "<|im_end|>"
102
+ PARAMETER stop "<|fim_prefix|>"
103
+ PARAMETER stop "<|fim_suffix|>"
104
+ PARAMETER stop "<|fim_middle|>"
105
+ PARAMETER stop "<|repo_name|>"
106
+ PARAMETER stop "<|file_sep|>"
107
+
108
+ ### Tuning ###
109
+ PARAMETER num_ctx 16384
110
+ PARAMETER temperature 0.3
111
+ PARAMETER top_p 0.8
112
+
113
+ # PARAMETER num_batch 1024
114
+ # PARAMETER num_keep 512
115
+ # PARAMETER presence_penalty 0.2
116
+ # PARAMETER frequency_penalty 0.2
117
+ # PARAMETER repeat_last_n 50
118
+ ```
119
+
120
  ## Use with llama.cpp
121
  Install llama.cpp through brew (works on Mac and Linux)
122