nchen909 commited on
Commit
3d66326
β€’
1 Parent(s): 780d871

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -2,10 +2,10 @@ import gradio as gr
2
  from gpt4all import GPT4All
3
  from huggingface_hub import hf_hub_download
4
 
5
- title = "Apollo-7B-GGUF Run On CPU"
6
 
7
  description = """
8
- πŸ”Ž [Apollo-7B](https://huggingface.co/FreedomIntelligence/Apollo-7B) [GGUF format model](https://huggingface.co/FreedomIntelligence/Apollo-7B-GGUF) , 8-bit quantization balanced quality gguf version, running on CPU. Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
9
 
10
  πŸ”¨ Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
11
 
@@ -13,13 +13,12 @@ Mistral does not support system prompt symbol (such as ```<<SYS>>```) now, input
13
  """
14
 
15
  """
16
- [Model From TheBloke/Mistral-7B-Instruct-v0.1-GGUF](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF)
17
- [Mistral-instruct-v0.1 System prompt](https://docs.mistral.ai/usage/guardrailing)
18
  """
19
 
20
  model_path = "models"
21
- model_name = "Apollo-7B-q8_0.gguf"
22
- hf_hub_download(repo_id="FreedomIntelligence/Apollo-7B-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
23
 
24
  print("Start the model init process")
25
  model = GPT4All(model_name, model_path, allow_download = False, device="cpu")
 
2
  from gpt4all import GPT4All
3
  from huggingface_hub import hf_hub_download
4
 
5
+ title = "Apollo-6B-GGUF Run On CPU"
6
 
7
  description = """
8
+ πŸ”Ž [Apollo-6B](https://huggingface.co/FreedomIntelligence/Apollo-6B) [GGUF format model](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF) , 8-bit quantization balanced quality gguf version, running on CPU. Using [GitHub - llama.cpp](https://github.com/ggerganov/llama.cpp) [GitHub - gpt4all](https://github.com/nomic-ai/gpt4all).
9
 
10
  πŸ”¨ Running on CPU-Basic free hardware. Suggest duplicating this space to run without a queue.
11
 
 
13
  """
14
 
15
  """
16
+ [Model From FreedomIntelligence/Apollo-6B-GGUF](https://huggingface.co/FreedomIntelligence/Apollo-6B-GGUF)
 
17
  """
18
 
19
  model_path = "models"
20
+ model_name = "Apollo-6B-q8_0.gguf"
21
+ hf_hub_download(repo_id="FreedomIntelligence/Apollo-6B-GGUF", filename=model_name, local_dir=model_path, local_dir_use_symlinks=False)
22
 
23
  print("Start the model init process")
24
  model = GPT4All(model_name, model_path, allow_download = False, device="cpu")