Braddy committed on
Commit
17be8c2
1 Parent(s): 35a2a1f
Files changed (1) hide show
  1. app.py +19 -20
app.py CHANGED
@@ -17,28 +17,28 @@ from langchain.schema import SystemMessage
17
 
18
  import urllib
19
 
20
- # urllib.request.urlretrieve(
21
- # "https://huggingface.co/hfl/chinese-alpaca-2-7b-rlhf-gguf/resolve/main/ggml-model-q6_k.gguf?download=true",
22
- # "ggml-model-q6_k.gguf"
23
- # )
24
 
25
- # template_messages = [
26
- # SystemMessage(content="你是一名软件工程师,你的名字叫做贺英旭。请你以这个身份回答以下问题!"),
27
- # MessagesPlaceholder(variable_name="chat_history"),
28
- # HumanMessagePromptTemplate.from_template("{text}"),
29
- # ]
30
 
31
- # prompt_template = ChatPromptTemplate.from_messages(template_messages)
32
 
33
- # llm = LlamaCpp(
34
- # model_path="ggml-model-q6_k.gguf",
35
- # temperature=0.75,
36
- # max_tokens=64
37
- # )
38
- # model = Llama2Chat(llm=llm)
39
 
40
- # memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
41
- # chain = LLMChain(llm=model, prompt=prompt_template, memory=memory)
42
 
43
 
44
  def add_text(history, text):
@@ -60,8 +60,7 @@ def bot(history):
60
 
61
def infer(question):
    """Return a fixed placeholder reply; the LLM chain call is disabled."""
    # Model inference is stubbed out here — the real chain invocation
    # (chain.run) was commented away, so every question gets the same answer.
    placeholder = "123123123123"
    print(placeholder)
    return placeholder
67
 
 
17
 
18
import urllib.request  # explicit submodule import: `import urllib` alone does
                       # not guarantee that `urllib.request` is bound, so the
                       # urlretrieve call below could raise AttributeError
import os.path

# Quantized llama.cpp model file used by the chat backend.
MODEL_FILE = "ggml-model-q6_k.gguf"

# Download the model once; skip the (multi-GB) transfer when the file is
# already present from a previous app start.
if not os.path.exists(MODEL_FILE):
    urllib.request.urlretrieve(
        "https://huggingface.co/hfl/chinese-alpaca-2-7b-rlhf-gguf/resolve/main/ggml-model-q6_k.gguf?download=true",
        MODEL_FILE,
    )

# Chat prompt: fixed persona system message, then the running conversation
# history, then the current user turn.
template_messages = [
    SystemMessage(content="你是一名软件工程师,你的名字叫做贺英旭。请你以这个身份回答以下问题!"),
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{text}"),
]

prompt_template = ChatPromptTemplate.from_messages(template_messages)

# Local llama.cpp backend; max_tokens=64 keeps replies short — presumably a
# CPU-inference latency trade-off (TODO confirm with the app owner).
llm = LlamaCpp(
    model_path=MODEL_FILE,
    temperature=0.75,
    max_tokens=64,
)
model = Llama2Chat(llm=llm)

# Conversation memory; memory_key matches the MessagesPlaceholder above.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = LLMChain(llm=model, prompt=prompt_template, memory=memory)
  def add_text(history, text):
 
60
 
61
def infer(question):
    """Run the module-level chat chain on *question* and return the reply.

    The reply is whitespace-stripped; it is also echoed to stdout for
    debugging before being returned.
    """
    answer = chain.run(text=question).strip()
    print(answer)
    return answer
66