Update app.py
app.py
CHANGED
@@ -21,17 +21,26 @@ def load_model():
     model = Llama(model_path, embedding=True)
 
     st.success("Loaded NLP model from Hugging Face!")  # Show a success message
-
-
-
-
-
-
-    #
-
-
-
-
+
+    model_2_name = "TheBloke/zephyr-7B-beta-GGUF"
+    model_2base_name = "zephyr-7b-beta.Q4_K_M.gguf"
+    model_path_model = hf_hub_download(
+        repo_id=model_2_name,
+        filename=model_2base_name,
+        cache_dir='/content/models'  # Directory for the model cache
+    )
+    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+    llm = LlamaCpp(
+        model_path=model_path_model,
+        temperature=0.75,
+        max_tokens=2500,
+        top_p=1,
+        callback_manager=callback_manager,
+        verbose=True,  # verbose is required to pass to the callback manager
+        n_ctx=2048,
+        n_threads=2
+    )
+    st.success("Loaded the second NLP model from Hugging Face!")
     # prompt_template = "<|system|>\
     # </s>\
     # <|user|>\
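The new block above fetches the quantized Zephyr checkpoint from the Hub and wraps it in a LangChain LlamaCpp instance with stdout streaming, but the imports it relies on sit outside this diff. A self-contained sketch of the same pattern, assuming the pre-split LangChain import layout this Space appears to use (newer releases expose these classes from langchain_community instead):

# Download a GGUF checkpoint from the Hugging Face Hub and serve it
# locally through llama.cpp via LangChain's LlamaCpp wrapper.
from huggingface_hub import hf_hub_download
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp

gguf_path = hf_hub_download(
    repo_id="TheBloke/zephyr-7B-beta-GGUF",
    filename="zephyr-7b-beta.Q4_K_M.gguf",
    cache_dir="/content/models",  # any writable directory works
)

llm = LlamaCpp(
    model_path=gguf_path,
    temperature=0.75,
    max_tokens=2500,
    top_p=1,
    n_ctx=2048,    # prompt plus completion must fit in this context window
    n_threads=2,   # match the CPU cores available to the Space
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    verbose=True,  # required when passing a callback manager
)

print(llm("What documents does a residence permit application need?"))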
@@ -39,26 +48,22 @@ def load_model():
     # <|assistant|>"
     # template = prompt_template
     # prompt = PromptTemplate.from_template(template)
-
-
-    # model_path=model_path_model,
-    # temperature=0.75,
-    # max_tokens=2500,
-    # top_p=1,
-    # callback_manager=callback_manager,
-    # verbose=True,
-    # n_ctx=2048,
-    # n_threads = 2  # Verbose is required to pass to the callback manager
-    # )
-    return model
+
+    return model, llm
 
 st.title("Please ask your question on Lithuanian rules for foreigners.")
-
+model, llm = load_model()
+pc = Pinecone(api_key=apikeys)
+index = pc.Index("law")
 question = st.text_input("Enter your question:")
-
-
-
-
-
-
-
+query = model.create_embedding(question)
+q = query['data'][0]['embedding']
+response = index.query(
+    vector=q,
+    top_k=1,
+    include_metadata=True,
+    namespace="ns1"
+)
+response_t = response['matches'][0]['metadata']['text']
+st.header("Answer:")
+st.write(response_t)
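With this change, each question is embedded by the llama.cpp model, the single nearest chunk is fetched from the Pinecone "law" index, and that chunk is shown verbatim; the Zephyr llm is loaded and returned but not yet invoked anywhere in this commit. A condensed sketch of the retrieval path, with a placeholder model path and an environment variable standing in for the Space's apikeys value (defined elsewhere in app.py):

import os

from llama_cpp import Llama
from pinecone import Pinecone

embedder = Llama("path/to/embedding-model.gguf", embedding=True)  # placeholder path
pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])             # placeholder key source
index = pc.Index("law")

def retrieve(question: str) -> str:
    # llama-cpp-python returns an OpenAI-style payload; the vector
    # itself lives under data[0].embedding.
    vector = embedder.create_embedding(question)["data"][0]["embedding"]
    response = index.query(
        vector=vector,
        top_k=1,                # only the single best match is displayed
        include_metadata=True,  # the chunk text is stored in metadata["text"]
        namespace="ns1",
    )
    return response["matches"][0]["metadata"]["text"]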
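One caveat when adapting the new top-level code: Streamlit reruns the whole script on every interaction, so the embedding call and the Pinecone query fire even while the text box is still empty. A small guard, reusing the names from the diff, defers the round-trip until there is input:

question = st.text_input("Enter your question:")
if question:  # skip retrieval until the user has typed something
    query = model.create_embedding(question)
    q = query['data'][0]['embedding']
    response = index.query(vector=q, top_k=1, include_metadata=True, namespace="ns1")
    st.header("Answer:")
    st.write(response['matches'][0]['metadata']['text'])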
|