"""RAG chatbot: a Gradio chat UI backed by a 4-bit Mistral-7B-Instruct GGUF model
(llama.cpp), nomic-embed-text embeddings, and a Qdrant vector store."""

import gradio as gr
from sentence_transformers import SentenceTransformer
from qdrant_client import QdrantClient
from llama_cpp import Llama

# Load the 4-bit quantized Mistral-7B-Instruct model in GGUF format via llama.cpp.
llm = Llama.from_pretrained(
    repo_id="Suku0/mistral-7b-instruct-v0.3-bnb-4bit-GGUF",
    filename="mistral-7b-instruct-v0.3-bnb-4bit.Q4_K_M.gguf",
    n_ctx=16384,  # context window sized to fit the retrieved passages plus the answer
)
embedding_model = SentenceTransformer("nomic-ai/nomic-embed-text-v1.5", trust_remote_code=True)

# NOTE: the Qdrant URL and API key are hardcoded here for brevity; in a real
# deployment, load them from environment variables or a secrets manager.
qdrant_client = QdrantClient(
    url="https://9a5cbf91-7dac-4dd0-80f6-13e512da1060.europe-west3-0.gcp.cloud.qdrant.io:6333",
    api_key="1M-sCCVolJOOJeRXMBUh4wHfj8bkY4nZyHiau0LBllFr1vsXb1oDPg",
)

def retrieve_context(query):
    """Embed the query and return the top-10 most similar chunks from Qdrant."""
    query_vector = embedding_model.encode(query).tolist()

    # Note: newer qdrant-client releases prefer query_points(); search() still works.
    search_result = qdrant_client.search(
        collection_name="ctx_collection",
        query_vector=query_vector,
        limit=10,
        with_payload=True,
    )

    # Concatenate the stored text of each hit into a single context string.
    context = " ".join(hit.payload["text"] for hit in search_result)
    return context

def respond(message, history, system_message, max_tokens, temperature, top_p):
    context = retrieve_context(message)
    prompt = f"""{system_message} Answer the user's question based on the given context. If the context does not contain the answer, say so.

### Context:
{context}

### Question:
{message}

### Answer:
"""

    # The f-string prompt is already fully interpolated, so it is passed to the
    # model as-is; the chat UI's sliders control the sampling parameters.
    response = llm(
        prompt,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    return response["choices"][0]["text"]

# Expose the RAG pipeline as a chat UI; the extra inputs are passed through to respond().
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
    ]
)

if __name__ == "__main__":
    demo.launch()