"""Local RAG chatbot: ask questions about your own PDFs, on CPU only.

Pipeline: load uploaded PDFs -> split into chunks -> embed with MiniLM ->
index in FAISS -> answer with a local Mistral-7B GGUF model through a
LangChain ConversationalRetrievalChain.
"""
import streamlit as st
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader, DirectoryLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from templatesStreamlit import *
import tempfile
import os

# Read the uploaded PDF files into LangChain documents
def load_documents(uploaded_files):
    docs = []
    temp_dir = tempfile.TemporaryDirectory()
    for file in uploaded_files:
        temp_filepath = os.path.join(temp_dir.name, file.name)
        with open(temp_filepath, "wb") as f:
            f.write(file.getvalue())
        loader = PyPDFLoader(temp_filepath)
        docs.extend(loader.load())

    # Alternative: load every PDF from a local 'data/' directory instead
    # loader = DirectoryLoader('data/', glob="*.pdf", loader_cls=PyPDFLoader)
    # documents = loader.load()
    return docs

# Split the documents into overlapping text chunks
def split_text_into_chunks(documents):
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    text_chunks = text_splitter.split_documents(documents)
    return text_chunks
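
# Note (not from the original author): chunk_size=500 with chunk_overlap=50 is a
# common starting point rather than a tuned value; chunks this size let the k=2
# retrieved passages fit comfortably in the prompt, and the overlap keeps
# sentences that straddle a chunk boundary retrievable from either side.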

# Embed the chunks on CPU and index them in a FAISS vector store
def get_vectorstore(text_chunks):
    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': "cpu"})
    vector_store = FAISS.from_documents(text_chunks, embeddings)
    return vector_store
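
# Note: the FAISS index above lives only in memory and is rebuilt every session.
# LangChain's FAISS wrapper can also persist it if needed, e.g. (the path here is
# an illustrative choice, not part of this app):
#   vector_store.save_local("faiss_index")
#   vector_store = FAISS.load_local("faiss_index", embeddings)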

def get_conversation_chain(vector_store):

    # Local quantized Mistral-7B served through ctransformers (CPU only)
    llm = CTransformers(model="mistral-7b-instruct-v0.1.Q4_K_M.gguf", config={'max_new_tokens': 512, 'temperature': 0.01})

    # Conversation memory so follow-up questions keep their context
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # Build the retrieval chain: 'stuff' packs the k=2 retrieved chunks into the prompt
    conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
                                                retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
                                                memory=memory)
    return conversation_chain
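
# Note: CTransformers loads the GGUF file from the working directory, so
# mistral-7b-instruct-v0.1.Q4_K_M.gguf must be downloaded separately before
# running (e.g. from the TheBloke/Mistral-7B-Instruct-v0.1-GGUF repository on
# Hugging Face).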

def handle_userinput(user_question):
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']

    # The history alternates user (even index) and bot (odd index) messages
    for i, msg in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template2.replace(
                "{{MSG}}", msg.content), unsafe_allow_html=True)
        else:
            st.write(bot_template2.replace(
                "{{MSG}}", msg.content), unsafe_allow_html=True)


def main():
    url_logo = "https://github.com/manolito99/DataScienceLLM/blob/main/static/logo_alternativo.png?raw=true"
    st.set_page_config(page_title="LLM-RAG",
                       page_icon=url_logo)
    st.write(css, unsafe_allow_html=True)

    titulo = f"""
     <div class="btn-neon">
       <span class="icon"><img src=https://github.com/manolito99/DataScienceLLM/blob/main/static/Mistral.png?raw=true></span>
        Mistral7b + Streamlit
        <span class="icon"><img src=https://github.com/manolito99/DataScienceLLM/blob/main/static/streamlit.png?raw=true></span>
     </div>
    """
    st.markdown(titulo, unsafe_allow_html=True)

    presentacion = f"""
            <div class="skill">
                <div class="skill-content">
                    <div class="skill-img-box">
                        <a href="https://www.linkedin.com/in/manueloteromarquez/" target="_blank">
                        <img src="https://media.licdn.com/dms/image/C4D03AQEsabRcMGkMmQ/profile-displayphoto-shrink_800_800/0/1663585925916?e=1708560000&v=beta&t=1Ofx1PsbTSlMcNIVCxznEjtIA_aIlTVaJm52toMKddU" alt="Tu descripción">
                        </a>                    
                    </div>
                    <div class="skill-detail">
                        <h2 class="skill-title">By Manuel Otero Márquez </h2>
                        <p>Esto es un ejemplo de como se pueden implementar LLM de forma local y con CPU</p>
                        <div class="skill-progress">
                            <div class="progress progress-1"></div>
                        </div>
                    </div>
                </div>
            </div>
"""
    st.markdown(presentacion, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Hazle preguntas a tus documentos PDFs :books:")


    with st.sidebar:
        st.subheader("Your Documents")
        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click 'Process PDFs'", accept_multiple_files=True)
        if not pdf_docs:
            st.info("Upload your PDFs to continue.")
            st.stop()

        if st.button("Process PDFs"):
            with st.spinner("Processing"):
                # Extract text from the PDFs and split it into chunks
                documents = load_documents(pdf_docs)
                text_chunks = split_text_into_chunks(documents)
                # Index the chunks in a vector store
                vectorstore = get_vectorstore(text_chunks)
                # Build the conversation chain and keep it across reruns
                st.session_state.conversation = get_conversation_chain(vectorstore)
    
    user_question = st.text_input("Ask a question")
    if user_question:
        with st.spinner("Generating answer"):
            handle_userinput(user_question)


if __name__ == '__main__':
    main()
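
# To run locally (assuming this file is saved as app.py, with templatesStreamlit.py
# from the same repo alongside it):
#   pip install streamlit langchain sentence-transformers faiss-cpu ctransformers pypdf
#   streamlit run app.py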