import os

import streamlit as st
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_together import ChatTogether
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate

st.set_page_config(page_title="Chat with Website")
st.title("Chat with Website")

# User input for the website URL
url_input = st.text_input("Enter a website URL:")

# The retriever is populated only once a URL has been loaded and indexed
retriever = None

# Load and index documents from the user-provided URL, if one was given
if url_input:
    try:
        loader = WebBaseLoader(url_input)
        docs = loader.load()

        # Split documents into manageable chunks; adjust chunk_size and
        # chunk_overlap to trade context granularity against recall
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
        documents = text_splitter.split_documents(docs)

        # Embed the chunks with a Hugging Face model and store them in a FAISS index
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        vector_store = FAISS.from_documents(documents, embeddings)

        # Expose the vector store as a retriever
        retriever = vector_store.as_retriever()
    except Exception as e:
        st.error(f"Failed to load documents from the specified URL: {e}")
        retriever = None

# Set up the Together chat model, reading the API key from the environment
llm = ChatTogether(
    together_api_key=os.environ.get("TOGETHER_API_KEY"),
    model="meta-llama/Llama-3-70b-chat-hf",
)

# Streamlit input for the user's question about the website content
user_query = st.text_area("Ask a question about the website content:", height=200)

# Process the input when the button is clicked
if st.button("Submit"):
    if retriever and user_query:
        # Prompt that grounds the answer strictly in the retrieved context
        prompt = ChatPromptTemplate.from_template(
            """Answer the following question briefly based only on the provided context:

{context}

Question: {input}"""
        )

        # Build the retrieval chain: the document chain stuffs the retrieved
        # chunks into the prompt, and the retrieval chain fetches relevant
        # chunks for the query before handing them to the LLM
        document_chain = create_stuff_documents_chain(llm, prompt)
        retrieval_chain = create_retrieval_chain(retriever, document_chain)

        # Invoke the chain and display the generated answer
        result = retrieval_chain.invoke({"input": user_query})
        st.write(result["answer"])
    else:
        st.warning("Please enter a valid URL and a question to proceed.")
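
# Usage sketch (assumptions: the script is saved as app.py and you hold a
# Together AI API key; both the filename and the key value are placeholders,
# not part of the original code):
#
#   pip install streamlit langchain langchain-community langchain-together \
#       sentence-transformers faiss-cpu beautifulsoup4
#   export TOGETHER_API_KEY="<your-together-api-key>"
#   streamlit run app.py
#
# The key is read via os.environ.get above, so it must be set in the shell
# (or via your deployment's secrets mechanism) before the app starts.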