vinhnx90 committed
Commit 220b4de • 1 Parent(s): c73003e

More proper API key handle

Files changed (3):
  1. README.md +2 -11
  2. apikey.py +0 -7
  3. app.py +23 -18
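In short, the commit drops the `.env`/`apikey.py` key loading and instead collects the OpenAI API key from a password field in the Streamlit UI, threading it into the embedding and chat-model helpers. A minimal sketch of the new pattern, simplified from the diffs below (the widget labels mirror app.py; the success message is illustrative):

```python
import streamlit as st

# The key is typed into the UI each session instead of being read from a .env file.
uploaded_file = st.file_uploader("Select a file", type=["pdf", "docx", "txt"])
openai_api_key = st.text_input(
    "OpenAI API Key", type="password", disabled=not uploaded_file
)

# Processing is gated on a file plus a plausible-looking key ("sk-..." prefix).
if uploaded_file and openai_api_key.startswith("sk-"):
    st.success("Key accepted; the document can now be processed.")
```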
README.md CHANGED
@@ -81,17 +81,7 @@ pip install -r requirements.txt
  4. **Configure the App**:

  - Navigate to OpenAI Platform > API Keys to generate an API Key to run the model
- - Create a `.env` file locally, this file is hidden and will be automatically ignored by `.gitignore`
-
- ```sh
- touch .env
- ```
-
- Inside `.env` file, pass the API Key into `OPENAI_API_KEY` value
-
- ```sh
- OPENAI_API_KEY={YOUR_API_KEY_HERE}
- ```
+ - Copy the API KEY (start with sk-)

  5. **Run the Streamlit App**:

@@ -101,6 +91,7 @@ OPENAI_API_KEY={YOUR_API_KEY_HERE}
  streamlit run app.py
  ```
  - This will start the Streamlit app and provide you with a local URL to access the app in your web browser.
+ - Pass your OpenAI API key into the `OpenAI API Key` field

  6. **Use the App**:

apikey.py DELETED
@@ -1,7 +0,0 @@
- import os
-
- from dotenv import load_dotenv
-
- load_dotenv()
-
- llm_api_key = os.environ.get("OPENAI_API_KEY")
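With this module deleted, no key is resolved at import time any more; each helper in app.py now receives the key as an explicit argument. A hedged sketch of the difference (the `OpenAIEmbeddings(openai_api_key=...)` call mirrors the app.py diff below; `build_embeddings` is an illustrative helper, not part of the repo):

```python
from langchain_community.embeddings.openai import OpenAIEmbeddings

# Old pattern (deleted): `from apikey import llm_api_key` resolved the key once
# at import time and was silently None whenever no .env file was present.
#
# New pattern: the key is a parameter, so the UI can supply it per session and
# a missing key fails loudly at the call site.
def build_embeddings(openai_api_key: str) -> OpenAIEmbeddings:
    if not openai_api_key:
        raise ValueError("An OpenAI API key is required")
    return OpenAIEmbeddings(openai_api_key=openai_api_key)
```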
app.py CHANGED
@@ -8,12 +8,8 @@ from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, Te
  from langchain_community.embeddings.openai import OpenAIEmbeddings
  from langchain_community.vectorstores.chroma import Chroma

- from apikey import llm_api_key

- key = llm_api_key
-
-
- def load_and_process_file(file_data):
+ def load_and_process_file(file_data, openai_api_key):
      """
      Load and process the uploaded file.
      Returns a vector store containing the embedded chunks of the file.
@@ -43,13 +39,13 @@ def load_and_process_file(file_data):
      )
      chunks = text_splitter.split_documents(documents)

-     embeddings = OpenAIEmbeddings()
+     embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
      vector_store = Chroma.from_documents(chunks, embeddings)

      return vector_store


- def initialize_chat_model(vector_store):
+ def initialize_chat_model(vector_store, openai_api_key):
      """
      Initialize the chat model with the given vector store.
      Returns a ConversationalRetrievalChain instance.
@@ -57,7 +53,7 @@ def initialize_chat_model(vector_store):
      llm = ChatOpenAI(
          model="gpt-3.5-turbo",
          temperature=0,
-         openai_api_key=key,
+         openai_api_key=openai_api_key,
      )
      retriever = vector_store.as_retriever()
      return ConversationalRetrievalChain.from_llm(llm, retriever)
@@ -69,7 +65,6 @@ def main():
      """

      st.set_page_config(page_title="InkChatGPT", page_icon="📚")
-
      st.title("📚 InkChatGPT")
      st.write("Upload a document and ask questions related to its content.")

@@ -77,22 +72,32 @@ def main():
          "Select a file", type=["pdf", "docx", "txt"], key="file_uploader"
      )

-     if uploaded_file:
+     openai_api_key = st.text_input(
+         "OpenAI API Key", type="password", disabled=not (uploaded_file)
+     )
+
+     if uploaded_file and openai_api_key.startswith("sk-"):
          add_file = st.button(
              "Process File",
              on_click=clear_history,
              key="process_button",
          )

-     if uploaded_file and add_file:
-         with st.spinner("💭 Thinking..."):
-             vector_store = load_and_process_file(uploaded_file)
-             if vector_store:
-                 crc = initialize_chat_model(vector_store)
-                 st.session_state.crc = crc
-                 st.success("File processed successfully!")
+         if uploaded_file and add_file:
+             with st.spinner("💭 Thinking..."):
+                 vector_store = load_and_process_file(
+                     uploaded_file,
+                     openai_api_key,
+                 )
+
+                 if vector_store:
+                     crc = initialize_chat_model(
+                         vector_store,
+                         openai_api_key=openai_api_key,
+                     )
+                     st.session_state.crc = crc
+                     st.success("File processed successfully!")

-     if "crc" in st.session_state:
          st.markdown("## Ask a Question")
          question = st.text_area(
              "Enter your question",