awacke1 committed on
Commit
27ba253
β€’
1 Parent(s): e12378e

Update backupapp.py

Browse files
Files changed (1) hide show
  1. backupapp.py +27 -23
backupapp.py CHANGED
@@ -16,11 +16,8 @@ openai.api_key = os.getenv('OPENAI_KEY')
16
  st.set_page_config(
17
  page_title="GPT Streamlit Document Reasoner",
18
  layout="wide")
19
- # st.title("GPT Chat with Optional File Context - Talk to your data!")
20
 
21
- # Output options sidebar menu
22
- # st.sidebar.title("Output Options")
23
- menu = ["txt", "htm", "md"]
24
  choice = st.sidebar.selectbox("Choose output file type to save results", menu)
25
  choicePrefix = "Output and download file set to "
26
  if choice == "txt":
@@ -30,32 +27,36 @@ elif choice == "htm":
30
  elif choice == "md":
31
  st.sidebar.write(choicePrefix + "Markdown.")
32
  elif choice == "py":
33
- st.sidebar.write(choicePrefix + "Python AI UI/UX")
34
 
35
- # sidebar slider for file input length to include in inference blocks
36
- max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=3000, step=1000)
37
 
38
- # Truncate document
39
def truncate_document(document, length):
    """Clip *document* so that at most *length* leading characters remain."""
    clipped = document[:length]
    return clipped
41
 
42
def chat_with_model(prompts):
    """Send each entry of *prompts* as a user turn to gpt-3.5-turbo and
    return the text of the first choice in the completion."""
    messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    for prompt in prompts:
        messages.append({'role': 'user', 'content': prompt})
    reply = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    return reply['choices'][0]['message']['content']
49
 
50
def generate_filename(prompt):
    """Build a .txt filename from a US/Central timestamp and a sanitized
    prefix (first 30 alphanumeric characters) of *prompt*."""
    tz = pytz.timezone('US/Central')
    stamp = datetime.now(tz).strftime("%m%d_%I_%M_%p")
    # Keep only alphanumerics so the result is filesystem-safe.
    alnum_chars = [ch for ch in prompt if ch.isalnum()]
    tag = "".join(alnum_chars[:30])
    return f"{stamp}_{tag}.txt"
55
-
56
def create_file(filename, prompt, response):
    """Persist the prompt/response pair to *filename* as minimal HTML."""
    markup = f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>"
    with open(filename, 'w') as handle:
        handle.write(markup)
 
 
 
 
 
 
 
59
 
60
  def get_table_download_link_old(file_path):
61
  with open(file_path, 'r') as file:
@@ -132,19 +133,22 @@ def main():
132
  st.write('Response:')
133
  st.write(response)
134
 
135
- filename = generate_filename(user_prompt)
136
  create_file(filename, user_prompt, response)
137
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
138
 
139
  if len(file_content) > 0:
140
  st.markdown(f"**File Content Added:**\n{file_content}")
141
 
142
- htm_files = glob.glob("*.txt")
143
- for file in htm_files:
144
- st.sidebar.markdown(get_table_download_link(file), unsafe_allow_html=True)
145
- if st.sidebar.button(f"πŸ—‘ Delete {file}"):
146
- os.remove(file)
147
- st.experimental_rerun()
 
 
 
148
 
149
  if __name__ == "__main__":
150
  main()
 
16
  st.set_page_config(
17
  page_title="GPT Streamlit Document Reasoner",
18
  layout="wide")
 
19
 
20
+ menu = ["txt", "htm", "md", "py"]
 
 
21
  choice = st.sidebar.selectbox("Choose output file type to save results", menu)
22
  choicePrefix = "Output and download file set to "
23
  if choice == "txt":
 
27
  elif choice == "md":
28
  st.sidebar.write(choicePrefix + "Markdown.")
29
  elif choice == "py":
30
+ st.sidebar.write(choicePrefix + "Python Code.")
31
 
32
+ max_length = st.sidebar.slider("Max document length", min_value=1000, max_value=32000, value=2000, step=1000)
 
33
 
 
34
def truncate_document(document, length):
    """Return at most the first *length* characters of *document*."""
    return document[0:length]
36
 
37
def chat_with_model(prompts):
    """Forward *prompts* as user messages (after a fixed system message)
    to gpt-3.5-turbo and return the assistant reply text."""
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    conversation += [{'role': 'user', 'content': p} for p in prompts]
    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=conversation)
    return completion['choices'][0]['message']['content']
43
 
44
def generate_filename(prompt, file_type):
    """Compose '<MMDD_HHMM>_<sanitized prompt>.<file_type>' using
    US/Central local time; the prompt part keeps at most 28 alphanumerics."""
    now_central = datetime.now(pytz.timezone('US/Central'))
    stamp = now_central.strftime("%m%d_%I%M")
    tag = "".join(ch for ch in prompt if ch.isalnum())[:28]
    return f"{stamp}_{tag}.{file_type}"
49
+
50
def create_file(filename, prompt, response):
    """Write the prompt/response pair to *filename*, formatted by extension.

    .htm -> simple HTML, .md -> Markdown, .txt -> plain text. Any other
    extension (notably ".py", which the sidebar menu offers) falls back to
    the plain-text layout so the file always exists for the download link
    created right after this call.
    """
    if filename.endswith(".htm"):
        content = f"<h1>Prompt:</h1> <p>{prompt}</p> <h1>Response:</h1> <p>{response}</p>"
    elif filename.endswith(".md"):
        content = f"# Prompt:\n{prompt}\n# Response:\n{response}"
    else:
        # .txt and fallback: previously ".py" hit no branch and no file was
        # written, breaking get_table_download_link(filename) downstream.
        content = f"Prompt:\n{prompt}\nResponse:\n{response}"
    with open(filename, 'w') as file:
        file.write(content)
60
 
61
  def get_table_download_link_old(file_path):
62
  with open(file_path, 'r') as file:
 
133
  st.write('Response:')
134
  st.write(response)
135
 
136
+ filename = generate_filename(user_prompt, choice)
137
  create_file(filename, user_prompt, response)
138
  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
139
 
140
  if len(file_content) > 0:
141
  st.markdown(f"**File Content Added:**\n{file_content}")
142
 
143
+ all_files = glob.glob("*.txt") + glob.glob("*.htm") + glob.glob("*.md")
144
+ for file in all_files:
145
+ col1, col2 = st.sidebar.columns([4,1]) # adjust the ratio as needed
146
+ with col1:
147
+ st.markdown(get_table_download_link(file), unsafe_allow_html=True)
148
+ with col2:
149
+ if st.button("πŸ—‘", key=file):
150
+ os.remove(file)
151
+ st.experimental_rerun()
152
 
153
  if __name__ == "__main__":
154
  main()