"""Streamlit chat app.

Sends a fixed set of prompts (plus an optional user question) to OpenAI's
gpt-3.5-turbo, saves each exchange to a timestamped ``.htm`` file, and
offers every saved file as an inline download link.
"""

import base64
import glob
import os
from datetime import datetime

import openai
import streamlit as st
from dotenv import load_dotenv
from openai import ChatCompletion  # NOTE(review): unused; kept for compatibility

load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')


def chat_with_model(prompts):
    """Send *prompts* as user messages (after a fixed system message) to
    gpt-3.5-turbo and return the assistant's reply text.

    :param prompts: iterable of user prompt strings.
    :return: the content string of the first completion choice.
    """
    model = "gpt-3.5-turbo"
    conversation = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
    # extend() accepts any iterable; no need to build an intermediate list.
    conversation.extend({'role': 'user', 'content': prompt} for prompt in prompts)
    response = openai.ChatCompletion.create(model=model, messages=conversation)
    return response['choices'][0]['message']['content']


def generate_filename(prompt):
    """Build a collision-resistant filename: a second-resolution timestamp
    plus the first 50 alphanumeric characters of *prompt*, with ``.htm``
    extension.
    """
    safe_date_time = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
    # Strip everything non-alphanumeric so the prompt is filesystem-safe.
    safe_prompt = "".join(x for x in prompt if x.isalnum())[:50]
    return f"{safe_date_time}_{safe_prompt}.htm"


def create_file(filename, prompt, response):
    """Write the prompt/response exchange to *filename*.

    Bug fix: the original used a single-quoted f-string spanning multiple
    lines (a SyntaxError); a triple-quoted f-string carries the identical
    content.
    """
    with open(filename, 'w', encoding='utf-8') as file:
        file.write(f"""

Prompt:

{prompt}

Response:

{response}

""")


def get_table_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* via a base64 data URI.

    Bug fix: the original computed ``b64`` but never used it and returned
    only the bare filename — the download anchor markup had been lost.
    """
    with open(file_path, 'r', encoding='utf-8') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    href = f'<a href="data:file/htm;base64,{b64}" download="{file_name}">{file_name}</a>'
    return href


def main():
    """Streamlit entry point: collect a question, chat, save, list downloads."""
    st.title("Chat with AI")

    # Pre-defined prompts, always sent ahead of the user's own question.
    prompts = ['Hows the weather?', 'Tell me a joke.', 'What is the meaning of life?']

    # User prompt input
    user_prompt = st.text_input("Your question:", '')
    if user_prompt:
        prompts.append(user_prompt)

    if st.button('Chat'):
        st.write('Chatting with GPT-3...')
        response = chat_with_model(prompts)
        st.write('Response:')
        st.write(response)

        # Persist the exchange and offer it for immediate download.
        filename = generate_filename(user_prompt)
        create_file(filename, user_prompt, response)
        st.markdown(get_table_download_link(filename), unsafe_allow_html=True)

    # Offer every previously saved exchange for download as well.
    htm_files = glob.glob("*.htm")
    for file in htm_files:
        st.markdown(get_table_download_link(file), unsafe_allow_html=True)


if __name__ == "__main__":
    main()