import streamlit as st
import pandas as pd
import torch  # PyTorch must be installed as the backend for the transformers pipeline
from transformers import pipeline
import datetime

# Load the data
df = pd.read_excel('discrepantes.xlsx')
df.fillna(0, inplace=True)
# The TAPAS tokenizer expects every table cell to be a string
df = df.astype(str)


# Function to generate a response using the TAPAS model
def response(user_question, df):
    start = datetime.datetime.now()
    tqa = pipeline(task="table-question-answering",
                   model="google/tapas-large-finetuned-wtq")
    answer = tqa(table=df, query=user_question)['answer']
    query_result = {"Resposta": answer}
    end = datetime.datetime.now()
    print(end - start)  # log how long the model took to answer
    return query_result


# Streamlit interface
st.markdown("""
    <h1 style="text-align: center;">Chatbot do Tesouro RS</h1>
""", unsafe_allow_html=True)

# Chat history
if 'history' not in st.session_state:
    st.session_state['history'] = []

# Input box for user question
user_question = st.text_input("Escreva sua questão aqui:", "")

if user_question:
    # Add human emoji when user asks a question
    st.session_state['history'].append(('👤', user_question))
    st.markdown(f"**👤 {user_question}**")

    # Generate the response
    bot_response = response(user_question, df)["Resposta"]

    # Add robot emoji when generating response and align to the right
    st.session_state['history'].append(('🤖', bot_response))
    st.markdown(
        f"<div style='text-align: right;'>**🤖 {bot_response}**</div>",
        unsafe_allow_html=True,
    )

# Clear history button
if st.button("Limpar"):
    st.session_state['history'] = []

# Display chat history
for sender, message in st.session_state['history']:
    if sender == '👤':
        st.markdown(f"**👤 {message}**")
    elif sender == '🤖':
        st.markdown(
            f"<div style='text-align: right;'>**🤖 {message}**</div>",
            unsafe_allow_html=True,
        )