import streamlit as st
import pandas as pd
import torch
from transformers import pipeline
from transformers import TapasTokenizer, TapexTokenizer, BartForConditionalGeneration
import datetime

# Load the spreadsheet and cast every cell to string, as expected by the TAPEX tokenizer
#df = pd.read_excel('discrepantes.xlsx', index_col='Unnamed: 0')
df = pd.read_excel('discrepantes.xlsx')
df.fillna(0, inplace=True)
table_data = df.astype(str)
print(table_data.head())


def response(user_question, table_data):
    a = datetime.datetime.now()

    # Load the table question-answering model and its tokenizer
    model_name = "microsoft/tapex-large-finetuned-wtq"
    model = BartForConditionalGeneration.from_pretrained(model_name)
    tokenizer = TapexTokenizer.from_pretrained(model_name)

    # Encode the table together with the user question and generate an answer
    queries = [user_question]
    encoding = tokenizer(table=table_data, query=queries, padding=True, return_tensors="pt", truncation=True)
    outputs = model.generate(**encoding)
    ans = tokenizer.batch_decode(outputs, skip_special_tokens=True)

    query_result = {"Resposta": ans[0]}

    b = datetime.datetime.now()
    print(b - a)  # log how long model loading plus generation took
    return query_result
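
# Optional sketch (not part of the original script): response() reloads the TAPEX
# model and tokenizer on every question, which is most of what the timing above
# measures. Assuming a Streamlit version that provides st.cache_resource (1.18+),
# the loaders could be cached once and reused across reruns. load_tapex and
# answer_question are illustrative names, not from the original code, and the
# sketch reuses the imports already at the top of this script; the app below
# still calls response() as written.
@st.cache_resource
def load_tapex(model_name="microsoft/tapex-large-finetuned-wtq"):
    # Downloaded and constructed only once per Streamlit process, then cached
    tokenizer = TapexTokenizer.from_pretrained(model_name)
    model = BartForConditionalGeneration.from_pretrained(model_name)
    return tokenizer, model


def answer_question(user_question, table_data):
    # Same encode/generate/decode steps as response(), but with cached objects
    tokenizer, model = load_tapex()
    encoding = tokenizer(table=table_data, query=[user_question],
                         padding=True, truncation=True, return_tensors="pt")
    outputs = model.generate(**encoding)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
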
# Streamlit interface
st.markdown("""
<div style='text-align: center;'>
    <h1>Chatbot do Tesouro RS</h1>
</div>
""", unsafe_allow_html=True)

# Chat history
if 'history' not in st.session_state:
    st.session_state['history'] = []

# Input box for user question
user_question = st.text_input("Escreva sua questΓ£o aqui:", "")

if user_question:
    # Add person emoji when typing question
    st.session_state['history'].append(('πŸ‘€', user_question))
    st.markdown(f"**πŸ‘€ {user_question}**")

    # Generate the response (response() returns a dict; keep only the answer text)
    bot_response = response(user_question, table_data)["Resposta"]

    # Add robot emoji when generating response and align to the right
    st.session_state['history'].append(('πŸ€–', bot_response))
    st.markdown(f"<div style='text-align: right;'>**πŸ€– {bot_response}**</div>", unsafe_allow_html=True)

# Clear history button
if st.button("Limpar"):
    st.session_state['history'] = []

# Display chat history
for sender, message in st.session_state['history']:
    if sender == 'πŸ‘€':
        st.markdown(f"**πŸ‘€ {message}**")
    elif sender == 'πŸ€–':
        st.markdown(f"<div style='text-align: right;'>**πŸ€– {message}**</div>", unsafe_allow_html=True)