import streamlit as st
import pandas as pd
import torch
from transformers import TapasTokenizer, TapasForQuestionAnswering

# Load the spreadsheet once at startup. TAPAS requires every table cell to be
# a string, so the whole frame is cast after filling missing values with 0.
df = pd.read_excel('discrepantes.xlsx', index_col='Unnamed: 0')
df.fillna(0, inplace=True)
table_data = df.astype(str)


@st.cache_resource
def load_tapas():
    """Load the TAPAS tokenizer and model exactly once per server process.

    The original code reloaded the ~1.5 GB checkpoint on every question;
    `st.cache_resource` keeps a single shared instance across Streamlit reruns.

    Returns:
        tuple: (TapasTokenizer, TapasForQuestionAnswering)
    """
    tokenizer = TapasTokenizer.from_pretrained(
        "google/tapas-large-finetuned-wtq", drop_rows_to_fit=True
    )
    model = TapasForQuestionAnswering.from_pretrained(
        "google/tapas-large-finetuned-wtq"
    )
    model.eval()
    return tokenizer, model


def response(user_question, table_data):
    """Answer a natural-language question against *table_data* with TAPAS.

    Args:
        user_question (str): the question typed by the user.
        table_data (pd.DataFrame): table with all cells as strings
            (TAPAS requirement).

    Returns:
        tuple: (predicted_answer_coordinates, aggregation_label) where the
        coordinates are a list (one entry per query) of (row, col) cell
        coordinates and the label is one of 'NONE'/'SUM'/'AVERAGE'/'COUNT'.
    """
    tokenizer, model = load_tapas()
    inputs = tokenizer(
        table=table_data,
        queries=user_question,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        outputs = model(**inputs)
    # BUG FIX: the model output exposes `logits` and `logits_aggregation`,
    # not `predicted_answer_coordinates`/`aggregation_predictions`; predictions
    # must be decoded through the tokenizer helper.
    predicted_answer_coordinates, predicted_aggregation_indices = (
        tokenizer.convert_logits_to_predictions(
            inputs,
            outputs.logits.detach(),
            outputs.logits_aggregation.detach(),
        )
    )
    # Aggregation head for the WTQ fine-tune uses this fixed label set.
    id2aggregation = {0: 'NONE', 1: 'SUM', 2: 'AVERAGE', 3: 'COUNT'}
    aggregation_prediction = id2aggregation[predicted_aggregation_indices[0]]
    return predicted_answer_coordinates, aggregation_prediction


# --- Streamlit interface ---
st.markdown("""
Chatbot do Tesouro RS
""", unsafe_allow_html=True)

# Chat history persists across Streamlit reruns via session_state.
if 'history' not in st.session_state:
    st.session_state['history'] = []

# Input box for the user question.
user_question = st.text_input("Escreva sua questΓ£o aqui:", "")

if user_question:
    # Record and echo the question with the person emoji.
    st.session_state['history'].append(('πŸ‘€', user_question))
    st.markdown(f"**πŸ‘€ {user_question}**")
    # Generate and record the bot answer with the robot emoji.
    bot_response = response(user_question, table_data)
    st.session_state['history'].append(('πŸ€–', bot_response))
    st.markdown(f"**πŸ€– {bot_response}**", unsafe_allow_html=True)

# Clear-history button.
if st.button("Limpar"):
    st.session_state['history'] = []

# Re-display the accumulated chat history.
for sender, message in st.session_state['history']:
    if sender == 'πŸ‘€':
        st.markdown(f"**πŸ‘€ {message}**")
    elif sender == 'πŸ€–':
        st.markdown(f"**πŸ€– {message}**", unsafe_allow_html=True)