Spaces:
Sleeping
Sleeping
File size: 1,227 Bytes
f483191 03b857c 1f1b415 8d63ce3 03b857c f483191 1f1b415 f483191 c885860 f483191 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 22:17:43 2023
@author: Loges
"""
import streamlit as st
import torch
from transformers import pipeline, T5Tokenizer, T5ForConditionalGeneration

# Must be the first Streamlit command executed in the script.
st.set_page_config(page_title='Sample Chatbot', layout='wide')


@st.cache_resource
def load_pipeline():
    """Load the fine-tuned T5 chat model once and reuse it across Streamlit reruns.

    Returns a text2text-generation pipeline. Falls back to CPU when no GPU
    is available instead of crashing on a hard-coded .to("cuda").
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = T5ForConditionalGeneration.from_pretrained(
        "Logeswaransr/results_T5"
    ).to(device)
    tokenizer = T5Tokenizer.from_pretrained("Logeswaransr/results_T5")
    return pipeline('text2text-generation', model=model, tokenizer=tokenizer)


pipe = load_pipeline()

# messages element format: {'role': 'user' | 'assistant', 'content': '<text>'}
if 'messages' not in st.session_state:
    st.session_state.messages = []

st.subheader("Sample Chatbot")

# Replay the conversation history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message['role']):
        st.markdown(message['content'])

if prompt := st.chat_input("What is up!"):
    # Echo the user's turn and persist it.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({'role': 'user', 'content': prompt})

    # The pipeline returns [{'generated_text': ...}]; surface the text itself
    # rather than the raw list repr.
    out = pipe(prompt)
    response = f"Analysis: {out[0]['generated_text']}"
    with st.chat_message("assistant"):
        st.markdown(response)
    st.session_state.messages.append({'role': 'assistant', 'content': response})