from langchain.chains import ConversationChain
from langchain.globals import get_verbose
from langchain.memory import ConversationBufferMemory
from langchain_core.runnables.base import Runnable
from transformers import AutoModelForCausalLM, AutoTokenizer
import streamlit as st


class HuggingFaceModelWrapper(Runnable):
    """Adapts a Hugging Face causal LM to the Runnable interface LangChain expects."""

    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer

    def run(self, input_text):
        # Tokenize the prompt, generate a completion, and decode it back to text.
        input_ids = self.tokenizer.encode(input_text, return_tensors="pt")
        output = self.model.generate(input_ids, max_length=100, num_return_sequences=1)
        return self.tokenizer.decode(output[0], skip_special_tokens=True)

    def invoke(self, input, config=None, **kwargs):
        # Required by the Runnable interface. LangChain hands the chain's prompt in
        # as a PromptValue, so convert it to plain text before generating.
        input_text = input.to_string() if hasattr(input, "to_string") else str(input)
        return self.run(input_text)

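# Hedged usage sketch: the wrapper can also be called directly, outside any chain.
# `load_model` below supplies the (tokenizer, model) pair for this checkpoint.
#
#     tokenizer, model = load_model()
#     llm = HuggingFaceModelWrapper(model, tokenizer)
#     print(llm.invoke("Do you have rooms available this weekend?"))
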
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    model = AutoModelForCausalLM.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    return tokenizer, model

def demo_miny_memory(model):
    # ConversationBufferMemory keeps the full chat history; it accepts no `llm` or
    # `max_token_limit` arguments, so those are dropped here. A token-capped buffer
    # would need ConversationTokenBufferMemory backed by a LangChain LLM that can
    # count tokens. The `model` parameter is kept for API compatibility but unused.
    memory = ConversationBufferMemory()
    return memory

def demo_chain(input_text, memory, model):
    llm_conversation = ConversationChain(llm=model, memory=memory, verbose=get_verbose())
    chat_reply = llm_conversation.predict(input=input_text)
    return chat_reply
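

# Hedged wiring sketch (an assumption, not part of the original file): a minimal
# Streamlit chat loop over the helpers above, run with `streamlit run app.py`.
# The session-state key "memory" and the prompt text are illustrative.
if __name__ == "__main__":
    # Runs only as a script/Streamlit entry point, not when imported.
    tokenizer, model = load_model()
    llm = HuggingFaceModelWrapper(model, tokenizer)

    if "memory" not in st.session_state:
        st.session_state.memory = demo_miny_memory(llm)

    user_input = st.chat_input("Ask the hotel assistant...")
    if user_input:
        st.write(demo_chain(user_input, st.session_state.memory, llm))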