Create chatbot.py
modules/chatbot.py  +21 -0
ADDED
@@ -0,0 +1,21 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+class Llama2Chatbot:
+    def __init__(self):
+        self.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
+        self.model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.model.to(self.device)
+
+    def generate_response(self, prompt, max_length=100):
+        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+        outputs = self.model.generate(**inputs, max_length=max_length)
+        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return response
+
+def initialize_chatbot():
+    return Llama2Chatbot()
+
+def get_chatbot_response(chatbot, prompt):
+    return chatbot.generate_response(prompt)
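For reference, a minimal usage sketch of the two module-level helpers. It is illustrative, not part of the commit, and assumes the repository root is on PYTHONPATH and that access to the gated meta-llama/Llama-2-7b-hf checkpoint has been granted:

from modules.chatbot import initialize_chatbot, get_chatbot_response

# Loads the tokenizer and the ~7B-parameter model once; slow on the
# first call while the checkpoint is downloaded and moved to the GPU
# (or CPU, if CUDA is unavailable).
chatbot = initialize_chatbot()

# Uses the default max_length=100, which counts prompt tokens plus
# generated tokens combined.
print(get_chatbot_response(chatbot, "What is the capital of France?"))

Note that Llama-2-7b-hf is the base (non-instruction-tuned) model; for a chatbot, the meta-llama/Llama-2-7b-chat-hf variant with its [INST] prompt format is usually the better fit, and passing max_new_tokens to model.generate keeps the prompt from eating into the length budget.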