import os

import pandas as pd
from dotenv import load_dotenv
from langchain.agents import AgentExecutor, create_react_agent
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq
#from langchain_community.llms import HuggingFaceHub

from memory import memory
from tools import zeroshot_tools

load_dotenv()

groq_api_key = os.getenv("GROQ_API_KEY")

# Groq-hosted Mixtral instance used as the agent's LLM.
llm1 = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768", groq_api_key=groq_api_key)

def read_first_3_rows():
    """Return the first three rows of dataset.csv as a plain-text preview."""
    dataset_path = "dataset.csv"
    try:
        df = pd.read_csv(dataset_path)
        first_3_rows = df.head(3).to_string(index=False)
    except FileNotFoundError:
        first_3_rows = "Error: Dataset file not found."

    return first_3_rows


def get_agent_chain():
    dataset_first_3_rows = read_first_3_rows()

    # The first literal is an f-string so the dataset preview is baked into the
    # template text; the second is a plain string so {tools}, {tool_names},
    # {chat_history}, {input} and {agent_scratchpad} remain template variables.
    prompt = PromptTemplate(
        input_variables=["agent_scratchpad", "chat_history", "input", "tool_names", "tools"],
        template=(
            f"""
You are a helpful assistant that can help users explore a dataset.
First 3 rows of the dataset:
{dataset_first_3_rows}
===="""
            """
TOOLS:
------
You have access to the following tools:

{tools}

To use a tool, please use the following format:

Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action

When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:

Thought: Do I need to use a tool? No
Final Answer: [your response here]

Begin!

Previous conversation history:
{chat_history}

New input: {input}
{agent_scratchpad}"""
        ),
    )


    conversational_agent_llm = llm1
    #conversational_agent_llm = ChatOpenAI(model="gpt-3.5-turbo-16k", temperature=temperature, streaming=True)
    conversational_agent = create_react_agent(conversational_agent_llm, zeroshot_tools, prompt)
    room_selection_chain = AgentExecutor(
        agent=conversational_agent,
        tools=zeroshot_tools,
        verbose=True,
        memory=memory,
        handle_parsing_errors=True,
        max_iterations=4,
    )
    return room_selection_chain
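

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original module):
    # assumes GROQ_API_KEY is set, dataset.csv sits next to this file, and the
    # local `memory` and `tools` modules are importable. The question below is
    # a hypothetical example input.
    agent_chain = get_agent_chain()
    result = agent_chain.invoke({"input": "What columns does the dataset contain?"})
    print(result["output"])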