YashB1 committed
Commit 01289c8
1 Parent(s): 42e9f3f

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. Data.csv +3 -0
  3. app.py +177 -0
  4. src.py +131 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Data.csv filter=lfs diff=lfs merge=lfs -text
Data.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d997c25084b512ce9ec8b5fab0a76ab28ca74b8b7216065cbe0d74b1d989604e
+ size 232142693
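Data.csv is checked in as a Git LFS pointer: the repository stores only the three-line stub above, while the ~232 MB payload lives in LFS storage, which is what the new `.gitattributes` rule enables. A minimal sketch (the helper name is hypothetical) for catching the case where the repo was cloned without `git lfs pull`:

```python
from pathlib import Path

def is_lfs_pointer(path: str) -> bool:
    """Heuristic: LFS pointer files begin with the spec line shown in the diff above."""
    head = Path(path).read_bytes()[:120]
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")

if is_lfs_pointer("Data.csv"):
    raise RuntimeError("Data.csv is an LFS pointer; run `git lfs pull` to fetch the real file.")
```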
app.py ADDED
@@ -0,0 +1,177 @@
+ import streamlit as st
+ import os
+ import pandas as pd
+ import random
+ from os.path import join
+ from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
+ from dotenv import load_dotenv
+ from langchain_groq.chat_models import ChatGroq
+
+ load_dotenv("Groq.txt")
+ Groq_Token = os.environ["GROQ_API_KEY"]
+ models = {"mixtral": "mixtral-8x7b-32768", "llama": "llama2-70b-4096", "gemma": "gemma-7b-it"}
+
+ self_path = os.path.dirname(os.path.abspath(__file__))
+
+ # Use HTML and CSS to center the title
+ st.write(
+     """
+     <style>
+     .title {
+         text-align: center;
+         color: #17becf;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True,
+ )
+
+ # Display the centered title
+ st.markdown("<h2 class='title'>VayuBuddy</h2>", unsafe_allow_html=True)
+
+ # os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
+
+ # with open(join(self_path, "context1.txt")) as f:
+ #     context = f.read().strip()
+
+ # agent = load_agent(join(self_path, "app_trial_1.csv"), context)
+ # df = preprocess_and_load_df(join(self_path, "Data.csv"))
+ # inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
+ # inference_server = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"
+ # inference_server = "https://api-inference.huggingface.co/models/pandasai/bamboo-llm"
+
+ model_name = st.sidebar.selectbox("Select LLM:", ["mixtral", "llama", "gemma"])
+
+ questions = ('Custom Prompt',
+              'Plot the monthly average PM2.5 for the year 2023.',
+              'Which month has the highest average PM2.5 overall?',
+              'Which month has the highest PM2.5 overall?',
+              'Which month has the highest average PM2.5 in 2023 for Mumbai?',
+              'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
+              'Plot the yearly average PM2.5.',
+              'Plot the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+              'Which month has the highest pollution?',
+              'Plot the monthly average PM2.5 of Delhi for the year 2022.',
+              'Which city has the highest PM2.5 level in July 2022?',
+              'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
+              'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+              'Plot the monthly average PM2.5.',
+              'Plot the monthly average PM10 for the year 2023.',
+              'Which month has the highest PM2.5?',
+              'Plot the monthly average PM2.5 of Delhi for the year 2022.',
+              'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
+              'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
+              'Which state has the highest average PM2.5?',
+              'Plot monthly PM2.5 in Gujarat for 2023.',
+              'What is the name of the month with the highest average PM2.5 overall?')
+
+ waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
+
+ # agent = load_agent(df, context="", inference_server=inference_server, name=model_name)
+
+ # Initialize chat history
+ if "responses" not in st.session_state:
+     st.session_state.responses = []
+
+ # Display chat responses from history on app rerun
+ for response in st.session_state.responses:
+     if not response["no_response"]:
+         show_response(st, response)
+
+ show = True
+
+ if prompt := st.sidebar.selectbox("Select a Prompt:", questions):
+
+     # Add a note: select "Custom Prompt" to ask your own question
+     st.sidebar.info("Select 'Custom Prompt' to ask your own question.")
+
+     if prompt == 'Custom Prompt':
+         show = False
+         # React to user input
+         prompt = st.chat_input("Ask me anything about air quality!", key=10)
+         if prompt:
+             show = True
+     if show:
+
+         # Add user input to chat history
+         response = get_from_user(prompt)
+         response["no_response"] = False
+         st.session_state.responses.append(response)
+
+         # Display user input
+         show_response(st, response)
+
+         no_response = False
+
+         # Select a random waiting line
+         with st.spinner(random.choice(waiting_lines)):
+             ran = False
+             for i in range(5):
+                 llm = ChatGroq(model=models[model_name], api_key=Groq_Token, temperature=0.1)
+
+                 df_check = pd.read_csv("Data.csv")
+                 df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
+                 df_check = df_check.head(5)
+
+                 new_line = "\n"
+
+                 template = f"""```python
+ import pandas as pd
+ import matplotlib.pyplot as plt
+
+ df = pd.read_csv("Data.csv")
+ df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+
+ # df.dtypes
+ {new_line.join(map(lambda x: '# ' + x, str(df_check.dtypes).split(new_line)))}
+
+ # {prompt.strip()}
+ # <your code here>
+ ```
+ """
+
+                 query = f"""I have a pandas dataframe of PM2.5 and PM10 data.
+ * Frequency of the data is daily.
+ * `pollution` generally means `PM2.5`.
+ * Don't print; save the result in a variable `answer` and make it global.
+ * If the result is a plot, use a tight layout, save it, and store the path in `answer`. Example: `answer='plot.png'`
+ * If the result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
+
+ Complete the following code.
+
+ {template}
+ """
+
+                 answer = llm.invoke(query)
+                 # Assumes the reply contains a fenced ```python block.
+                 code = f"""
+ {template.split("```python")[1].split("```")[0]}
+ {answer.content.split("```python")[1].split("```")[0]}
+ """
+                 # Executing the generated code rebinds the module-level `answer`
+                 # to the result (a plot path or a string).
+                 try:
+                     exec(code)
+                     ran = True
+                     no_response = False
+                 except Exception as e:
+                     no_response = True
+                     exception = e
+
+                 response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}
+
+                 # Get response from agent
+                 # response = ask_question(model_name=model_name, question=prompt)
+                 # response = ask_agent(agent, prompt)
+
+                 if ran:
+                     break
+
+         if no_response:
+             st.error(f"Failed to generate the right output due to the following error:\n\n{exception}")
+
+         # Add agent response to chat history
+         st.session_state.responses.append(response)
+
+         # Display agent response
+         if not no_response:
+             show_response(st, response)
+
+         del prompt
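The prompt protocol above expects the model to finish the fenced template and leave its result in a global `answer`: a saved figure path for plots, a plain string otherwise. A hedged sketch of what a valid completion for the first canned question might look like; the `PM2.5` column name is an assumption taken from the prompt wording, not a confirmed schema:

```python
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv("Data.csv")
df["Timestamp"] = pd.to_datetime(df["Timestamp"])

# Plot the monthly average PM2.5 for the year 2023.
# "PM2.5" as a column name is an assumption; the real schema comes from df_check.dtypes.
df_2023 = df[df["Timestamp"].dt.year == 2023]
monthly = df_2023.groupby(df_2023["Timestamp"].dt.month)["PM2.5"].mean()
monthly.plot(kind="bar")
plt.xlabel("Month")
plt.ylabel("Average PM2.5")
plt.tight_layout()
plt.savefig("plot.png")
answer = "plot.png"  # app.py picks this global up after exec(code)
```

Because app.py calls `exec(code)` at module scope, the assignment to `answer` is visible to the surrounding script, which then stores it as the response content.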
src.py ADDED
@@ -0,0 +1,131 @@
+ import os
+ import pandas as pd
+ from pandasai import Agent, SmartDataframe
+ from PIL import Image
+ from pandasai.llm import HuggingFaceTextGen
+ from dotenv import load_dotenv
+ from langchain_groq.chat_models import ChatGroq
+
+ load_dotenv("Groq.txt")
+ Groq_Token = os.environ["GROQ_API_KEY"]
+ models = {"mixtral": "mixtral-8x7b-32768", "llama": "llama2-70b-4096", "gemma": "gemma-7b-it"}
+
+ hf_token = os.getenv("HF_READ")
+
+ def preprocess_and_load_df(path: str) -> pd.DataFrame:
+     df = pd.read_csv(path)
+     df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+     return df
+
+ def load_agent(df: pd.DataFrame, context: str, inference_server: str, name="mixtral") -> Agent:
+     # llm = HuggingFaceTextGen(
+     #     inference_server_url=inference_server,
+     #     max_new_tokens=250,
+     #     temperature=0.1,
+     #     repetition_penalty=1.2,
+     #     top_k=5,
+     # )
+     # llm.client.headers = {"Authorization": f"Bearer {hf_token}"}
+     llm = ChatGroq(model=models[name], api_key=Groq_Token, temperature=0.1)
+
+     agent = Agent(df, config={"llm": llm, "enable_cache": False, "options": {"wait_for_model": True}})
+     agent.add_message(context)
+     return agent
+
+ def load_smart_df(df: pd.DataFrame, inference_server: str, name="mixtral") -> SmartDataframe:
+     # llm = HuggingFaceTextGen(
+     #     inference_server_url=inference_server,
+     # )
+     # llm.client.headers = {"Authorization": f"Bearer {hf_token}"}
+     llm = ChatGroq(model=models[name], api_key=Groq_Token, temperature=0.1)
+     df = SmartDataframe(df, config={"llm": llm, "max_retries": 5, "enable_cache": False})
+     return df
+
+ def get_from_user(prompt):
+     return {"role": "user", "content": prompt}
+
+ def ask_agent(agent: Agent, prompt: str) -> dict:
+     response = agent.chat(prompt)
+     gen_code = agent.last_code_generated
+     ex_code = agent.last_code_executed
+     last_prompt = agent.last_prompt
+     return {"role": "assistant", "content": response, "gen_code": gen_code, "ex_code": ex_code, "last_prompt": last_prompt}
+
+ def decorate_with_code(response: dict) -> str:
+     # Note: the final <details> block is intentionally left open; the text
+     # branch of show_response appends the closing </details> before the content.
+     return f"""<details>
+ <summary>Generated Code</summary>
+
+ ```python
+ {response["gen_code"]}
+ ```
+ </details>
+
+ <details>
+ <summary>Prompt</summary>
+
+ {response["last_prompt"]}
+ """
+
+ def show_response(st, response):
+     with st.chat_message(response["role"]):
+         try:
+             image = Image.open(response["content"])
+             if "gen_code" in response:
+                 st.markdown(decorate_with_code(response), unsafe_allow_html=True)
+             st.image(image)
+         except Exception as e:
+             if "gen_code" in response:
+                 display_content = decorate_with_code(response) + f"""</details>
+
+ {response["content"]}"""
+             else:
+                 display_content = response["content"]
+             st.markdown(display_content, unsafe_allow_html=True)
+
+ def ask_question(model_name, question):
+     llm = ChatGroq(model=models[model_name], api_key=Groq_Token, temperature=0.1)
+
+     df_check = pd.read_csv("Data.csv")
+     df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
+     df_check = df_check.head(5)
+
+     new_line = "\n"
+
+     template = f"""```python
+ import pandas as pd
+ import matplotlib.pyplot as plt
+
+ df = pd.read_csv("Data.csv")
+ df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+
+ # df.dtypes
+ {new_line.join(map(lambda x: '# ' + x, str(df_check.dtypes).split(new_line)))}
+
+ # {question.strip()}
+ # <your code here>
+ ```
+ """
+
+     query = f"""I have a pandas dataframe of PM2.5 and PM10 data.
+ * Frequency of the data is daily.
+ * `pollution` generally means `PM2.5`.
+ * Save the result in a variable `answer` and make it global.
+ * If the result is a plot, save it and store the path in `answer`. Example: `answer='plot.png'`
+ * If the result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
+
+ Complete the following code.
+
+ {template}
+ """
+
+     answer = llm.invoke(query)
+     # Assumes the reply contains a fenced ```python block.
+     code = f"""
+ {template.split("```python")[1].split("```")[0]}
+ {answer.content.split("```python")[1].split("```")[0]}
+ """
+     # Update variable `answer` when the generated code is executed.
+     exec(code)
+
+     return {"role": "assistant", "content": answer.content, "gen_code": code, "ex_code": code, "last_prompt": question}
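Taken together, a minimal usage sketch of these helpers inside a Streamlit page, mirroring the path that app.py keeps commented out; the question text is illustrative:

```python
import streamlit as st
from src import ask_question, get_from_user, show_response

question = "Which city has the highest PM2.5 level in July 2022?"
show_response(st, get_from_user(question))  # echo the user's turn
result = ask_question(model_name="mixtral", question=question)
show_response(st, result)  # renders the text answer, or the saved plot if one exists
```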