YashB1 committed on
Commit
c114fc1
1 Parent(s): b7e7ed5

Upload 5 files

Files changed (6)
  1. .gitattributes +1 -0
  2. Data.csv +3 -0
  3. Groq.txt +1 -0
  4. app.py +247 -0
  5. requirements.txt +4 -0
  6. src.py +131 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Data.csv filter=lfs diff=lfs merge=lfs -text
Data.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d997c25084b512ce9ec8b5fab0a76ab28ca74b8b7216065cbe0d74b1d989604e
+ size 232142693
Groq.txt ADDED
@@ -0,0 +1 @@
+ GROQ_API_KEY = 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
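Both app.py and src.py consume this file via `load_dotenv("Groq.txt")`, which parses the `KEY = value` line into the process environment. A minimal sketch of that mechanism, assuming python-dotenv is installed:

```python
import os
from dotenv import load_dotenv

# Parses "GROQ_API_KEY = gsk_..." from Groq.txt into os.environ
# (existing environment variables are left untouched by default).
load_dotenv("Groq.txt")

groq_token = os.environ["GROQ_API_KEY"]  # KeyError if the file is missing
```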
app.py ADDED
@@ -0,0 +1,247 @@
+ import streamlit as st
+ import os
+ import pandas as pd
+ import random
+ from os.path import join
+ from src import preprocess_and_load_df, load_agent, ask_agent, decorate_with_code, show_response, get_from_user, load_smart_df, ask_question
+ from dotenv import load_dotenv
+ from langchain_groq.chat_models import ChatGroq
+
+ load_dotenv("Groq.txt")
+ Groq_Token = os.environ["GROQ_API_KEY"]
+ models = {"llama3": "llama3-70b-8192", "mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
+
+ self_path = os.path.dirname(os.path.abspath(__file__))
+
+ # Using HTML and CSS to center the title
+ st.write(
+     """
+     <style>
+     .title {
+         text-align: center;
+         color: #17becf;
+     }
+     </style>
+     """,
+     unsafe_allow_html=True,
+ )
+
+ # Displaying the centered title
+ st.markdown("<h2 class='title'>VayuBuddy</h2>", unsafe_allow_html=True)
+
+ # os.environ["PANDASAI_API_KEY"] = "$2a$10$gbmqKotzJOnqa7iYOun8eO50TxMD/6Zw1pLI2JEoqncwsNx4XeBS2"
+
+ # with open(join(self_path, "context1.txt")) as f:
+ #     context = f.read().strip()
+
+ # agent = load_agent(join(self_path, "app_trial_1.csv"), context)
+ # df = preprocess_and_load_df(join(self_path, "Data.csv"))
+ # inference_server = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
+ # inference_server = "https://api-inference.huggingface.co/models/codellama/CodeLlama-13b-hf"
+ # inference_server = "https://api-inference.huggingface.co/models/pandasai/bamboo-llm"
+
+ model_name = st.sidebar.selectbox("Select LLM:", ["llama3", "mixtral", "llama2", "gemma"])
+
+ questions = ('Custom Prompt',
+     'Plot the monthly average PM2.5 for the year 2023.',
+     'Which month has the highest average PM2.5 overall?',
+     'Which month has the highest PM2.5 overall?',
+     'Which month has the highest average PM2.5 in 2023 for Mumbai?',
+     'Plot and compare monthly timeseries of pollution for Mumbai and Bengaluru.',
+     'Plot the yearly average PM2.5.',
+     'Plot the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+     'Which month has the highest pollution?',
+     'Plot the monthly average PM2.5 of Delhi for the year 2022.',
+     'Which city has the highest PM2.5 level in July 2022?',
+     'Plot and compare monthly timeseries of PM2.5 for Mumbai and Bengaluru.',
+     'Plot and compare the monthly average PM2.5 of Delhi, Mumbai and Bengaluru for the year 2022.',
+     'Plot the monthly average PM2.5.',
+     'Plot the monthly average PM10 for the year 2023.',
+     'Which month has the highest PM2.5?',
+     'Plot the monthly average PM2.5 of Bengaluru for the year 2022.',
+     'Plot the monthly average PM2.5 of Mumbai for the year 2022.',
+     'Which state has the highest average PM2.5?',
+     'Plot monthly PM2.5 in Gujarat for 2023.',
+     'What is the name of the month with the highest average PM2.5 overall?')
+
+ waiting_lines = ("Thinking...", "Just a moment...", "Let me think...", "Working on it...", "Processing...", "Hold on...", "One moment...", "On it...")
+
+ # agent = load_agent(df, context="", inference_server=inference_server, name=model_name)
+
+ # Initialize chat history
+ if "responses" not in st.session_state:
+     st.session_state.responses = []
+
+ # Display chat responses from history on app rerun
+ for response in st.session_state.responses:
+     if not response["no_response"]:
+         show_response(st, response)
+
+ show = True
+
+ prompt = st.sidebar.selectbox("Select a Prompt:", questions)
+
+ # add a note: "select Custom Prompt to ask your own question"
+
+
+ if prompt == 'Custom Prompt':
+     show = False
+     # React to user input
+     prompt = st.chat_input("Ask me anything about air quality!", key=10)
+     if prompt:
+         show = True
+
+ if show:
+
+     # Add user input to chat history
+     response = get_from_user(prompt)
+     response["no_response"] = False
+     st.session_state.responses.append(response)
+
+     # Display user input
+     show_response(st, response)
+
+     no_response = False
+
+     # select a random waiting line
+     with st.spinner(random.choice(waiting_lines)):
+         ran = False
+         for i in range(5):
+             llm = ChatGroq(model=models[model_name], api_key=Groq_Token, temperature=0.1)
+
+             df_check = pd.read_csv("Data.csv")
+             df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
+             df_check = df_check.head(5)
+
+             new_line = "\n"
+
+             template = f"""```python
+ import pandas as pd
+ import matplotlib.pyplot as plt
+
+ df = pd.read_csv("Data.csv")
+ df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+
+ # df.dtypes
+ {new_line.join(map(lambda x: '# ' + x, str(df_check.dtypes).split(new_line)))}
+
+ # {prompt.strip()}
+ # <your code here>
+ ```
+ """
+
+             query = f"""I have a pandas dataframe `df` with daily PM2.5 and PM10 data.
+ * Frequency of the data is daily.
+ * The number of stations in a city is the number of unique stations listed for that city in the dataset.
+ * `pollution` generally means `PM2.5`.
+ * PM2.5 guidelines: India: 60, WHO: 25.
+ * PM10 guidelines: India: 100, WHO: 50.
+ * You already have `df`, so don't read the csv file again.
+ * Don't print; save the result in a variable `answer` and make it global.
+ * If the result is a plot, use a tight layout, save it, and store the path in `answer`. Example: `answer='plot.png'`
+ * If the result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
+ * If the result is not a plot, also save a csv file containing the data behind the answer and the data samples used.
+ * If the result is a plot, show the India and WHO guidelines in the plot.
+ * Whenever you aggregate, use the mean and report the standard deviation, the standard error, and the number of data points.
+ * Round every floating point number to 2 decimal places.
+ * Always report the unit of the data. Example: `The average PM2.5 is 45.67 µg/m³`
+ * If the result is a plot, use the tableau 20 colour scheme and a large font size.
+ * Treat station and sensor as synonyms.
+ Complete the following code.
+
+ {template}
+
+ """
+
+             answer = llm.invoke(query)
+             code = f"""
+ {template.split("```python")[1].split("```")[0]}
+ {answer.content.split("```python")[1].split("```")[0]}
+ """
+             # exec runs at module scope here, so the generated code's global
+             # `answer` assignment replaces the LLM message object above
+             try:
+                 exec(code)
+                 ran = True
+                 no_response = False
+             except Exception as e:
+                 no_response = True
+                 exception = e
+
+             response = {"role": "assistant", "content": answer, "gen_code": code, "ex_code": code, "last_prompt": prompt, "no_response": no_response}
+
+             # Get response from agent
+             # response = ask_question(model_name=model_name, question=prompt)
+             # response = ask_agent(agent, prompt)
+
+             if ran:
+                 break
+
+     if no_response:
+         st.error(f"Failed to generate the right output due to the following error:\n\n{exception}")
+     # Add agent response to chat history
+     st.session_state.responses.append(response)
+
+     # Display agent response
+     if not no_response:
+         show_response(st, response)
+
+     del prompt
+
+
+
+ st.sidebar.info("\nCalculator")
+ Pollutant = ["O3", "PM2.5", "PM10", "CO", "SO2", "NO2"]
+ Calculator_index = st.sidebar.selectbox("Select a pollutant:", Pollutant)
+
+ if Calculator_index:
+     concentration = st.sidebar.number_input(f"Enter {Calculator_index} concentration (µg/m³):")
+     calculate_button = st.sidebar.button("Calculate")
+     if concentration and calculate_button:
+         # Breakpoint concentrations for each AQI band (India scale): lower bounds...
+         breakpoints_low = {
+             "O3": [0, 50, 100, 168, 208, 748],
+             "PM2.5": [0, 30, 60, 90, 120, 250],
+             "PM10": [0, 50, 100, 250, 350, 430],
+             "CO": [0, 1000, 2000, 10000, 17000, 34000],
+             "SO2": [0, 40, 80, 380, 800, 1600],
+             "NO2": [0, 40, 80, 180, 280, 400]
+         }
+         # ...and upper bounds of the same bands
+         breakpoints_high = {
+             "O3": [50, 100, 168, 208, 748, 1000],
+             "PM2.5": [30, 60, 90, 120, 250, 1000],
+             "PM10": [50, 100, 250, 350, 430, 1000],
+             "CO": [1000, 2000, 10000, 17000, 34000, 50000],
+             "SO2": [40, 80, 380, 800, 1600, 2000],
+             "NO2": [40, 80, 180, 280, 400, 1000]
+         }
+         # Corresponding AQI sub-index bounds for each band
+         categories_low = [0, 50, 100, 200, 300, 400]
+         categories_high = [50, 100, 200, 300, 400, 500]
+
+         # Find the band [BPLI, BPHI] that contains the concentration
+         for i in range(len(breakpoints_high[Calculator_index])):
+             if breakpoints_low[Calculator_index][i] <= concentration <= breakpoints_high[Calculator_index][i]:
+                 BPLI = breakpoints_low[Calculator_index][i]
+                 BPHI = breakpoints_high[Calculator_index][i]
+                 ILI = categories_low[i]
+                 IHI = categories_high[i]
+                 break
+         else:
+             # Concentrations above the top breakpoint fall in the highest band
+             BPLI = breakpoints_low[Calculator_index][-1]
+             BPHI = breakpoints_high[Calculator_index][-1]
+             ILI = categories_low[-1]
+             IHI = categories_high[-1]
+
+         # Calculate AQI using the India formula: linear interpolation within the band
+         AQI = ((IHI - ILI) / (BPHI - BPLI)) * (concentration - BPLI) + ILI
+         st.sidebar.write(f"The Air Quality Index (AQI) for {Calculator_index} is: {round(AQI)}")
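To sanity-check the calculator's interpolation, here is a worked example with the PM2.5 breakpoints from the tables above (a standalone sketch, not part of the committed app):

```python
# A PM2.5 concentration of 45 µg/m³ falls in the 30-60 µg/m³ band,
# whose AQI sub-index runs from 50 to 100.
BPLI, BPHI = 30, 60   # concentration bounds of the band
ILI, IHI = 50, 100    # AQI bounds of the band
concentration = 45

AQI = ((IHI - ILI) / (BPHI - BPLI)) * (concentration - BPLI) + ILI
print(AQI)  # (50 / 30) * 15 + 50 = 75.0
```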
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ # st.chat_input / st.chat_message require streamlit >= 1.24
+ streamlit>=1.24.0
+ pandas>=1.5.0
+ langchain-groq>=0.1.0
+ python-dotenv>=1.0.0
+ pandasai
+ Pillow
+ matplotlib
src.py ADDED
@@ -0,0 +1,131 @@
+ import os
+ import pandas as pd
+ from pandasai import Agent, SmartDataframe
+ from PIL import Image
+ from pandasai.llm import HuggingFaceTextGen
+ from dotenv import load_dotenv
+ from langchain_groq.chat_models import ChatGroq
+
+ load_dotenv("Groq.txt")
+ Groq_Token = os.environ["GROQ_API_KEY"]
+ # Keep the model map in sync with the keys offered in app.py's selectbox
+ models = {"llama3": "llama3-70b-8192", "mixtral": "mixtral-8x7b-32768", "llama2": "llama2-70b-4096", "gemma": "gemma-7b-it"}
+
+ hf_token = os.getenv("HF_READ")
+
+ def preprocess_and_load_df(path: str) -> pd.DataFrame:
+     df = pd.read_csv(path)
+     df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+     return df
+
+ def load_agent(df: pd.DataFrame, context: str, inference_server: str, name="mixtral") -> Agent:
+     # llm = HuggingFaceTextGen(
+     #     inference_server_url=inference_server,
+     #     max_new_tokens=250,
+     #     temperature=0.1,
+     #     repetition_penalty=1.2,
+     #     top_k=5,
+     # )
+     # llm.client.headers = {"Authorization": f"Bearer {hf_token}"}
+     llm = ChatGroq(model=models[name], api_key=Groq_Token, temperature=0.1)
+
+     agent = Agent(df, config={"llm": llm, "enable_cache": False, "options": {"wait_for_model": True}})
+     agent.add_message(context)
+     return agent
+
+ def load_smart_df(df: pd.DataFrame, inference_server: str, name="mixtral") -> SmartDataframe:
+     # llm = HuggingFaceTextGen(
+     #     inference_server_url=inference_server,
+     # )
+     # llm.client.headers = {"Authorization": f"Bearer {hf_token}"}
+     llm = ChatGroq(model=models[name], api_key=Groq_Token, temperature=0.1)
+     df = SmartDataframe(df, config={"llm": llm, "max_retries": 5, "enable_cache": False})
+     return df
+
+ def get_from_user(prompt):
+     return {"role": "user", "content": prompt}
+
+ def ask_agent(agent: Agent, prompt: str) -> dict:
+     response = agent.chat(prompt)
+     gen_code = agent.last_code_generated
+     ex_code = agent.last_code_executed
+     last_prompt = agent.last_prompt
+     return {"role": "assistant", "content": response, "gen_code": gen_code, "ex_code": ex_code, "last_prompt": last_prompt}
+
+ def decorate_with_code(response: dict) -> str:
+     return f"""<details>
+ <summary>Generated Code</summary>
+
+ ```python
+ {response["gen_code"]}
+ ```
+ </details>
+
+ <details>
+ <summary>Prompt</summary>
+
+ {response["last_prompt"]}
+ """
+
+ def show_response(st, response):
+     with st.chat_message(response["role"]):
+         try:
+             # If the content is a path to a saved plot, render it as an image
+             image = Image.open(response["content"])
+             if "gen_code" in response:
+                 st.markdown(decorate_with_code(response), unsafe_allow_html=True)
+             st.image(image)
+         except Exception:
+             # Otherwise treat the content as markdown text
+             if "gen_code" in response:
+                 display_content = decorate_with_code(response) + f"""</details>
+
+ {response["content"]}"""
+             else:
+                 display_content = response["content"]
+             st.markdown(display_content, unsafe_allow_html=True)
+
+ def ask_question(model_name, question):
+     llm = ChatGroq(model=models[model_name], api_key=Groq_Token, temperature=0.1)
+
+     df_check = pd.read_csv("Data.csv")
+     df_check["Timestamp"] = pd.to_datetime(df_check["Timestamp"])
+     df_check = df_check.head(5)
+
+     new_line = "\n"
+
+     template = f"""```python
+ import pandas as pd
+ import matplotlib.pyplot as plt
+
+ df = pd.read_csv("Data.csv")
+ df["Timestamp"] = pd.to_datetime(df["Timestamp"])
+
+ # df.dtypes
+ {new_line.join(map(lambda x: '# ' + x, str(df_check.dtypes).split(new_line)))}
+
+ # {question.strip()}
+ # <your code here>
+ ```
+ """
+
+     query = f"""I have a pandas dataframe `df` with daily PM2.5 and PM10 data.
+ * Frequency of the data is daily.
+ * `pollution` generally means `PM2.5`.
+ * Save the result in a variable `answer` and make it global.
+ * If the result is a plot, save it and store the path in `answer`. Example: `answer='plot.png'`
+ * If the result is not a plot, save it as a string in `answer`. Example: `answer='The city is Mumbai'`
+
+ Complete the following code.
+
+ {template}
+ """
+
+     answer = llm.invoke(query)
+     code = f"""
+ {template.split("```python")[1].split("```")[0]}
+ {answer.content.split("```python")[1].split("```")[0]}
+ """
+     # Run the generated code; it is expected to define a global `answer`
+     exec(code)
+
+     return {"role": "assistant", "content": answer.content, "gen_code": code, "ex_code": code, "last_prompt": question}
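For reference, a minimal sketch of how these helpers chain together outside Streamlit; the question is one of the canned prompts from app.py, and `ask_question` assumes `Data.csv` is in the working directory:

```python
from src import preprocess_and_load_df, ask_question

# Load the LFS-tracked dataset (Timestamp parsed to datetime)
df = preprocess_and_load_df("Data.csv")

# Ask a canned question; the helper prompts Groq for code and execs it
result = ask_question(model_name="llama3", question="Which month has the highest average PM2.5 overall?")

print(result["content"])   # raw LLM reply containing the completed code block
print(result["gen_code"])  # the code that was executed against Data.csv
```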