CobaltZvc committed
Commit 2dce196
1 Parent(s): ac81094

Update app.py

Files changed (1)
  1. app.py +51 -51
app.py CHANGED
@@ -213,61 +213,61 @@ if Usage == 'Questions based on custom CSV data':
 
     temp = st.slider('Temperature: ', 0.0, 1.0, 0.0)
 
-    if st.checkbox('Use Prompt'):
-        with st.form(key='columns_in_form2'):
-            col3, col4 = st.columns(2)
-            with col3:
-                userPrompt = st.text_area("Input Prompt",'Enter Natural Language Query')
-                submitButton = st.form_submit_button(label = 'Submit')
-                if submitButton:
-                    try:
-                        col_p ="Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
-                        result = gpt3(col_p)
-                    except:
-                        results = gpt3(userPrompt)
-                    st.success('loaded')
-            with col4:
+
+    with st.form(key='columns_in_form2'):
+        col3, col4 = st.columns(2)
+        with col3:
+            userPrompt = st.text_area("Input Prompt",'Enter Natural Language Query')
+            submitButton = st.form_submit_button(label = 'Submit')
+            if submitButton:
                 try:
-                    sqlOutput = st.text_area('SQL Query', value=gpt3(col_p))
-                    warning(sqlOutput)
-                    cars=pd.read_csv('cars.csv')
-                    result_tab2=ps.sqldf(sqlOutput)
-                    st.write(result_tab2)
-                    with open("fewshot_matplot.txt", "r") as file:
-                        text_plot = file.read()
-
-                    result_tab = result_tab2.reset_index(drop=True)
-                    result_tab_string = result_tab.to_string()
-                    gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
+                    col_p ="Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
+                    result = gpt3(col_p)
+                except:
+                    results = gpt3(userPrompt)
+                st.success('loaded')
+        with col4:
+            try:
+                sqlOutput = st.text_area('SQL Query', value=gpt3(col_p))
+                warning(sqlOutput)
+                cars=pd.read_csv('cars.csv')
+                result_tab2=ps.sqldf(sqlOutput)
+                st.write(result_tab2)
+                with open("fewshot_matplot.txt", "r") as file:
+                    text_plot = file.read()
+
+                result_tab = result_tab2.reset_index(drop=True)
+                result_tab_string = result_tab.to_string()
+                gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
+
+                if len(gr_prompt) > 4097:
+                    st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
+                    st.write('As of today, the NLP model text-davinci-003 that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
+
+                elif len(result_tab2.columns) < 2:
+                    st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
+
+                else:
+                    st.success("Plotting...")
+                    response_graph = openai.Completion.create(
+                        engine="text-davinci-003",
+                        prompt = gr_prompt,
+                        max_tokens=1024,
+                        n=1,
+                        stop=None,
+                        temperature=0.5,
+                    )
 
-                    if len(gr_prompt) > 4097:
-                        st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
-                        st.write('As of today, the NLP model text-davinci-003 that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
+                    if response_graph['choices'][0]['text'] != "":
+                        print(response_graph['choices'][0]['text'])
+                        exec(response_graph['choices'][0]['text'])
 
-                    elif len(result_tab2.columns) < 2:
-                        st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
-
                     else:
-                        st.success("Plotting...")
-                        response_graph = openai.Completion.create(
-                            engine="text-davinci-003",
-                            prompt = gr_prompt,
-                            max_tokens=1024,
-                            n=1,
-                            stop=None,
-                            temperature=0.5,
-                        )
-
-                        if response_graph['choices'][0]['text'] != "":
-                            print(response_graph['choices'][0]['text'])
-                            exec(response_graph['choices'][0]['text'])
-
-                        else:
-                            print('Retry! Graph could not be plotted *_*')
-
-                except:
-                    pass
-
+                        print('Retry! Graph could not be plotted *_*')
+
+            except:
+                pass
+
 
 elif Usage == 'Random Questions':
     st.text('''You can ask me for:
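Net effect of the hunk: the st.checkbox('Use Prompt') gate is deleted and every line beneath it is dedented one level, so the prompt form, the gpt3/pandasql query step, and the plotting branch now always render under 'Questions based on custom CSV data' instead of only after the checkbox is ticked; the statements themselves are otherwise unchanged. A minimal sketch of that gating change, reusing the widget names from the diff and assuming only the standard Streamlit API (the rest of app.py is omitted):

import streamlit as st

# Before this commit the whole query UI sat behind a checkbox and only
# rendered once the user ticked 'Use Prompt':
#
#     if st.checkbox('Use Prompt'):
#         with st.form(key='columns_in_form2'):
#             ...
#
# After this commit the gate is gone, so the form renders unconditionally:
with st.form(key='columns_in_form2'):
    col3, col4 = st.columns(2)
    with col3:
        userPrompt = st.text_area("Input Prompt", 'Enter Natural Language Query')
        submitButton = st.form_submit_button(label='Submit')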