CobaltZvc committed on
Commit
552b2ab
1 Parent(s): 770f7ce

Update app.py

Files changed (1)
app.py +232 -523
app.py CHANGED
@@ -24,48 +24,12 @@ from google.oauth2 import service_account
 from googleapiclient.discovery import build
 import wget
 import urllib.request
-import sqlite3
-import pandas as pd
-import pandasql as ps
-# import sounddevice as sd
-# import soundfile as sf
 
-def clean(value):
-    val = value.replace("'",'').replace("[",'').replace("]",'')
-    return val
 
 def save_uploadedfile(uploadedfile):
     with open(uploadedfile.name,"wb") as f:
         f.write(uploadedfile.getbuffer())
 
-def gpt3(texts):
-    # openai.api_key = os.environ["Secret"]
-    openai.api_key = st.secrets['OPENAI_KEY'] #'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'#'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' #'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'
-    response = openai.Completion.create(
-        engine="text-davinci-003",
-        prompt= texts,
-        temperature=temp,
-        max_tokens=750,
-        top_p=1,
-        frequency_penalty=0.0,
-        presence_penalty=0.0,
-        stop = (";", "/*", "</code>"))
-    x = response.choices[0].text
-    return x
-
-def warning(sqlOutput):
-    dl = []
-    lst = ['DELETE','DROP','TRUNCATE','MERGE','ALTER','UPDATE','INSERT']
-    op2 = " ".join(sqlOutput.split())
-    op3 = op2.split(' ')
-    op4 = list(map(lambda x: x.upper(), op3))
-    for i in op4:
-        if i in lst:
-            dl.append(i)
-    for i in dl:
-        st.warning("This query will " + i + " the data ",icon="⚠️")
-
-
 stability_api = client.StabilityInference(
     key=st.secrets["STABILITY_KEY"], #os.environ("STABILITY_KEY"), # key=os.environ['STABILITY_KEY'], # API Key reference.
     verbose=True, # Print debug messages.
@@ -253,47 +217,6 @@ def g_sheet_log(myinput, output):
     ).execute()
 
 openai.api_key = st.secrets["OPENAI_KEY"]
-# duration = 5
-# fs = 44100
-# channels = 1
-# filename = "output.wav"
-
-# def record_audio():
-#     myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
-#     sd.wait()
-#     sf.write(filename, myrecording, fs)
-#     return filename
-# p = pyaudio.PyAudio()
-
-# # Open the microphone stream
-# stream = p.open(format=FORMAT,
-#                 channels=CHANNELS,
-#                 rate=RATE,
-#                 input=True,
-#                 frames_per_buffer=CHUNK)
-
-# # Record the audio
-# frames = []
-# for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
-#     data = stream.read(CHUNK)
-#     frames.append(data)
-
-# # Close the microphone stream
-# stream.stop_stream()
-# stream.close()
-# p.terminate()
-
-# # Save the recorded audio to a WAV file
-# wf = wave.open("output.mp3", "wb")
-# wf.setnchannels(CHANNELS)
-# wf.setsampwidth(p.get_sample_size(FORMAT))
-# wf.setframerate(RATE)
-# wf.writeframes(b"".join(frames))
-# wf.close()
-
-# # Return the path to the recorded audio file
-# return "output.mp3"
-
 
 def openai_response(PROMPT):
     response = openai.Image.create(
@@ -306,392 +229,186 @@ def openai_response(PROMPT):
 st.title("Hi! :red[HyperBot] here!!🤖⭐️")
 st.title("Go on ask me anything!!")
 
-st.text('''
-⭐️ HyperBot is your virtual assistant powered by Whisper / chatgpt / internet / Dall-E / OpenAI embeddings -
-the perfect companion for you. With HyperBot, you can ask anything you ask internet everyday . Get answers
-to questions about the weather , stocks 📈, news📰, and more! Plus, you can also generate 🖌️ paintings,
-drawings, abstract art 🎨, play music 🎵 or videos, create tweets 🐦 and posts 📝, and compose emails 📧 -
-all with the help of HyperBot! 🤖
+st.write('''
+⭐️ HyperBot is your virtual assistant powered by Whisper /
+chatgpt / internet / Dall-E / OpenAI embeddings - the perfect
+companion for you. With HyperBot, you can ask anything you ask
+internet everyday . Get answers to questions about the weather,
+stocks 📈, news📰, and more! Plus, you can also generate 🖌️
+paintings, drawings, abstract art 🎨, play music 🎵 or videos,
+create tweets 🐦 and posts 📝, and compose emails 📧 - all with
+the help of HyperBot! 🤖 ✨
 ''')
 
-option_ = ['Ask me anything!😊','Ask me anything (CSV file data)!📊']
-Usage = st.selectbox('Select an option:', option_)
-
-if Usage == 'Ask me anything (CSV file data)!📊':
-    st.text('''
-    You can use your own custom csv files to test this feature or
-    you can use the sample csv file which contains data about cars.
+st.text('''You can ask me:
+1. All the things you ask ChatGPT.
+2. Generating paintings, drawings, abstract art.
+3. Music or Videos
+4. Weather
+5. Stocks
+6. Current Affairs and News.
+7. Create or compose tweets or Linkedin posts or email.''')
+
+Input_type = st.radio(
+    "**Input type:**",
+    ('TEXT', 'SPEECH')
+)
+
+if Input_type == 'TEXT':
+    st.write('**You are now in Text input mode**')
+    mytext = st.text_input('**Go on! Ask me anything:**')
+    if st.button("SUBMIT"):
+        question=mytext
+        response = openai.Completion.create(
+            model="text-davinci-003",
+            prompt=f'''Your name is HyperBot and knowledge cutoff date is 2021-09, and you are not aware of any events after that time. if the
+            Answer to following questions is not from your knowledge base or in case of queries like weather
+            updates / stock updates / current news or people which requires you to have internet connection then print i don't have access to internet to answer your question,
+            if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+            if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+            if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
+            if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
+            \nQuestion-{question}
+            \nAnswer -''',
+            temperature=0.49,
+            max_tokens=256,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0
+        )
+        string_temp=response.choices[0].text
 
-    Example question:
-    - How many cars were manufactured each year between 2000 to 2008?
-    ''')
-
-    option = ['Sample_Cars_csv','Upload_csv']
-    res = st.selectbox('Select from below options:',option)
-    if res == 'Upload_csv':
-        uploaded_file = st.file_uploader("Add dataset (csv) ",type=['csv'])
-        if uploaded_file is not None:
-            st.write("File Uploaded")
-            file_name=uploaded_file.name
-            ext=file_name.split(".")[0]
-            st.write(ext)
-            df=pd.read_csv(uploaded_file)
-            save_uploadedfile(uploaded_file)
-            col= df.columns
+        if ("gen_draw" in string_temp):
             try:
-                columns = str((df.columns).tolist())
-                column = clean(columns)
-                st.write('Columns:' )
-                st.text(col)
-            except:
-                pass
-
-            temp = st.slider('Temperature: ', 0.0, 1.0, 0.0)
-
-            with st.form("Form Layout Upload_csv"):
-                userPrompt = st.text_area("Input Prompt",'Enter Natural Language Query')
-                submitButton = st.form_submit_button(label = 'Submit')
-
-            if submitButton:
-                try:
-                    col_p ="Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
-                    result = gpt3(col_p)
-                    sqlOutput = result #st.text_area('SQL Query', value=gpt3(col_p))
-                    warning(sqlOutput)
-                    result_tab2=ps.sqldf(sqlOutput)
-                    st.write(result_tab2)
-                    with open("fewshot_matplot.txt", "r") as file:
-                        text_plot = file.read()
-
-                    result_tab = result_tab2.reset_index(drop=True)
-                    result_tab_string = result_tab.to_string()
-                    gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
-
-                    if len(gr_prompt) > 4097:
-                        st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
-                        st.write('As of today, the NLP model text-davinci-003/gpt-3.5-turbo that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
-
-                    elif len(result_tab2.columns) < 2:
-                        st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
-
-                    else:
-                        st.success("Plotting...")
-                        response_graph = openai.Completion.create(
-                            engine="text-davinci-003",
-                            prompt = gr_prompt,
-                            max_tokens=1024,
-                            n=1,
-                            stop=None,
-                            temperature=0.5,
-                        )
-
-                        if response_graph['choices'][0]['text'] != "":
-                            print(response_graph['choices'][0]['text'])
-                            exec(response_graph['choices'][0]['text'])
-
-                        else:
-                            print('Retry! Graph could not be plotted *_*')
-
-                except:
-                    results = gpt3(userPrompt)
-                    st.success('loaded')
-
-    elif res == "Sample_Cars_csv":
-        df = pd.read_csv('cars.csv')
-        col= df.columns
-        try:
-            columns = str((df.columns).tolist())
-            column = clean(columns)
-            st.write('Columns:' )
-            st.text(col)
-        except:
-            pass
-
-        temp = st.slider('Temperature: ', 0.0, 1.0, 0.0)
-
-        with st.form("Form Layout Custom_csv"):
-            userPrompt = st.text_area("Input Prompt",'Enter Natural Language Query')
-            submitButton = st.form_submit_button(label = 'Submit')
-
-            if submitButton:
                 try:
-                    col_p ="Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
-                    result = gpt3(col_p)
-                    sqlOutput = result #st.text_area('SQL Query', value=gpt3(col_p))
-                    warning(sqlOutput)
-                    result_tab2=ps.sqldf(sqlOutput)
-                    st.write(result_tab2)
-
-                    with open("fewshot_matplot.txt", "r") as file:
-                        text_plot = file.read()
-
-                    result_tab = result_tab2.reset_index(drop=True)
-                    result_tab_string = result_tab.to_string()
-                    gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
-
-                    if len(gr_prompt) > 4097:
-                        st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
-                        st.write('As of today, the NLP model text-davinci-003 that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
-
-                    elif len(result_tab2.columns) < 2:
-                        st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
-
-                    else:
-                        st.success("Plotting...")
-                        response_graph = openai.Completion.create(
-                            engine="text-davinci-003",
-                            prompt = gr_prompt,
-                            max_tokens=1024,
-                            n=1,
-                            stop=None,
-                            temperature=0.5,
-                        )
-
-                        if response_graph['choices'][0]['text'] != "":
-                            print(response_graph['choices'][0]['text'])
-                            exec(response_graph['choices'][0]['text'])
-
-                        else:
-                            print('Retry! Graph could not be plotted *_*')
-                except:
-                    results = gpt3(userPrompt)
-                    st.success('loaded')
-
-
-elif Usage == 'Ask me anything!😊':
-    st.text('''You can ask me:
-    1. All the things you ask ChatGPT.
-    2. Generating paintings, drawings, abstract art.
-    3. Music or Videos
-    4. Weather
-    5. Stocks
-    6. Current Affairs and News.
-    7. Create or compose tweets or Linkedin posts or email.''')
-
-    Input_type = st.radio(
-        "**Input type:**",
-        ('TEXT', 'SPEECH')
-    )
-
-    if Input_type == 'TEXT':
-        st.write('**You are now in Text input mode**')
-        mytext = st.text_input('**Go on! Ask me anything:**')
-        if st.button("SUBMIT"):
-            question=mytext
-            response = openai.Completion.create(
-                model="text-davinci-003",
-                prompt=f'''Your name is HyperBot and knowledge cutoff date is 2021-09, and you are not aware of any events after that time. if the
-                Answer to following questions is not from your knowledge base or in case of queries like weather
-                updates / stock updates / current news or people which requires you to have internet connection then print i don't have access to internet to answer your question,
-                if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
-                if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
-                \nQuestion-{question}
-                \nAnswer -''',
-                temperature=0.49,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
-            )
-            string_temp=response.choices[0].text
-            if ("gen_draw" in string_temp):
-                try:
-                    try:
-                        wget.download(openai_response(prompt))
-                        img2 = Image.open(wget.download(openai_response(prompt)))
-                        img2.show()
-                        rx = 'Image returned'
-                        g_sheet_log(mytext, rx)
-                    except:
-                        urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
-                        img = Image.open("img_ret.png")
-                        img.show()
-                        rx = 'Image returned'
-                        g_sheet_log(mytext, rx)
+                    wget.download(openai_response(prompt))
+                    img2 = Image.open(wget.download(openai_response(prompt)))
+                    img2.show()
+                    rx = 'Image returned'
+                    g_sheet_log(mytext, rx)
                 except:
-                    # Set up our initial generation parameters.
-                    answers = stability_api.generate(
-                        prompt = mytext,
-                        seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
-                        # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
-                        # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
-                        steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
-                        cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
-                        # Setting this value higher increases the strength in which it tries to match your prompt.
-                        # Defaults to 7.0 if not specified.
-                        width=512, # Generation width, defaults to 512 if not included.
-                        height=512, # Generation height, defaults to 512 if not included.
-                        samples=1, # Number of images to generate, defaults to 1 if not included.
-                        sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
-                        # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
-                        # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
-                    )
-
-                    # Set up our warning to print to the console if the adult content classifier is tripped.
-                    # If adult content classifier is not tripped, save generated images.
-                    for resp in answers:
-                        for artifact in resp.artifacts:
-                            if artifact.finish_reason == generation.FILTER:
-                                warnings.warn(
-                                    "Your request activated the API's safety filters and could not be processed."
-                                    "Please modify the prompt and try again.")
-                            if artifact.type == generation.ARTIFACT_IMAGE:
-                                img = Image.open(io.BytesIO(artifact.binary))
-                                st.image(img)
-                                img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
-                                rx = 'Image returned'
-                                g_sheet_log(mytext, rx)
-
-# except:
-#     st.write('image is being generated please wait...')
-#     def extract_image_description(input_string):
-#         return input_string.split('gen_draw("')[1].split('")')[0]
-#     prompt=extract_image_description(string_temp)
-#     # model_id = "CompVis/stable-diffusion-v1-4"
-#     model_id='runwayml/stable-diffusion-v1-5'
-#     device = "cuda"
-
-
-#     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-#     pipe = pipe.to(device)
-
-#     # prompt = "a photo of an astronaut riding a horse on mars"
-#     image = pipe(prompt).images[0]
-
-#     image.save("astronaut_rides_horse.png")
-#     st.image(image)
-#     # image
-
-            elif ("vid_tube" in string_temp):
-                s = Search(mytext)
-                search_res = s.results
-                first_vid = search_res[0]
-                print(first_vid)
-                string = str(first_vid)
-                video_id = string[string.index('=') + 1:-1]
-                # print(video_id)
-                YoutubeURL = "https://www.youtube.com/watch?v="
-                OurURL = YoutubeURL + video_id
-                st.write(OurURL)
-                st_player(OurURL)
-                ry = 'Youtube link and video returned'
-                g_sheet_log(mytext, ry)
-
-            elif ("don't" in string_temp or "internet" in string_temp):
-                st.write('searching internet ')
-                search_internet(question)
-                rz = 'Internet result returned'
-                g_sheet_log(mytext, string_temp)
-
-            else:
-                st.write(string_temp)
-                g_sheet_log(mytext, string_temp)
-
-    elif Input_type == 'SPEECH':
-        option_speech = st.selectbox(
-            'Choose from below: (Options for Transcription)',
-            ('Use Microphone', 'OpenAI Whisper (Upload audio file)')
+                    urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
+                    img = Image.open("img_ret.png")
+                    img.show()
+                    rx = 'Image returned'
+                    g_sheet_log(mytext, rx)
+            except:
+                # Set up our initial generation parameters.
+                answers = stability_api.generate(
+                    prompt = mytext,
+                    seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
+                    # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
+                    # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
+                    steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
+                    cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
+                    # Setting this value higher increases the strength in which it tries to match your prompt.
+                    # Defaults to 7.0 if not specified.
+                    width=512, # Generation width, defaults to 512 if not included.
+                    height=512, # Generation height, defaults to 512 if not included.
+                    samples=1, # Number of images to generate, defaults to 1 if not included.
+                    sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
+                    # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
+                    # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+                )
+ 
-        if option_speech == 'Use Microphone':
-            stt_button = Button(label="Speak", width=100)
-            stt_button.js_on_event("button_click", CustomJS(code="""
-                var recognition = new webkitSpeechRecognition();
-                recognition.continuous = true;
-                recognition.interimResults = true;
-
-                recognition.onresult = function (e) {
-                    var value = "";
-                    for (var i = e.resultIndex; i < e.results.length; ++i) {
-                        if (e.results[i].isFinal) {
-                            value += e.results[i][0].transcript;
-                        }
-                    }
-                    if ( value != "") {
-                        document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+                # Set up our warning to print to the console if the adult content classifier is tripped.
+                # If adult content classifier is not tripped, save generated images.
+                for resp in answers:
+                    for artifact in resp.artifacts:
+                        if artifact.finish_reason == generation.FILTER:
+                            warnings.warn(
+                                "Your request activated the API's safety filters and could not be processed."
+                                "Please modify the prompt and try again.")
+                        if artifact.type == generation.ARTIFACT_IMAGE:
+                            img = Image.open(io.BytesIO(artifact.binary))
+                            st.image(img)
+                            img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
+                            rx = 'Image returned'
+                            g_sheet_log(mytext, rx)
+
+        # except:
+        #     st.write('image is being generated please wait...')
+        #     def extract_image_description(input_string):
+        #         return input_string.split('gen_draw("')[1].split('")')[0]
+        #     prompt=extract_image_description(string_temp)
+        #     # model_id = "CompVis/stable-diffusion-v1-4"
+        #     model_id='runwayml/stable-diffusion-v1-5'
+        #     device = "cuda"
+
+
+        #     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+        #     pipe = pipe.to(device)
+
+        #     # prompt = "a photo of an astronaut riding a horse on mars"
+        #     image = pipe(prompt).images[0]
+
+        #     image.save("astronaut_rides_horse.png")
+        #     st.image(image)
+        #     # image
+
+        elif ("vid_tube" in string_temp):
+            s = Search(mytext)
+            search_res = s.results
+            first_vid = search_res[0]
+            print(first_vid)
+            string = str(first_vid)
+            video_id = string[string.index('=') + 1:-1]
+            # print(video_id)
+            YoutubeURL = "https://www.youtube.com/watch?v="
+            OurURL = YoutubeURL + video_id
+            st.write(OurURL)
+            st_player(OurURL)
+            ry = 'Youtube link and video returned'
+            g_sheet_log(mytext, ry)
+
+        elif ("don't" in string_temp or "internet" in string_temp):
+            st.write('searching internet ')
+            search_internet(question)
+            rz = 'Internet result returned'
+            g_sheet_log(mytext, string_temp)
+
+        else:
+            st.write(string_temp)
+            g_sheet_log(mytext, string_temp)
+
+elif Input_type == 'SPEECH':
+    option_speech = st.selectbox(
+        'Choose from below: (Options for Transcription)',
+        ('Use Microphone', 'OpenAI Whisper (Upload audio file)')
+    )
+
+    if option_speech == 'Use Microphone':
+        stt_button = Button(label="Speak", width=100)
+        stt_button.js_on_event("button_click", CustomJS(code="""
+            var recognition = new webkitSpeechRecognition();
+            recognition.continuous = true;
+            recognition.interimResults = true;
+
+            recognition.onresult = function (e) {
+                var value = "";
+                for (var i = e.resultIndex; i < e.results.length; ++i) {
+                    if (e.results[i].isFinal) {
+                        value += e.results[i][0].transcript;
                     }
                 }
-                recognition.start();
-            """))
-
-            result = streamlit_bokeh_events(
-                stt_button,
-                events="GET_TEXT",
-                key="listen",
-                refresh_on_update=False,
-                override_height=75,
-                debounce_time=0)
-
-            if result:
-                if "GET_TEXT" in result:
-                    question = result.get("GET_TEXT")
-                    response = openai.Completion.create(
-                        model="text-davinci-003",
-                        prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
-                        Answer to following questions is not from your knowledge base or in case of queries like weather
-                        updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-                        if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                        if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                        \nQuestion-{question}
-                        \nAnswer -''',
-                        temperature=0.49,
-                        max_tokens=256,
-                        top_p=1,
-                        frequency_penalty=0,
-                        presence_penalty=0
-                    )
-                    string_temp=response.choices[0].text
-
-                    if ("gen_draw" in string_temp):
-                        st.write('*image is being generated please wait..* ')
-                        def extract_image_description(input_string):
-                            return input_string.split('gen_draw("')[1].split('")')[0]
-                        prompt=extract_image_description(string_temp)
-                        # model_id = "CompVis/stable-diffusion-v1-4"
-                        model_id='runwayml/stable-diffusion-v1-5'
-                        device = "cuda"
-
-                        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-                        pipe = pipe.to(device)
+                if ( value != "") {
+                    document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+                }
+            }
+            recognition.start();
+            """))
 
-                        # prompt = "a photo of an astronaut riding a horse on mars"
-                        image = pipe(prompt).images[0]
-
-                        image.save("astronaut_rides_horse.png")
-                        st.image(image)
-                        # image
-
-                    elif ("vid_tube" in string_temp):
-                        s = Search(question)
-                        search_res = s.results
-                        first_vid = search_res[0]
-                        print(first_vid)
-                        string = str(first_vid)
-                        video_id = string[string.index('=') + 1:-1]
-                        # print(video_id)
-                        YoutubeURL = "https://www.youtube.com/watch?v="
-                        OurURL = YoutubeURL + video_id
-                        st.write(OurURL)
-                        st_player(OurURL)
+        result = streamlit_bokeh_events(
+            stt_button,
+            events="GET_TEXT",
+            key="listen",
+            refresh_on_update=False,
+            override_height=75,
+            debounce_time=0)
 
-                    elif ("don't" in string_temp or "internet" in string_temp ):
-                        st.write('*searching internet*')
-                        search_internet(question)
-                    else:
-                        st.write(string_temp)
-
-        elif option_speech == 'OpenAI Whisper (Upload audio file)':
-            audio_file = st.file_uploader("Upload Audio file",type=['wav', 'mp3'])
-            if audio_file is not None:
-                # file = open(audio_file, "rb")
-                st.audio(audio_file)
-                transcription = openai.Audio.transcribe("whisper-1", audio_file)
-                st.write(transcription["text"])
-                result = transcription["text"]
-                question = result
+        if result:
+            if "GET_TEXT" in result:
+                question = result.get("GET_TEXT")
                 response = openai.Completion.create(
                     model="text-davinci-003",
                     prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
@@ -707,7 +424,6 @@ elif Usage == 'Ask me anything!😊':
                     frequency_penalty=0,
                     presence_penalty=0
                 )
-
                 string_temp=response.choices[0].text
 
                 if ("gen_draw" in string_temp):
@@ -747,77 +463,70 @@ elif Usage == 'Ask me anything!😊':
                     search_internet(question)
                 else:
                     st.write(string_temp)
+
+    elif option_speech == 'OpenAI Whisper (Upload audio file)':
+        audio_file = st.file_uploader("Upload Audio file",type=['wav', 'mp3'])
+        if audio_file is not None:
+            # file = open(audio_file, "rb")
+            st.audio(audio_file)
+            transcription = openai.Audio.transcribe("whisper-1", audio_file)
+            st.write(transcription["text"])
+            result = transcription["text"]
+            question = result
+            response = openai.Completion.create(
+                model="text-davinci-003",
+                prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
+                Answer to following questions is not from your knowledge base or in case of queries like weather
+                updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+                if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+                if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+                \nQuestion-{question}
+                \nAnswer -''',
+                temperature=0.49,
+                max_tokens=256,
+                top_p=1,
+                frequency_penalty=0,
+                presence_penalty=0
+            )
 
-# except:
-#     pass
-# st.text("Record your audio, **max length - 5 seconds**")
-# if st.button("Record"):
-#     st.write("Recording...")
-#     audio_file = record_audio()
-#     st.write("Recording complete.")
-#     file = open(audio_file, "rb")
-
-# # Play the recorded audio
-# st.audio(audio_file)
+            string_temp=response.choices[0].text
+
+            if ("gen_draw" in string_temp):
+                st.write('*image is being generated please wait..* ')
+                def extract_image_description(input_string):
+                    return input_string.split('gen_draw("')[1].split('")')[0]
+                prompt=extract_image_description(string_temp)
+                # model_id = "CompVis/stable-diffusion-v1-4"
+                model_id='runwayml/stable-diffusion-v1-5'
+                device = "cuda"
+
+                pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+                pipe = pipe.to(device)
+
+                # prompt = "a photo of an astronaut riding a horse on mars"
+                image = pipe(prompt).images[0]
+
+                image.save("astronaut_rides_horse.png")
+                st.image(image)
+                # image
 
-# transcription = openai.Audio.transcribe("whisper-1", file)
-# result = transcription["text"]
-# st.write(f"Fetched from audio - {result}")
-
-# question = result
-# response = openai.Completion.create(
-#     model="text-davinci-003",
-#     prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
-#     Answer to following questions is not from your knowledge base or in case of queries like weather
-#     updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-#     if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-#     if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-#     \nQuestion-{question}
-#     \nAnswer -''',
-#     temperature=0.49,
-#     max_tokens=256,
-#     top_p=1,
-#     frequency_penalty=0,
-#     presence_penalty=0
-# )
-# string_temp=response.choices[0].text
-
-# if ("gen_draw" in string_temp):
-#     st.write('*image is being generated please wait..* ')
-#     def extract_image_description(input_string):
-#         return input_string.split('gen_draw("')[1].split('")')[0]
-#     prompt=extract_image_description(string_temp)
-#     # model_id = "CompVis/stable-diffusion-v1-4"
-#     model_id='runwayml/stable-diffusion-v1-5'
-#     device = "cuda"
-
-#     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-#     pipe = pipe.to(device)
-
-#     # prompt = "a photo of an astronaut riding a horse on mars"
-#     image = pipe(prompt).images[0]
-
-#     image.save("astronaut_rides_horse.png")
-#     st.image(image)
-#     # image
-
-# elif ("vid_tube" in string_temp):
-#     s = Search(question)
-#     search_res = s.results
-#     first_vid = search_res[0]
-#     print(first_vid)
-#     string = str(first_vid)
-#     video_id = string[string.index('=') + 1:-1]
-#     # print(video_id)
-#     YoutubeURL = "https://www.youtube.com/watch?v="
-#     OurURL = YoutubeURL + video_id
-#     st.write(OurURL)
-#     st_player(OurURL)
-
-# elif ("don't" in string_temp or "internet" in string_temp ):
-#     st.write('*searching internet*')
-#     search_internet(question)
-# else:
-#     st.write(string_temp)
+            elif ("vid_tube" in string_temp):
+                s = Search(question)
+                search_res = s.results
+                first_vid = search_res[0]
+                print(first_vid)
+                string = str(first_vid)
+                video_id = string[string.index('=') + 1:-1]
+                # print(video_id)
+                YoutubeURL = "https://www.youtube.com/watch?v="
+                OurURL = YoutubeURL + video_id
+                st.write(OurURL)
+                st_player(OurURL)
+
+            elif ("don't" in string_temp or "internet" in string_temp ):
+                st.write('*searching internet*')
+                search_internet(question)
+            else:
+                st.write(string_temp)
 else:
     pass