CobaltZvc committed on
Commit 5c139f2
1 Parent(s): 3ad9cdb

Update app.py

Files changed (1)
  1. app.py +100 -13
app.py CHANGED
@@ -615,21 +615,37 @@ elif Usage == 'Random Questions':
 
         elif Input_type == 'SPEECH':
             try:
-                st.text("Record your audio, **max length - 5 seconds**")
-                if st.button("Record"):
-                    st.write("Recording...")
-                    audio_file = record_audio()
-                    st.write("Recording complete.")
-                    file = open(audio_file, "rb")
+                stt_button = Button(label="Speak", width=100)
+                stt_button.js_on_event("button_click", CustomJS(code="""
+                    var recognition = new webkitSpeechRecognition();
+                    recognition.continuous = true;
+                    recognition.interimResults = true;
+
+                    recognition.onresult = function (e) {
+                        var value = "";
+                        for (var i = e.resultIndex; i < e.results.length; ++i) {
+                            if (e.results[i].isFinal) {
+                                value += e.results[i][0].transcript;
+                            }
+                        }
+                        if ( value != "") {
+                            document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+                        }
+                    }
+                    recognition.start();
+                    """))
 
-                    # Play the recorded audio
-                    st.audio(audio_file)
+                result = streamlit_bokeh_events(
+                    stt_button,
+                    events="GET_TEXT",
+                    key="listen",
+                    refresh_on_update=False,
+                    override_height=75,
+                    debounce_time=0)
 
-                    transcription = openai.Audio.transcribe("whisper-1", file)
-                    result = transcription["text"]
-                    st.write(f"Fetched from audio - {result}")
-
-                    question = result
+                if result:
+                    if "GET_TEXT" in result:
+                        question = result.get("GET_TEXT")
                 response = openai.Completion.create(
                     model="gpt-3.5-turbo",
                     prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
@@ -684,8 +700,79 @@ elif Usage == 'Random Questions':
                     search_internet(question)
                 else:
                     st.write(string_temp)
+
+
 
             except:
                 pass
+            # st.text("Record your audio, **max length - 5 seconds**")
+            # if st.button("Record"):
+            #     st.write("Recording...")
+            #     audio_file = record_audio()
+            #     st.write("Recording complete.")
+            #     file = open(audio_file, "rb")
+
+            #     # Play the recorded audio
+            #     st.audio(audio_file)
+
+            #     transcription = openai.Audio.transcribe("whisper-1", file)
+            #     result = transcription["text"]
+            #     st.write(f"Fetched from audio - {result}")
+
+            #     question = result
+            #     response = openai.Completion.create(
+            #         model="gpt-3.5-turbo",
+            #         prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
+            #         Answer to following questions is not from your knowledge base or in case of queries like weather
+            #         updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+            #         if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+            #         if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+            #         \nQuestion-{question}
+            #         \nAnswer -''',
+            #         temperature=0.49,
+            #         max_tokens=256,
+            #         top_p=1,
+            #         frequency_penalty=0,
+            #         presence_penalty=0
+            #     )
+            #     string_temp=response.choices[0].text
+
+            #     if ("gen_draw" in string_temp):
+            #         st.write('*image is being generated please wait..* ')
+            #         def extract_image_description(input_string):
+            #             return input_string.split('gen_draw("')[1].split('")')[0]
+            #         prompt=extract_image_description(string_temp)
+            #         # model_id = "CompVis/stable-diffusion-v1-4"
+            #         model_id='runwayml/stable-diffusion-v1-5'
+            #         device = "cuda"
+
+            #         pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+            #         pipe = pipe.to(device)
+
+            #         # prompt = "a photo of an astronaut riding a horse on mars"
+            #         image = pipe(prompt).images[0]
+
+            #         image.save("astronaut_rides_horse.png")
+            #         st.image(image)
+            #         # image
+
+            #     elif ("vid_tube" in string_temp):
+            #         s = Search(question)
+            #         search_res = s.results
+            #         first_vid = search_res[0]
+            #         print(first_vid)
+            #         string = str(first_vid)
+            #         video_id = string[string.index('=') + 1:-1]
+            #         # print(video_id)
+            #         YoutubeURL = "https://www.youtube.com/watch?v="
+            #         OurURL = YoutubeURL + video_id
+            #         st.write(OurURL)
+            #         st_player(OurURL)
+
+            #     elif ("don't" in string_temp or "internet" in string_temp ):
+            #         st.write('*searching internet*')
+            #         search_internet(question)
+            #     else:
+            #         st.write(string_temp)
         else:
             pass
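
For reference, the new SPEECH path above relies on the browser's Web Speech API, wired into Streamlit through a Bokeh button and the streamlit-bokeh-events component, instead of server-side recording plus Whisper transcription. The sketch below shows that pattern as a minimal standalone app; the imports are assumptions, since this diff does not touch the import section at the top of app.py.

# Minimal sketch of the speech-to-text pattern introduced by this commit.
# Assumed dependencies (not visible in the diff): streamlit, bokeh, streamlit-bokeh-events.
import streamlit as st
from bokeh.models import Button, CustomJS
from streamlit_bokeh_events import streamlit_bokeh_events

# Bokeh button whose click handler runs entirely in the browser: it starts
# webkitSpeechRecognition and re-emits each final transcript as a custom
# "GET_TEXT" DOM event that the Streamlit side can listen for.
stt_button = Button(label="Speak", width=100)
stt_button.js_on_event("button_click", CustomJS(code="""
    var recognition = new webkitSpeechRecognition();
    recognition.continuous = true;
    recognition.interimResults = true;
    recognition.onresult = function (e) {
        var value = "";
        for (var i = e.resultIndex; i < e.results.length; ++i) {
            if (e.results[i].isFinal) {
                value += e.results[i][0].transcript;
            }
        }
        if (value != "") {
            document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
        }
    }
    recognition.start();
"""))

# Render the button and return the payload of the last "GET_TEXT" event
# (None until the user has spoken and a final result has been dispatched).
result = streamlit_bokeh_events(
    stt_button,
    events="GET_TEXT",
    key="listen",
    refresh_on_update=False,
    override_height=75,
    debounce_time=0,
)

if result and "GET_TEXT" in result:
    question = result.get("GET_TEXT")
    st.write(f"Fetched from speech - {question}")

Note that webkitSpeechRecognition is not implemented in every browser (Firefox, for example, lacks it), so the Speak button only works where the Web Speech API is available.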