CobaltZvc committed on
Commit 1cb9ae8
Parent: 137de90

Update app.py

Files changed (1)
  1. app.py +84 -85
app.py CHANGED
@@ -613,97 +613,96 @@ elif Usage == 'Random Questions':
             g_sheet_log(mytext, string_temp)
 
     elif Input_type == 'SPEECH':
-        try:
-            stt_button = Button(label="Speak", width=100)
-            stt_button.js_on_event("button_click", CustomJS(code="""
-                var recognition = new webkitSpeechRecognition();
-                recognition.continuous = true;
-                recognition.interimResults = true;
-
-                recognition.onresult = function (e) {
-                    var value = "";
-                    for (var i = e.resultIndex; i < e.results.length; ++i) {
-                        if (e.results[i].isFinal) {
-                            value += e.results[i][0].transcript;
-                        }
-                    }
-                    if ( value != "") {
-                        document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
-                    }
-                }
-                recognition.start();
-                """))
-
-            result = streamlit_bokeh_events(
-                stt_button,
-                events="GET_TEXT",
-                key="listen",
-                refresh_on_update=False,
-                override_height=75,
-                debounce_time=0)
-
-            if result:
-                if "GET_TEXT" in result:
-                    question = result.get("GET_TEXT")
-                    response = openai.Completion.create(
-                        model="gpt-3.5-turbo",
-                        prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
-                        Answer to following questions is not from your knowledge base or in case of queries like weather
-                        updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-                        if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                        if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                        \nQuestion-{question}
-                        \nAnswer -''',
-                        temperature=0.49,
-                        max_tokens=256,
-                        top_p=1,
-                        frequency_penalty=0,
-                        presence_penalty=0
-                    )
-                    string_temp=response.choices[0].text
-
-                    if ("gen_draw" in string_temp):
-                        st.write('*image is being generated please wait..* ')
-                        def extract_image_description(input_string):
-                            return input_string.split('gen_draw("')[1].split('")')[0]
-                        prompt=extract_image_description(string_temp)
-                        # model_id = "CompVis/stable-diffusion-v1-4"
-                        model_id='runwayml/stable-diffusion-v1-5'
-                        device = "cuda"
-
-                        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-                        pipe = pipe.to(device)
-
-                        # prompt = "a photo of an astronaut riding a horse on mars"
-                        image = pipe(prompt).images[0]
-
-                        image.save("astronaut_rides_horse.png")
-                        st.image(image)
-                        # image
-
-                    elif ("vid_tube" in string_temp):
-                        s = Search(question)
-                        search_res = s.results
-                        first_vid = search_res[0]
-                        print(first_vid)
-                        string = str(first_vid)
-                        video_id = string[string.index('=') + 1:-1]
-                        # print(video_id)
-                        YoutubeURL = "https://www.youtube.com/watch?v="
-                        OurURL = YoutubeURL + video_id
-                        st.write(OurURL)
-                        st_player(OurURL)
-
-                    elif ("don't" in string_temp or "internet" in string_temp ):
-                        st.write('*searching internet*')
-                        search_internet(question)
-                    else:
-                        st.write(string_temp)
-
-        except:
-            pass
+        stt_button = Button(label="Speak", width=100)
+        stt_button.js_on_event("button_click", CustomJS(code="""
+            var recognition = new webkitSpeechRecognition();
+            recognition.continuous = true;
+            recognition.interimResults = true;
+
+            recognition.onresult = function (e) {
+                var value = "";
+                for (var i = e.resultIndex; i < e.results.length; ++i) {
+                    if (e.results[i].isFinal) {
+                        value += e.results[i][0].transcript;
+                    }
+                }
+                if ( value != "") {
+                    document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+                }
+            }
+            recognition.start();
+            """))
+
+        result = streamlit_bokeh_events(
+            stt_button,
+            events="GET_TEXT",
+            key="listen",
+            refresh_on_update=False,
+            override_height=75,
+            debounce_time=0)
+
+        if result:
+            if "GET_TEXT" in result:
+                question = result.get("GET_TEXT")
+                response = openai.Completion.create(
+                    model="gpt-3.5-turbo",
+                    prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
+                    Answer to following questions is not from your knowledge base or in case of queries like weather
+                    updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+                    if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+                    if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+                    \nQuestion-{question}
+                    \nAnswer -''',
+                    temperature=0.49,
+                    max_tokens=256,
+                    top_p=1,
+                    frequency_penalty=0,
+                    presence_penalty=0
+                )
+                string_temp=response.choices[0].text
+
+                if ("gen_draw" in string_temp):
+                    st.write('*image is being generated please wait..* ')
+                    def extract_image_description(input_string):
+                        return input_string.split('gen_draw("')[1].split('")')[0]
+                    prompt=extract_image_description(string_temp)
+                    # model_id = "CompVis/stable-diffusion-v1-4"
+                    model_id='runwayml/stable-diffusion-v1-5'
+                    device = "cuda"
+
+                    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+                    pipe = pipe.to(device)
+
+                    # prompt = "a photo of an astronaut riding a horse on mars"
+                    image = pipe(prompt).images[0]
+
+                    image.save("astronaut_rides_horse.png")
+                    st.image(image)
+                    # image
+
+                elif ("vid_tube" in string_temp):
+                    s = Search(question)
+                    search_res = s.results
+                    first_vid = search_res[0]
+                    print(first_vid)
+                    string = str(first_vid)
+                    video_id = string[string.index('=') + 1:-1]
+                    # print(video_id)
+                    YoutubeURL = "https://www.youtube.com/watch?v="
+                    OurURL = YoutubeURL + video_id
+                    st.write(OurURL)
+                    st_player(OurURL)
+
+                elif ("don't" in string_temp or "internet" in string_temp ):
+                    st.write('*searching internet*')
+                    search_internet(question)
+                else:
+                    st.write(string_temp)
+
+        # except:
+        #     pass
         # st.text("Record your audio, **max length - 5 seconds**")
         # if st.button("Record"):
         #     st.write("Recording...")