CobaltZvc committed on
Commit
2ba3b06
1 Parent(s): 651a59a

Update app.py

Files changed (1)
  1. app.py +209 -208
app.py CHANGED
@@ -446,222 +446,223 @@ elif Usage == 'Random Questions':
  6. Current Affairs and News.
  7. Create or compose tweets or Linkedin posts or email.''')

-    Input_type = st.radio(
-        "**Input type:**",
-        ('TEXT', 'SPEECH')
-        )
-
-    if Input_type == 'TEXT':
-        #page_bg_img2 = """
-        #<style>
-        #[data-testid="stAppViewContainer"] {
-        #background-color: #e5e5f7;
-        #opacity: 0.8;
-        #background-size: 20px 20px;
-        #background-image: repeating-linear-gradient(0deg, #32d947, #32d947 1px, #e5e5f7 1px, #e5e5f7);
-        #}
-        #</style>
-        #"""
-        #st.markdown(page_bg_img, unsafe_allow_html=True)
-        st.write('**You are now in Text input mode**')
-        mytext = st.text_input('**Go on! Ask me anything:**')
-        if st.button("SUBMIT"):
-            question=mytext
-            response = openai.Completion.create(
-                model="text-davinci-003",
-                prompt=f'''Your name is alexa and knowledge cutoff date is 2021-09, and it is not aware of any events after that time. if the
-                Answer to following questions is not from your knowledge base or in case of queries like weather
-                updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-                if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
-                if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
-                \nQuestion-{question}
-                \nAnswer -''',
-                temperature=0.49,
-                max_tokens=256,
-                top_p=1,
-                frequency_penalty=0,
-                presence_penalty=0
-            )
-            string_temp=response.choices[0].text

-            if ("gen_draw" in string_temp):
                try:
-                    try:
-                        wget.download(openai_response(prompt))
-                        img2 = Image.open(wget.download(openai_response(prompt)))
-                        img2.show()
-                        rx = 'Image returned'
-                        g_sheet_log(mytext, rx)
-                    except:
-                        urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
-                        img = Image.open("img_ret.png")
-                        img.show()
-                        rx = 'Image returned'
-                        g_sheet_log(mytext, rx)
                except:
-                    # Set up our initial generation parameters.
-                    answers = stability_api.generate(
-                        prompt = mytext,
-                        seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
-                        # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
-                        # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
-                        steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
-                        cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
-                        # Setting this value higher increases the strength in which it tries to match your prompt.
-                        # Defaults to 7.0 if not specified.
-                        width=512, # Generation width, defaults to 512 if not included.
-                        height=512, # Generation height, defaults to 512 if not included.
-                        samples=1, # Number of images to generate, defaults to 1 if not included.
-                        sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
-                        # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
-                        # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
-                    )
-
-                    # Set up our warning to print to the console if the adult content classifier is tripped.
-                    # If adult content classifier is not tripped, save generated images.
-                    for resp in answers:
-                        for artifact in resp.artifacts:
-                            if artifact.finish_reason == generation.FILTER:
-                                warnings.warn(
-                                    "Your request activated the API's safety filters and could not be processed."
-                                    "Please modify the prompt and try again.")
-                            if artifact.type == generation.ARTIFACT_IMAGE:
-                                img = Image.open(io.BytesIO(artifact.binary))
-                                st.image(img)
-                                img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
-                                rx = 'Image returned'
-                                g_sheet_log(mytext, rx)
-
-                # except:
-                #     st.write('image is being generated please wait...')
-                #     def extract_image_description(input_string):
-                #         return input_string.split('gen_draw("')[1].split('")')[0]
-                #     prompt=extract_image_description(string_temp)
-                #     # model_id = "CompVis/stable-diffusion-v1-4"
-                #     model_id='runwayml/stable-diffusion-v1-5'
-                #     device = "cuda"
-
-
-                #     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-                #     pipe = pipe.to(device)
-
-                #     # prompt = "a photo of an astronaut riding a horse on mars"
-                #     image = pipe(prompt).images[0]
-
-                #     image.save("astronaut_rides_horse.png")
-                #     st.image(image)
-                #     # image
-
-            elif ("vid_tube" in string_temp):
-                s = Search(mytext)
-                search_res = s.results
-                first_vid = search_res[0]
-                print(first_vid)
-                string = str(first_vid)
-                video_id = string[string.index('=') + 1:-1]
-                # print(video_id)
-                YoutubeURL = "https://www.youtube.com/watch?v="
-                OurURL = YoutubeURL + video_id
-                st.write(OurURL)
-                st_player(OurURL)
-                ry = 'Youtube link and video returned'
-                g_sheet_log(mytext, ry)
-
-            elif ("don't" in string_temp or "internet" in string_temp):
-                st.write('searching internet ')
-                search_internet(question)
-                rz = 'Internet result returned'
-                g_sheet_log(mytext, rz)
-
-            else:
-                st.write(string_temp)
-                g_sheet_log(mytext, string_temp)
-
-    elif Input_type == 'SPEECH':
-        stt_button = Button(label="Speak", width=100)
-        stt_button.js_on_event("button_click", CustomJS(code="""
-            var recognition = new webkitSpeechRecognition();
-            recognition.continuous = true;
-            recognition.interimResults = true;
-            recognition.onresult = function (e) {
-                var value = "";
-                for (var i = e.resultIndex; i < e.results.length; ++i) {
-                    if (e.results[i].isFinal) {
-                        value += e.results[i][0].transcript;
-                    }
-                }
-                if ( value != "") {
-                    document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
-                }
-            }
-            recognition.start();
-            """))

-        result = streamlit_bokeh_events(
-            stt_button,
-            events="GET_TEXT",
-            key="listen",
-            refresh_on_update=False,
-            override_height=75,
-            debounce_time=0)

-        if result:
-            if "GET_TEXT" in result:
-                st.write(result.get("GET_TEXT"))
-                question = result.get("GET_TEXT")
-                response = openai.Completion.create(
-                    model="text-davinci-003",
-                    prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
-                    Answer to following questions is not from your knowledge base or in case of queries like weather
-                    updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-                    if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                    if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                    \nQuestion-{question}
-                    \nAnswer -''',
-                    temperature=0.49,
-                    max_tokens=256,
-                    top_p=1,
-                    frequency_penalty=0,
-                    presence_penalty=0
-                )
-                string_temp=response.choices[0].text

-                if ("gen_draw" in string_temp):
-                    st.write('*image is being generated please wait..* ')
-                    def extract_image_description(input_string):
-                        return input_string.split('gen_draw("')[1].split('")')[0]
-                    prompt=extract_image_description(string_temp)
-                    # model_id = "CompVis/stable-diffusion-v1-4"
-                    model_id='runwayml/stable-diffusion-v1-5'
-                    device = "cuda"

-                    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-                    pipe = pipe.to(device)

-                    # prompt = "a photo of an astronaut riding a horse on mars"
-                    image = pipe(prompt).images[0]

-                    image.save("astronaut_rides_horse.png")
-                    st.image(image)
-                    # image

-                elif ("vid_tube" in string_temp):
-                    s = Search(question)
-                    search_res = s.results
-                    first_vid = search_res[0]
-                    print(first_vid)
-                    string = str(first_vid)
-                    video_id = string[string.index('=') + 1:-1]
-                    # print(video_id)
-                    YoutubeURL = "https://www.youtube.com/watch?v="
-                    OurURL = YoutubeURL + video_id
-                    st.write(OurURL)
-                    st_player(OurURL)

-                elif ("don't" in string_temp or "internet" in string_temp ):
-                    st.write('*searching internet*')
-                    search_internet(question)
-                else:
-                    st.write(string_temp)
 
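Both the removed block above and the re-added block below rely on the same dispatch pattern: the completion prompt asks the model to answer either in plain text or with a pseudo-call such as gen_draw("...") or vid_tube("..."), and the app then routes on substring matches. A minimal sketch of that dispatch step, with hypothetical handler names standing in for the Streamlit branches (only extract_call_arg mirrors the app's own extract_image_description helper):

    def extract_call_arg(reply: str, tag: str) -> str:
        # Pull the quoted argument out of e.g. 'gen_draw("a red fox")'.
        return reply.split(f'{tag}("')[1].split('")')[0]

    def route_reply(reply: str, question: str) -> str:
        if "gen_draw" in reply:
            return "IMAGE: " + extract_call_arg(reply, "gen_draw")
        elif "vid_tube" in reply:
            return "VIDEO: " + extract_call_arg(reply, "vid_tube")
        elif "don't" in reply or "internet" in reply:
            return "SEARCH: " + question  # app.py calls search_internet(question) here
        return reply  # plain answer: shown as-is

    # route_reply('gen_draw("a watercolor fox")', "draw a fox") -> 'IMAGE: a watercolor fox'
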
  6. Current Affairs and News.
  7. Create or compose tweets or Linkedin posts or email.''')

+    # Input_type = st.radio(
+    #     "**Input type:**",
+    #     ('TEXT', 'SPEECH')
+    #     )

+    # if Input_type == 'TEXT':
+    #page_bg_img2 = """
+    #<style>
+    #[data-testid="stAppViewContainer"] {
+    #background-color: #e5e5f7;
+    #opacity: 0.8;
+    #background-size: 20px 20px;
+    #background-image: repeating-linear-gradient(0deg, #32d947, #32d947 1px, #e5e5f7 1px, #e5e5f7);
+    #}
+    #</style>
+    #"""
+    #st.markdown(page_bg_img, unsafe_allow_html=True)
+    st.write('**You are now in Text input mode**')
+    mytext = st.text_input('**Go on! Ask me anything:**')
+    if st.button("SUBMIT"):
+        question=mytext
+        response = openai.Completion.create(
+            model="text-davinci-003",
+            prompt=f'''Your name is HyperBot and knowledge cutoff date is 2021-09, and you are not aware of any events after that time. if the
+            Answer to following questions is not from your knowledge base or in case of queries like weather
+            updates / stock updates / current news or people which requires you to have internet connection then print i don't have access to internet to answer your question,
+            if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+            if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+            if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
+            if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
+            \nQuestion-{question}
+            \nAnswer -''',
+            temperature=0.49,
+            max_tokens=256,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0
+        )
+        string_temp=response.choices[0].text
+
+        if ("gen_draw" in string_temp):
+            try:
                try:
+                    wget.download(openai_response(prompt))
+                    img2 = Image.open(wget.download(openai_response(prompt)))
+                    img2.show()
+                    rx = 'Image returned'
+                    g_sheet_log(mytext, rx)
                except:
+                    urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
+                    img = Image.open("img_ret.png")
+                    img.show()
+                    rx = 'Image returned'
+                    g_sheet_log(mytext, rx)
+            except:
+                # Set up our initial generation parameters.
+                answers = stability_api.generate(
+                    prompt = mytext,
+                    seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
+                    # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
+                    # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
+                    steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
+                    cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
+                    # Setting this value higher increases the strength in which it tries to match your prompt.
+                    # Defaults to 7.0 if not specified.
+                    width=512, # Generation width, defaults to 512 if not included.
+                    height=512, # Generation height, defaults to 512 if not included.
+                    samples=1, # Number of images to generate, defaults to 1 if not included.
+                    sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
+                    # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
+                    # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+                )
+
+                # Set up our warning to print to the console if the adult content classifier is tripped.
+                # If adult content classifier is not tripped, save generated images.
+                for resp in answers:
+                    for artifact in resp.artifacts:
+                        if artifact.finish_reason == generation.FILTER:
+                            warnings.warn(
+                                "Your request activated the API's safety filters and could not be processed."
+                                "Please modify the prompt and try again.")
+                        if artifact.type == generation.ARTIFACT_IMAGE:
+                            img = Image.open(io.BytesIO(artifact.binary))
+                            st.image(img)
+                            img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
+                            rx = 'Image returned'
+                            g_sheet_log(mytext, rx)
+
+            # except:
+            #     st.write('image is being generated please wait...')
+            #     def extract_image_description(input_string):
+            #         return input_string.split('gen_draw("')[1].split('")')[0]
+            #     prompt=extract_image_description(string_temp)
+            #     # model_id = "CompVis/stable-diffusion-v1-4"
+            #     model_id='runwayml/stable-diffusion-v1-5'
+            #     device = "cuda"
+
+
+            #     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+            #     pipe = pipe.to(device)
+
+            #     # prompt = "a photo of an astronaut riding a horse on mars"
+            #     image = pipe(prompt).images[0]
+
+            #     image.save("astronaut_rides_horse.png")
+            #     st.image(image)
+            #     # image
+
+        elif ("vid_tube" in string_temp):
+            s = Search(mytext)
+            search_res = s.results
+            first_vid = search_res[0]
+            print(first_vid)
+            string = str(first_vid)
+            video_id = string[string.index('=') + 1:-1]
+            # print(video_id)
+            YoutubeURL = "https://www.youtube.com/watch?v="
+            OurURL = YoutubeURL + video_id
+            st.write(OurURL)
+            st_player(OurURL)
+            ry = 'Youtube link and video returned'
+            g_sheet_log(mytext, ry)
+
+        elif ("don't" in string_temp or "internet" in string_temp):
+            st.write('searching internet ')
+            search_internet(question)
+            rz = 'Internet result returned'
+            g_sheet_log(mytext, string_temp)
+
+        else:
+            st.write(string_temp)
+            g_sheet_log(mytext, string_temp)
+    else:
+        pass
+    # elif Input_type == 'SPEECH':
+    #     stt_button = Button(label="Speak", width=100)
+    #     stt_button.js_on_event("button_click", CustomJS(code="""
+    #         var recognition = new webkitSpeechRecognition();
+    #         recognition.continuous = true;
+    #         recognition.interimResults = true;
+    #         recognition.onresult = function (e) {
+    #             var value = "";
+    #             for (var i = e.resultIndex; i < e.results.length; ++i) {
+    #                 if (e.results[i].isFinal) {
+    #                     value += e.results[i][0].transcript;
+    #                 }
+    #             }
+    #             if ( value != "") {
+    #                 document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+    #             }
+    #         }
+    #         recognition.start();
+    #         """))

+    #     result = streamlit_bokeh_events(
+    #         stt_button,
+    #         events="GET_TEXT",
+    #         key="listen",
+    #         refresh_on_update=False,
+    #         override_height=75,
+    #         debounce_time=0)

+    #     if result:
+    #         if "GET_TEXT" in result:
+    #             st.write(result.get("GET_TEXT"))
+    #             question = result.get("GET_TEXT")
+    #             response = openai.Completion.create(
+    #                 model="text-davinci-003",
+    #                 prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
+    #                 Answer to following questions is not from your knowledge base or in case of queries like weather
+    #                 updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+    #                 if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+    #                 if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+    #                 \nQuestion-{question}
+    #                 \nAnswer -''',
+    #                 temperature=0.49,
+    #                 max_tokens=256,
+    #                 top_p=1,
+    #                 frequency_penalty=0,
+    #                 presence_penalty=0
+    #             )
+    #             string_temp=response.choices[0].text

+    #             if ("gen_draw" in string_temp):
+    #                 st.write('*image is being generated please wait..* ')
+    #                 def extract_image_description(input_string):
+    #                     return input_string.split('gen_draw("')[1].split('")')[0]
+    #                 prompt=extract_image_description(string_temp)
+    #                 # model_id = "CompVis/stable-diffusion-v1-4"
+    #                 model_id='runwayml/stable-diffusion-v1-5'
+    #                 device = "cuda"

+    #                 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+    #                 pipe = pipe.to(device)

+    #                 # prompt = "a photo of an astronaut riding a horse on mars"
+    #                 image = pipe(prompt).images[0]

+    #                 image.save("astronaut_rides_horse.png")
+    #                 st.image(image)
+    #                 # image

+    #             elif ("vid_tube" in string_temp):
+    #                 s = Search(question)
+    #                 search_res = s.results
+    #                 first_vid = search_res[0]
+    #                 print(first_vid)
+    #                 string = str(first_vid)
+    #                 video_id = string[string.index('=') + 1:-1]
+    #                 # print(video_id)
+    #                 YoutubeURL = "https://www.youtube.com/watch?v="
+    #                 OurURL = YoutubeURL + video_id
+    #                 st.write(OurURL)
+    #                 st_player(OurURL)

+    #             elif ("don't" in string_temp or "internet" in string_temp ):
+    #                 st.write('*searching internet*')
+    #                 search_internet(question)
+    #             else:
+    #                 st.write(string_temp)
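The vid_tube branch in both versions recovers a video id by stringifying the first pytube search result and slicing everything after the first '='. Assuming the same pytube Search API imported in app.py, the top result's URL can also be read off the YouTube object's own attributes; first_watch_url below is a hypothetical helper, not part of the commit:

    from pytube import Search

    def first_watch_url(query: str) -> str:
        # Search(...).results is a list of pytube.YouTube objects, which
        # expose video_id and watch_url directly, so there is no need to
        # slice the repr of the result as the code above does.
        results = Search(query).results
        if not results:
            raise ValueError(f"no YouTube results for {query!r}")
        return results[0].watch_url  # e.g. https://www.youtube.com/watch?v=...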