CobaltZvc committed on
Commit
270a0ee
1 Parent(s): 683e94b

Update app.py

Files changed (1)
  1. app.py +118 -49
app.py CHANGED
@@ -389,13 +389,15 @@ elif Input_type == 'SPEECH':
  st.text(question)
  response = openai.Completion.create(
  model="text-davinci-003",
- prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
- Answer to following questions is not from your knowledge base or in case of queries like weather
- updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
- if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
- if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
- \nQuestion-{question}
- \nAnswer -''',
+ prompt=f'''Your name is HyperBot and knowledge cutoff date is 2021-09, and you are not aware of any events after that time. if the
+ Answer to following questions is not from your knowledge base or in case of queries like date, time, weather
+ updates / stock updates / current affairs / news or people which requires you to have internet connection then print i don't have access to internet to answer your question,
+ if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+ if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+ if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
+ if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
+ \nQuestion-{question}
+ \nAnswer -''',
  temperature=0.49,
  max_tokens=256,
  top_p=1,
@@ -405,24 +407,56 @@ elif Input_type == 'SPEECH':
  string_temp=response.choices[0].text

  if ("gen_draw" in string_temp):
- st.write('*image is being generated please wait..* ')
- def extract_image_description(input_string):
- return input_string.split('gen_draw("')[1].split('")')[0]
- prompt=extract_image_description(string_temp)
- # model_id = "CompVis/stable-diffusion-v1-4"
- model_id='runwayml/stable-diffusion-v1-5'
- device = "cuda"
-
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
- pipe = pipe.to(device)
-
- # prompt = "a photo of an astronaut riding a horse on mars"
- image = pipe(prompt).images[0]
-
- image.save("astronaut_rides_horse.png")
- st.image(image)
- # image
-
+ try:
+ try:
+ wget.download(openai_response(prompt))
+ img2 = Image.open(wget.download(openai_response(prompt)))
+ img2.show()
+ rx = 'Image returned'
+ now = datetime.datetime.now()
+ date_time = now.strftime("%Y-%m-%d %H:%M:%S")
+ csv_logs(mytext, rx, date_time)
+ except:
+ urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
+ img = Image.open("img_ret.png")
+ img.show()
+ rx = 'Image returned'
+ now = datetime.datetime.now()
+ date_time = now.strftime("%Y-%m-%d %H:%M:%S")
+ csv_logs(mytext, rx, date_time)
+ except:
+ # Set up our initial generation parameters.
+ answers = stability_api.generate(
+ prompt = mytext,
+ seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
+ # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
+ # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
+ steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
+ cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
+ # Setting this value higher increases the strength in which it tries to match your prompt.
+ # Defaults to 7.0 if not specified.
+ width=512, # Generation width, defaults to 512 if not included.
+ height=512, # Generation height, defaults to 512 if not included.
+ samples=1, # Number of images to generate, defaults to 1 if not included.
+ sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
+ # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
+ # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+ )
+
+ for resp in answers:
+ for artifact in resp.artifacts:
+ if artifact.finish_reason == generation.FILTER:
+ warnings.warn(
+ "Your request activated the API's safety filters and could not be processed."
+ "Please modify the prompt and try again.")
+ if artifact.type == generation.ARTIFACT_IMAGE:
+ img = Image.open(io.BytesIO(artifact.binary))
+ st.image(img)
+ img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
+ rx = 'Image returned'
+ # g_sheet_log(mytext, rx)
+ csv_logs(mytext, rx, date_time)
+
  elif ("vid_tube" in string_temp):
  s = Search(question)
  search_res = s.results
@@ -463,13 +497,15 @@ elif Input_type == 'SPEECH':
  question = result
  response = openai.Completion.create(
  model="text-davinci-003",
- prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
- Answer to following questions is not from your knowledge base or in case of queries like weather
- updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
- if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
- if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
- \nQuestion-{question}
- \nAnswer -''',
+ prompt=f'''Your name is HyperBot and knowledge cutoff date is 2021-09, and you are not aware of any events after that time. if the
+ Answer to following questions is not from your knowledge base or in case of queries like date, time, weather
+ updates / stock updates / current affairs / news or people which requires you to have internet connection then print i don't have access to internet to answer your question,
+ if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+ if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
+ if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
+ if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
+ \nQuestion-{question}
+ \nAnswer -''',
  temperature=0.49,
  max_tokens=256,
  top_p=1,
@@ -480,23 +516,56 @@ elif Input_type == 'SPEECH':
  string_temp=response.choices[0].text

  if ("gen_draw" in string_temp):
- st.write('*image is being generated please wait..* ')
- def extract_image_description(input_string):
- return input_string.split('gen_draw("')[1].split('")')[0]
- prompt=extract_image_description(string_temp)
- # model_id = "CompVis/stable-diffusion-v1-4"
- model_id='runwayml/stable-diffusion-v1-5'
- device = "cuda"
-
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
- pipe = pipe.to(device)
-
- # prompt = "a photo of an astronaut riding a horse on mars"
- image = pipe(prompt).images[0]
-
- image.save("astronaut_rides_horse.png")
- st.image(image)
- # image
+ try:
+ try:
+ wget.download(openai_response(prompt))
+ img2 = Image.open(wget.download(openai_response(prompt)))
+ img2.show()
+ rx = 'Image returned'
+ now = datetime.datetime.now()
+ date_time = now.strftime("%Y-%m-%d %H:%M:%S")
+ csv_logs(mytext, rx, date_time)
+ except:
+ urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
+ img = Image.open("img_ret.png")
+ img.show()
+ rx = 'Image returned'
+ now = datetime.datetime.now()
+ date_time = now.strftime("%Y-%m-%d %H:%M:%S")
+ csv_logs(mytext, rx, date_time)
+ except:
+ # Set up our initial generation parameters.
+ answers = stability_api.generate(
+ prompt = mytext,
+ seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
+ # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
+ # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
+ steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
+ cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
+ # Setting this value higher increases the strength in which it tries to match your prompt.
+ # Defaults to 7.0 if not specified.
+ width=512, # Generation width, defaults to 512 if not included.
+ height=512, # Generation height, defaults to 512 if not included.
+ samples=1, # Number of images to generate, defaults to 1 if not included.
+ sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
+ # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
+ # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+ )
+
+ for resp in answers:
+ for artifact in resp.artifacts:
+ if artifact.finish_reason == generation.FILTER:
+ warnings.warn(
+ "Your request activated the API's safety filters and could not be processed."
+ "Please modify the prompt and try again.")
+ if artifact.type == generation.ARTIFACT_IMAGE:
+ img = Image.open(io.BytesIO(artifact.binary))
+ st.image(img)
+ img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
+ rx = 'Image returned'
+ # g_sheet_log(mytext, rx)
+ csv_logs(mytext, rx, date_time)
+

  elif ("vid_tube" in string_temp):
  s = Search(question)
 
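Note: the added hunks call stability_api.generate(...) and reference the generation protobuf module without showing where they are set up; that presumably happens earlier in app.py. For context, a minimal sketch of the usual stability-sdk client setup follows. It is not part of this commit, and the environment-variable name and engine id are assumptions for illustration only.

import io
import os
import warnings

from PIL import Image
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation

stability_api = client.StabilityInference(
    key=os.environ["STABILITY_KEY"],  # API key, assumed to be provided via the environment
    verbose=True,                     # print SDK debug messages
    engine="stable-diffusion-v1-5",   # engine id is an assumption, not taken from app.py
)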