File size: 2,849 Bytes
8da06e3
 
 
 
 
 
 
589a4f1
8da06e3
e036234
bd3a391
 
ab4f2c1
 
0ed78ff
bd3a391
ab4f2c1
8da06e3
 
0ed78ff
 
589a4f1
 
 
7beb285
589a4f1
 
216aba4
7beb285
 
589a4f1
 
8da06e3
589a4f1
 
 
8da06e3
589a4f1
 
 
 
 
 
216aba4
589a4f1
216aba4
589a4f1
 
 
216aba4
589a4f1
 
 
 
 
 
 
 
7beb285
589a4f1
0ed78ff
589a4f1
 
 
7beb285
 
589a4f1
 
7beb285
 
 
 
589a4f1
7beb285
8da06e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
import gradio as gr
from deepface import DeepFace
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFont
import time


# Detector backends offered in the UI, as [display label, deepface backend id]
# pairs — the shape gr.Radio expects for its `choices`.
# Fix: the rainbow emoji was mojibake ("๐ŸŒˆ", UTF-8 bytes mis-decoded); restored
# to the intended 🌈 marker here and in dbackendinfo below.
dbackends = [
    ['Haar Cascade (OpenCV)', 'opencv'],
    # ['🌈 Single Shot MultiBox Detector (OpenCV)', 'ssd'],  # for whatever reason fails
    # ['Histogram of Oriented Gradients (Dlib)', 'dlib'],  # dlib seems broken on modern ubuntu
    ['RetinaFace', 'retinaface'],
    ['You Only Look Once v8', 'yolov8'],
    ['🌈 YuNet', 'yunet'],
    # ['Multi-task Cascade Convolutional Neural Network (TensorFlow)', 'mtcnn'],
    ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)', 'fastmtcnn'],
]

# Helper text under the radio control; 🌈 marks detectors needing a color image.
dbackendinfo = 'Detectors with 🌈 require a color image.'


# Build the UI. NOTE: in gr.Blocks, component creation order defines the page
# layout, so the statement order here is significant.
with gr.Blocks() as demo:

	# Source image to run detection on; preloaded with a sample file.
	input_image = gr.Image(value="8428_26_SM.jpg")
	# Shows the detection result: the image overlaid with one labeled box per face.
	annotated_image = gr.AnnotatedImage()

	#jsontext =  gr.Text(label= "deepface extract_faces results")
	# Populated by select_section() when the user clicks a face box above.
	selected_face_info = gr.Textbox(label="Selected Face Info", value="Click on a face above")
	selected_face_pic = gr.Image(label="Selected Face", value="Click on a face above")

		
	
	def findFaces(imgfile, dbackend):
		"""Detect faces in *imgfile* using the selected deepface backend.

		Returns one value per output component wired up in the gr.Interface
		below: (AnnotatedImage with one labeled box per face, number of faces
		found, runtime in seconds as a string).
		"""
		start_time = time.time()

		# enforce_detection=False: return an empty/whole-image result instead
		# of raising when no face is detected.
		face_objs = DeepFace.extract_faces(img_path=imgfile, enforce_detection=False, detector_backend=dbackend)

		numberoffaces = len(face_objs)

		# Shared with select_section() below, which uses these coordinates to
		# crop the clicked face out of the source image.
		global faceannotations
		faceannotations = []
		for i, face_obj in enumerate(face_objs, 1):
			area = face_obj["facial_area"]
			# (x1, y1, x2, y2) box in the format gr.AnnotatedImage expects.
			face_coordinates = (area["x"], area["y"], area["x"] + area["w"], area["y"] + area["h"])
			face_confidence = f'Face {i}: {face_obj["confidence"]:.0%}'
			faceannotations.append([face_coordinates, face_confidence])

		run_time = str(round(time.time() - start_time, 2))
		results = gr.AnnotatedImage(
			label=f'Detected {numberoffaces} faces via {dbackend} in {run_time} seconds.',
			value=(imgfile, faceannotations),
		)

		# NOTE(review): the 2nd and 3rd returns feed the "Selected Face Info"
		# textbox and the "Selected Face" *image* output — run_time is not an
		# image, so that last wiring looks suspect; confirm against the
		# gr.Interface outputs below.
		return results, numberoffaces, run_time

	# Backend picker; choices come from the dbackends label/id pairs above.
	dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',info=dbackendinfo,container=True,value='retinaface')
	# Wire findFaces to the components created above: it consumes the uploaded
	# image + chosen backend and fills the three outputs.
	# NOTE(review): findFaces returns (AnnotatedImage, face count, runtime
	# string); the runtime string lands in selected_face_pic, an Image output —
	# looks like a wiring mismatch, confirm intended behavior.
	gr.Interface(
		allow_flagging = "never",
		fn=findFaces,
		inputs=[input_image, dbackendchoice],
		outputs=[annotated_image,selected_face_info,selected_face_pic],
	)
	def select_section(evt: gr.SelectData):
		"""Handle a click on a face box in the annotated image.

		Looks up the clicked annotation (evt.index) in the module-level
		``faceannotations`` list populated by findFaces(), and returns that
		annotation plus the face cropped out of the input image.
		"""
		# NOTE(review): this reads input_image.value['path'] — i.e. the
		# component's current stored value, assumed to be a dict with a 'path'
		# key pointing at the image file; it may not reflect a newly uploaded
		# image. Confirm against the Gradio version in use.
		cropped_image = np.array(Image.open(input_image.value['path']))
		# faceannotations[evt.index][0] is the (x1, y1, x2, y2) box; numpy
		# slicing is rows-then-columns, hence y-slice first, x-slice second.
		cropped_image = cropped_image[faceannotations[evt.index][0][1]:faceannotations[evt.index][0][3], faceannotations[evt.index][0][0]:faceannotations[evt.index][0][2]]
		return faceannotations[evt.index], cropped_image


	# On click of an annotation, show its info string and the cropped face.
	annotated_image.select(select_section, None, [selected_face_info,selected_face_pic])

# show_error=True surfaces handler exceptions in the browser UI.
demo.launch(show_error=True)