pleonard committed
Commit 589a4f1
1 Parent(s): ab4f2c1

clicking on face

Files changed (1)
  1. app.py +49 -33
app.py CHANGED
@@ -5,6 +5,7 @@ import PIL
 from PIL import Image, ImageDraw, ImageFont
 import time
 
+
 dbackends = [
     ['Haar Cascade (OpenCV)','opencv'],
     ['Single Shot MultiBox Detector (OpenCV)','ssd'],
@@ -12,44 +13,59 @@ dbackends = [
     ['RetinaFace','retinaface'],
     ['You Only Look Once v8','yolov8'],
     ['YuNet','yunet'],
-    ['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
+    # ['Multi-task Cascade Convolutional Neural Network (TensorFlow) ','mtcnn'],
     ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
 ]
 
-annotated_image = gr.AnnotatedImage()
-jsontext = gr.Text(label= "deepface extract_faces results",)
-
-def findFaces(imgfile,dbackend):
-    start_time = time.time()
-    print(start_time)
-
-    face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend)
-    numberoffaces = len(face_objs)
-    jsontext = ''
-    faceannotations = []
-    for face_obj in face_objs:
-        face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
-        face_confidence = "{:.0%}".format(face_obj["confidence"])
-        face_result=[face_coordinates,face_confidence]
-        faceannotations.append(face_result)
-        jsontext=faceannotations
-    run_time = str(round((time.time() - start_time),2))
-    results = gr.AnnotatedImage(
-        label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
-        value=(imgfile, faceannotations)
-    )
-
-    print(run_time)
-    return(results,jsontext,numberoffaces,run_time)
-
-dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
-demo = gr.Interface(
-    allow_flagging = "never",
-    fn=findFaces,
-    inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
-    outputs=[annotated_image,jsontext],
-)
+
+with gr.Blocks() as demo:
+
+    annotated_image = gr.AnnotatedImage()
+    jsontext = gr.Text(label= "deepface extract_faces results")
+    selected_face = gr.Textbox(label="Selected Face")
+
+    def findFaces(imgfile,dbackend):
+        start_time = time.time()
+        print(start_time)
+
+        face_objs = DeepFace.extract_faces(img_path = imgfile, enforce_detection = False, detector_backend = dbackend)
+
+        numberoffaces = len(face_objs)
+        jsontext = ''
+        global faceannotations
+        faceannotations = []
+        for face_obj in face_objs:
+            face_coordinates = (face_obj["facial_area"]["x"],face_obj["facial_area"]["y"], (face_obj["facial_area"]["x"] + face_obj["facial_area"]["w"]),(face_obj["facial_area"]["y"] + face_obj["facial_area"]["h"]))
+            face_confidence = "{:.0%}".format(face_obj["confidence"])
+            face_result=[face_coordinates,face_confidence]
+            faceannotations.append(face_result)
+
+        jsontext=faceannotations
+        #jsontext=face_objs
+        run_time = str(round((time.time() - start_time),2))
+        results = gr.AnnotatedImage(
+            label= "Detected " + str(numberoffaces) + " faces via " + dbackend + ' in ' + run_time + ' seconds.',
+            value=(imgfile, faceannotations)
+        )
+
+        print(run_time)
+        return(results,jsontext,numberoffaces,run_time)
+
+    dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
+    gr.Interface(
+        allow_flagging = "never",
+        fn=findFaces,
+        inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
+        outputs=[annotated_image,jsontext,selected_face],
+    )
+    def select_section(evt: gr.SelectData):
+        return faceannotations[evt.index]
+
+    annotated_image.select(select_section, None, selected_face)
 
  demo.launch(show_error=True)
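
The select wiring added at the end of this hunk is the point of the commit: clicking a box drawn by gr.AnnotatedImage fires a select event, and gr.SelectData.index in the handler identifies which annotation was clicked. A minimal, self-contained sketch of that pattern follows, using placeholder image data rather than the app's and assuming a Gradio release where AnnotatedImage emits select events:

import gradio as gr
from PIL import Image

# Placeholder data (not the app's): a gray canvas with two hand-placed boxes.
base = Image.new("RGB", (320, 240), "gray")
boxes = [((20, 30, 120, 130), "face 1 (98%)"),
         ((180, 40, 280, 140), "face 2 (87%)")]

with gr.Blocks() as sketch:
    annotated = gr.AnnotatedImage(value=(base, boxes))
    picked = gr.Textbox(label="Selected Face")

    def on_select(evt: gr.SelectData):
        # evt.index is the position of the clicked annotation within `boxes`
        return str(boxes[evt.index])

    annotated.select(on_select, None, picked)

sketch.launch()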
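
One design note: a select handler only sees the inputs it is wired to, which is why the commit shares faceannotations through a module-level global; on a Space that global is also shared across every visitor. A per-session gr.State is the usual alternative. The sketch below is one hedged way to wire it; it swaps gr.Interface for a plain Button click purely to keep the wiring short, and the component names are illustrative, not the app's:

import gradio as gr
from deepface import DeepFace

with gr.Blocks() as alt:
    img_in = gr.Image(type="filepath")
    backend = gr.Radio(choices=["retinaface", "opencv"], value="retinaface", label="Detector Backend:")
    run = gr.Button("Find faces")
    annotated = gr.AnnotatedImage()
    picked = gr.Textbox(label="Selected Face")
    annots_state = gr.State([])  # per-session storage for the detected boxes

    def find_faces(imgfile, dbackend):
        face_objs = DeepFace.extract_faces(img_path=imgfile, enforce_detection=False,
                                           detector_backend=dbackend)
        boxes = []
        for f in face_objs:
            a = f["facial_area"]
            boxes.append(((a["x"], a["y"], a["x"] + a["w"], a["y"] + a["h"]),
                          "{:.0%}".format(f["confidence"])))
        # First value updates the AnnotatedImage, second value updates the State.
        return (imgfile, boxes), boxes

    def on_select(boxes, evt: gr.SelectData):
        # The state value arrives as a normal argument; SelectData is injected by its type.
        return str(boxes[evt.index])

    run.click(find_faces, [img_in, backend], [annotated, annots_state])
    annotated.select(on_select, annots_state, picked)

alt.launch()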