facefinder / app.py
pleonard's picture
naming
e036234
raw
history blame
1.8 kB
import gradio as gr
from deepface import DeepFace
import numpy as np
import PIL
from PIL import Image, ImageDraw, ImageFont
import time
# [display label, DeepFace detector_backend id] pairs offered in the UI radio.
# (Labels are shown to the user; ids are passed straight to extract_faces.)
dbackends = [
    [label, backend_id]
    for label, backend_id in (
        ('Haar Cascade (OpenCV)', 'opencv'),
        ('Single Shot MultiBox Detector (OpenCV)', 'ssd'),
        ('Histogram of Oriented Gradients (Dlib)', 'dlib'),
        ('Multi-task CNN ', 'mtcnn'),
        ('retinaface', 'retinaface'),
        ('yolov8', 'yolov8'),
        ('yunet', 'yunet'),
        ('fastmtcnn', 'fastmtcnn'),
    )
]
# Output components wired into the Interface below.
# annotated_image shows the input picture with detected-face boxes overlaid;
# jsontext shows the raw annotation data returned by findFaces.
annotated_image = gr.AnnotatedImage()
jsontext = gr.Text(label="deepface extract_faces results")
def findFaces(imgfile, dbackend):
    """Detect faces in an image with DeepFace and build Gradio outputs.

    Parameters:
        imgfile: image path/array accepted by DeepFace.extract_faces and
            gr.AnnotatedImage (supplied by the gr.Image input component).
        dbackend: DeepFace detector_backend identifier (e.g. 'retinaface').

    Returns:
        (gr.AnnotatedImage, list) — the annotated image component whose label
        reports face count / backend / elapsed seconds, and the list of
        (bounding_box, confidence) annotations shown in the text output.

    Note: exactly two values are returned to match the two components in the
    Interface's `outputs` list; returning extra values makes Gradio raise a
    return-count error at runtime (the original returned four).
    """
    start_time = time.time()
    # enforce_detection=False: return an empty-ish result instead of raising
    # when no face is found, so the UI never errors on face-free images.
    face_objs = DeepFace.extract_faces(
        img_path=imgfile,
        enforce_detection=False,
        detector_backend=dbackend,
    )

    faceannotations = []
    for face_obj in face_objs:
        area = face_obj["facial_area"]  # hoist the repeated dict lookup
        # AnnotatedImage expects (x1, y1, x2, y2) corner coordinates;
        # DeepFace supplies x/y/w/h, so convert width/height to corners.
        box = (area["x"], area["y"], area["x"] + area["w"], area["y"] + area["h"])
        confidence = "{:.0%}".format(face_obj["confidence"])
        faceannotations.append((box, confidence))

    run_time = str(round(time.time() - start_time, 2))
    results = gr.AnnotatedImage(
        label=f"Detected {len(faceannotations)} faces via {dbackend} in {run_time} seconds.",
        value=(imgfile, faceannotations),
    )
    print(run_time)
    return results, faceannotations
dbackendchoice = gr.Radio(choices=dbackends,label='Detector Backend:',value='retinaface')
# Wire the UI: an image input (pre-filled with a sample file that must exist
# alongside this script — TODO confirm "8428_26_SM.jpg" ships with the app)
# plus the backend radio, feeding findFaces, rendering into the two output
# components defined above.
# NOTE(review): findFaces returns four values but only two outputs are
# declared here — Gradio will reject the extra return values at runtime.
# NOTE(review): allow_flagging is deprecated in newer Gradio releases in
# favor of flagging_mode — verify against the installed gradio version.
demo = gr.Interface(
allow_flagging = "never",
fn=findFaces,
inputs=[gr.Image(value="8428_26_SM.jpg"), dbackendchoice],
outputs=[annotated_image,jsontext],
)
# show_error=True surfaces Python exceptions in the browser UI for debugging.
demo.launch(show_error=True)