pleonard committed
Commit ed83c38 • 1 parent: 4f2c282

embedding choice

TEST_sculley.jpg ADDED
TEST_spindler.jpg ADDED
__pycache__/app.cpython-38.pyc CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ
 
app.py CHANGED
@@ -25,6 +25,20 @@ dbackends = [
     ['Fast Multi-task Cascade Convolutional Neural Network (PyTorch)','fastmtcnn']
     ]
 
+embedding_backends = [
+    "VGG-Face",
+    "Facenet",
+    "Facenet512",
+    "OpenFace",
+    "DeepFace",
+    "DeepID",
+    "ArcFace",
+    "Dlib",
+    "SFace",
+    "GhostFaceNet",
+]
+
+
 dbackendinfo = 'Detectors with 🌈 require a color image.'
 
 
@@ -91,9 +105,10 @@ with gr.Blocks() as demo:
 
 
     with gr.Tab("Identify People in One Image"):
+        embedding_backendchoice = gr.Radio(choices=embedding_backends,label='Embedding Backend:',container=True,value='ArcFace')
         oneimageannotations = []
-        def identify_in_one_image(imgfile):
-            oneimageresults = DeepFace.find(img_path=imgfile, db_path="db")
+        def identify_in_one_image(imgfile, embedding_backendchoice):
+            oneimageresults = DeepFace.find(img_path=imgfile, db_path="db", model_name=embedding_backendchoice)
             oneimageresults = pd.concat(oneimageresults)
             for i, found_face in oneimageresults.iterrows():
                 face_coordinates = (found_face["source_x"],found_face["source_y"], (found_face["source_x"] + found_face["source_w"]),(found_face["source_y"] + found_face["source_h"]))
@@ -107,14 +122,14 @@ with gr.Blocks() as demo:
             )
             return results, oneimageannotations
 
-        oneimage_input_image = gr.Image()
+        oneimage_input_image = gr.Image(value="TEST_spindler.jpg")
 
         found_faces=gr.AnnotatedImage()
        debug_output = gr.Textbox()
         gr.Interface(
            allow_flagging = "never",
            fn=identify_in_one_image,
-           inputs=oneimage_input_image,
+           inputs=[oneimage_input_image, embedding_backendchoice],
            outputs=[found_faces, debug_output]
         )
 
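The change above follows a simple pattern: the gr.Radio value is passed as a second input to the callback, which forwards it to DeepFace.find as model_name. The snippet below is a minimal, self-contained sketch of that same pattern, not a copy of this Space's code; it assumes deepface and gradio are installed and that a db/ folder of reference images exists, and the variable names are illustrative.

# Minimal sketch: a Radio choice selects the embedding model used by DeepFace.find.
# Assumes `pip install deepface gradio` and a "db" folder of reference face images.
import gradio as gr
import pandas as pd
from deepface import DeepFace

embedding_backends = ["VGG-Face", "Facenet", "ArcFace", "SFace"]

def identify(imgfile, model_name):
    # DeepFace.find returns a list of DataFrames (one per detected face);
    # concatenating them yields every match with its source_x/y/w/h coordinates.
    results = DeepFace.find(img_path=imgfile, db_path="db", model_name=model_name)
    return pd.concat(results).to_string()

with gr.Blocks() as demo:
    model_choice = gr.Radio(choices=embedding_backends, label="Embedding Backend:", value="ArcFace")
    input_image = gr.Image()
    matches = gr.Textbox()
    gr.Interface(fn=identify, inputs=[input_image, model_choice], outputs=matches, allow_flagging="never")

demo.launch()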
db/ds_model_arcface_detector_opencv_aligned_normalization_base_expand_0.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c493963c11bc05abf1668a4c009d8a7259009b13c4c81721e8f1a3c77176683
+size 28405
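Judging by its name, this .pkl is the representations cache that DeepFace.find writes into db_path the first time it runs a given model/detector combination (here ArcFace embeddings with the opencv detector). A quick way to inspect it locally, assuming the actual LFS object has been pulled; the exact record layout depends on the installed deepface version, so this is only a sketch.

import pickle

# Hypothetical inspection of DeepFace's cached representations file; the field
# names inside each record vary across deepface versions, so print one to check.
with open("db/ds_model_arcface_detector_opencv_aligned_normalization_base_expand_0.pkl", "rb") as f:
    representations = pickle.load(f)

print(type(representations), len(representations))
print(representations[0])  # typically an identity path plus its ArcFace embedding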