# imagedetect/app.py
from fastapi import FastAPI, Body
import numpy as np
import cv2
# import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from sklearn.preprocessing import Normalizer
# from google.colab import files
# from google.colab.patches import cv2_imshow
from sklearn.metrics.pairwise import cosine_similarity
from base64 import b64decode
# Load the ResNet50 model pre-trained on ImageNet
resnet_model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
# Load Haar Cascade for face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
app = FastAPI()
# Function to preprocess an input image for the ResNet model
def preprocess_image(img):
    img = cv2.resize(img, (224, 224))           # Resize to the ResNet input size (224x224)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV decodes to BGR; Keras preprocess_input expects RGB
    img = np.expand_dims(img, axis=0)           # Add a batch dimension
    img = preprocess_input(img)                 # Apply ResNet50 (ImageNet) preprocessing
    return img
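# Note: for any HxWx3 BGR crop, preprocess_image returns an array of shape
# (1, 224, 224, 3) that can be fed directly to resnet_model.predict.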
# Function to extract a face embedding using the ResNet model
def get_face_embedding(image):
    preprocessed_img = preprocess_image(image)
    embedding = resnet_model.predict(preprocessed_img, verbose=0)  # verbose=0 silences the progress bar when serving
    return embedding[0]
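# With include_top=False and pooling='avg', ResNet50 emits one 2048-dimensional
# vector per image, so get_face_embedding returns a vector of shape (2048,).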
# Function to detect faces with OpenCV and return each cropped face with its bounding box
def extract_faces(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))
    if len(faces) == 0:
        print("No faces detected.")
        return []
    extracted_faces = []
    for (x, y, w, h) in faces:
        face = image[y:y+h, x:x+w]
        extracted_faces.append((face, (x, y, w, h)))  # Return face crop and bounding box
    return extracted_faces
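# Quick local check (a sketch; "sample.jpg" is a hypothetical test image):
#     img = cv2.imread("sample.jpg")
#     for face, (x, y, w, h) in extract_faces(img):
#         print(f"face at ({x}, {y}), size {w}x{h}")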
# Colab-only helpers, kept commented out for reference:
# Upload ID document image
# def upload_id_document():
#     uploaded = files.upload()
#     for fn in uploaded.keys():
#         img = cv2.imread(fn)
#         cv2_imshow(img)
#     return img

# Capture live image using webcam (using JavaScript in Colab)

# Start the verification process
# def verify_identity():
#     print("Please upload your ID document (with a clear face).")
#     id_document_image = upload_id_document()
#     # Extract face from ID document
#     id_faces = extract_faces(id_document_image)
#     if len(id_faces) == 0:
#         print("No face detected in the ID document.")
#         return
#     id_face, _ = id_faces[0]  # Use the first detected face from the ID document
#     id_face_embedding = get_face_embedding(id_face)
#     print("\nNow capturing the live image from your webcam. Please ensure good lighting.")
#     # live_image = capture_live_image()
#     # Detect face in the live image (without extracting it)
#     live_faces = face_cascade.detectMultiScale(cv2.cvtColor(live_image, cv2.COLOR_BGR2GRAY), scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))
#     if len(live_faces) == 0:
#         print("No face detected in the live image.")
#         return
#     # Process the first detected face in the live image
#     for (x, y, w, h) in live_faces:
#         live_face = live_image[y:y+h, x:x+w]  # Crop the face from the live image
#         live_face_embedding = get_face_embedding(live_face)  # Get face embedding
#         # Normalize embeddings
#         normalizer = Normalizer()
#         id_face_embedding_normalized = normalizer.transform(id_face_embedding.reshape(1, -1))
#         live_face_embedding_normalized = normalizer.transform(live_face_embedding.reshape(1, -1))
#         # Compute cosine similarity between the ID face and the live face
#         similarity = cosine_similarity(id_face_embedding_normalized, live_face_embedding_normalized)
#         print(f"Similarity with live face: {similarity[0][0]:.4f}")
#         # Draw a bounding box around the live face
#         if similarity[0][0] > 0.6:  # Adjust similarity threshold as needed
#             cv2.rectangle(live_image, (x, y), (x + w, y + h), (0, 255, 0), 2)  # Green box for match
#             cv2.putText(live_image, f"Match: {similarity[0][0]:.2f}", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
#             print("Face matched!")
#         else:
#             cv2.rectangle(live_image, (x, y), (x + w, y + h), (0, 0, 255), 2)  # Red box for no match
#             cv2.putText(live_image, "No Match", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
#             print("Face did not match.")
#     # Display the live image with the bounding box
#     cv2_imshow(live_image)
# FastAPI endpoints
@app.get("/")
def greet_json():
    return {"Hello": "World!"}
@app.post("/verify")
def verify(image1: str = Body(...), image2: str = Body(...)):
    """Accept a JSON body with image1 and image2 as base64-encoded strings."""
    # Decode the base64 strings into 3-channel BGR OpenCV images
    id_document_image = cv2.imdecode(np.frombuffer(b64decode(image1), np.uint8), cv2.IMREAD_COLOR)
    live_image = cv2.imdecode(np.frombuffer(b64decode(image2), np.uint8), cv2.IMREAD_COLOR)
    # Extract face from the ID document
    id_faces = extract_faces(id_document_image)
    if len(id_faces) == 0:
        return {"message": "No face detected in the ID document."}
    id_face, _ = id_faces[0]  # Use the first detected face
    id_face_embedding = get_face_embedding(id_face)
    # Detect faces in the live image (without cropping yet)
    live_faces = face_cascade.detectMultiScale(cv2.cvtColor(live_image, cv2.COLOR_BGR2GRAY), scaleFactor=1.4, minNeighbors=5, minSize=(30, 30))
    if len(live_faces) == 0:
        return {"message": "No face detected in the live image."}
    # Process only the first detected face in the live image
    x, y, w, h = live_faces[0]
    live_face = live_image[y:y+h, x:x+w]
    live_face_embedding = get_face_embedding(live_face)
    # L2-normalize both embeddings
    normalizer = Normalizer()
    id_face_embedding_normalized = normalizer.transform(id_face_embedding.reshape(1, -1))
    live_face_embedding_normalized = normalizer.transform(live_face_embedding.reshape(1, -1))
    # Compute cosine similarity; cast to a plain float so FastAPI can serialize it
    similarity = float(cosine_similarity(id_face_embedding_normalized, live_face_embedding_normalized)[0][0])
    if similarity > 0.6:  # Adjust similarity threshold as needed
        return {"message": "Face matched!", "similarity": similarity}
    return {"message": "Face did not match.", "similarity": similarity}