Upload 2 files
- face_classifier.py  +39 -3
- vision_tools.py  +30 -2
face_classifier.py
CHANGED
@@ -14,7 +14,8 @@ logger = logging.getLogger(__name__)
 # Values: 0.3 = permissive (accepts many false positives)
 # 0.6 = balanced
 # 0.8 = strict (removes false positives but may miss real faces)
-
+# 0.85 = VERY strict (only very clear faces)
+FACE_CONFIDENCE_THRESHOLD = 0.85  # VERY STRICT: filter out t-shirts, signs, etc.
 GENDER_NEUTRAL_THRESHOLD = 0.2  # Minimum difference for neutral gender


@@ -37,19 +38,54 @@ def validate_and_classify_face(image_path: str) -> Optional[Dict[str, Any]]:
         or None if it fails completely
     """
     try:
+        import cv2
+        import numpy as np
         from deepface import DeepFace

         print(f"[DeepFace] Analitzant: {image_path}")

-        #
+        # PREPROCESSING: normalize lighting and improve contrast
+        # This reduces the impact of light/darkness on detection
+        img = cv2.imread(str(image_path))
+        if img is None:
+            print(f"[DeepFace] No se pudo cargar la imagen: {image_path}")
+            return None
+
+        # Convert to grayscale (more robust for detection)
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+        # CLAHE: Adaptive Histogram Equalization
+        # Normalizes contrast locally, reducing lighting effects
+        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+        normalized = clahe.apply(gray)
+
+        # Convert back to BGR for DeepFace
+        normalized_bgr = cv2.cvtColor(normalized, cv2.COLOR_GRAY2BGR)
+
+        # Save the preprocessed image to a temporary file
+        import tempfile
+        import os
+        temp_dir = tempfile.gettempdir()
+        temp_path = os.path.join(temp_dir, f"normalized_{os.path.basename(image_path)}")
+        cv2.imwrite(temp_path, normalized_bgr)
+
+        print(f"[DeepFace] Imagen preprocesada con CLAHE: {temp_path}")
+
+        # Analyze gender with face detection (using the normalized image)
         result = DeepFace.analyze(
-            img_path=
+            img_path=temp_path,
             actions=['gender'],
             enforce_detection=True,  # Try to detect a face
             detector_backend='opencv',
             silent=True
         )

+        # Clean up the temporary file
+        try:
+            os.remove(temp_path)
+        except:
+            pass
+
        # DeepFace may return a list if it detects multiple faces
        if isinstance(result, list):
            print(f"[DeepFace] Resultado es lista con {len(result)} elementos")
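For reference, the CLAHE preprocessing this patch introduces can be reproduced on its own. The snippet below is a minimal sketch, not part of the commit: it assumes OpenCV is installed and uses a hypothetical input path photo.jpg; it mirrors the same grayscale, local contrast equalization, and back-to-BGR steps applied before the image is handed to DeepFace.

import cv2

# Hypothetical input path, for illustration only.
img = cv2.imread("photo.jpg")
if img is None:
    raise SystemExit("could not load photo.jpg")

# Same steps as the patch: grayscale, CLAHE, then back to BGR.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
equalized = clahe.apply(gray)
equalized_bgr = cv2.cvtColor(equalized, cv2.COLOR_GRAY2BGR)

# Hypothetical output name for the normalized image.
cv2.imwrite("photo_normalized.jpg", equalized_bgr)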
vision_tools.py
CHANGED
@@ -52,8 +52,36 @@ try:
 except Exception:
     face_recognition = None  # type: ignore

-# FaceRecognizer
-DFRecognizer
+# FaceRecognizer - direct implementation on top of DeepFace
+class DFRecognizer:
+    """Simple wrapper around DeepFace as an embedding backend."""
+    def __init__(self, model_name: str = 'Facenet512'):
+        self.model_name = model_name
+        if DeepFace is None:
+            raise ImportError("DeepFace not available")
+
+    def get_face_embedding_from_path(self, image_path: str) -> Optional[np.ndarray]:
+        """Extract a face embedding using DeepFace."""
+        try:
+            # Use DeepFace to obtain the embedding
+            embedding = DeepFace.represent(
+                img_path=image_path,
+                model_name=self.model_name,
+                enforce_detection=False,  # Do not force detection (face already detected)
+                detector_backend='skip'
+            )
+
+            if isinstance(embedding, list) and len(embedding) > 0:
+                # DeepFace.represent returns a list of dictionaries
+                emb = embedding[0].get('embedding')
+                if emb:
+                    return np.array(emb, dtype=float)
+
+            return None
+
+        except Exception as e:
+            log.debug("DeepFace embedding failed for %s: %s", image_path, e)
+            return None

 try:
     from deepface import DeepFace
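As a usage note, the new DFRecognizer can be exercised as in the sketch below. This is not code from the commit and rests on stated assumptions: DeepFace and numpy are installed, DFRecognizer is importable from vision_tools, and face_a.jpg / face_b.jpg are hypothetical, already-cropped face images (the wrapper skips detection via detector_backend='skip'). Comparing the two Facenet512 embeddings with cosine similarity is one common choice; the rest of vision_tools.py may use a different metric.

import numpy as np
from vision_tools import DFRecognizer  # assumes vision_tools is on the import path

rec = DFRecognizer(model_name="Facenet512")

# Hypothetical, pre-cropped face images.
emb_a = rec.get_face_embedding_from_path("face_a.jpg")
emb_b = rec.get_face_embedding_from_path("face_b.jpg")

if emb_a is None or emb_b is None:
    print("embedding extraction failed for at least one image")
else:
    # Cosine similarity between the two embedding vectors.
    sim = float(np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b)))
    print(f"cosine similarity: {sim:.3f}")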