mirror of
https://github.com/mudler/LocalAI.git
synced 2026-04-30 12:08:13 -04:00
feat(insightface): add antispoofing (liveness) detection (#9515)
* feat(insightface): add antispoofing (liveness) detection
Light up the anti_spoofing flag that was parked during the first pass.
Both FaceVerify and FaceAnalyze now run the Silent-Face MiniFASNetV2 +
MiniFASNetV1SE ensemble (~4 MB, Apache 2.0, CPU <10ms) when the flag is
set. Failed liveness on either image vetoes FaceVerify regardless of
embedding similarity. Every insightface* gallery entry now ships the
MiniFASNet ONNX weights so existing packs light up after reinstall.
Setting the flag against a model without the MiniFASNet files returns
FAILED_PRECONDITION (HTTP 412) with a clear install message — no
silent is_real=false.
FaceVerifyResponse gained per-image img{1,2}_is_real and
img{1,2}_antispoof_score (proto 9-12); FaceAnalyze's existing
is_real/antispoof_score fields are now populated. Schema fields are
pointers so they are fully absent from the JSON response when
anti_spoofing was not requested — avoids collapsing "not checked" with
"checked and fake" under Go's omitempty on bool.
Validated end-to-end over HTTP against a local install:
- verify + anti_spoofing, both real -> verified=true, score ~0.76
- verify + anti_spoofing, img2 spoof -> verified=false, img2_is_real=false
- analyze + anti_spoofing -> is_real and score per face
- flag against model without MiniFASNet -> HTTP 412 fail-loud
Assisted-by: Claude:claude-opus-4-7 go vet
* test(insightface): wire test target into test-extra
The root Makefile's `test-extra` already runs
`$(MAKE) -C backend/python/insightface test`, but the backend's
Makefile never defined the target — so the command silently errored
and the suite was never executed in CI. Adding the two-line target
(matching ace-step/Makefile) hooks `test.sh` → `runUnittests` →
`python -m unittest test.py`, which discovers both the pre-existing
engine classes (InsightFaceEngineTest, OnnxDirectEngineTest) and the
new AntispoofingTest. Each class skips gracefully when its weights
can't be downloaded from a network-restricted runner.
Assisted-by: Claude:claude-opus-4-7
* test(insightface): exercise antispoofing in e2e-backends (both paths)
Add a `face_antispoof` capability to the Ginkgo e2e suite and extend
the existing FaceVerify + FaceAnalyze specs with liveness assertions
covering BOTH paths:
real fixture -> is_real=true, score>0, verified stays true
spoof fixture -> is_real=false, verified vetoed to false
The spoof fixture is upstream's own `image_F2.jpg` (via the yakhyo
mirror) — verified locally against the MiniFASNetV2+V1SE ensemble to
classify as is_real=false with score ~0.013. That makes the assertion
deterministic across CI runs; synthetic/derived spoofs fool the model
unpredictably and would be flaky.
Makefile wires it up end-to-end:
- New INSIGHTFACE_ANTISPOOF_* cache dir + two ONNX downloads with
pinned SHAs, matching the gallery entries.
- insightface-antispoof-models target shared by both backend configs.
- FACE_SPOOF_IMAGE_URL passed via BACKEND_TEST_FACE_SPOOF_IMAGE_URL.
- Both e2e targets (buffalo-sc + opencv) now:
* depend on insightface-antispoof-models
* pass antispoof_v2_onnx / antispoof_v1se_onnx in BACKEND_TEST_OPTIONS
* include face_antispoof in BACKEND_TEST_CAPS
backend_test.go adds the new capability constant and a faceSpoofFile
fixture resolved the same way as faceFile1/2/3. Spoof assertions are
gated on both capFaceAntispoof AND faceSpoofFile being set, so a test
config that omits the spoof fixture degrades gracefully to "real path
only" instead of failing.
Assisted-by: Claude:claude-opus-4-7 go vet
This commit is contained in:
committed by
GitHub
parent
c1f923b2bc
commit
f5eb13d3c2
@@ -11,3 +11,6 @@ protogen-clean:
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
|
||||
test: install
|
||||
bash test.sh
|
||||
|
||||
@@ -180,23 +180,57 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
verified = distance < threshold
|
||||
confidence = max(0.0, min(100.0, (1.0 - distance / threshold) * 100.0)) if threshold > 0 else 0.0
|
||||
|
||||
def _region(img) -> backend_pb2.FacialArea:
|
||||
# Detect once per image — region is needed for the response and
|
||||
# potentially for the antispoof crop. Returns the highest-score face.
|
||||
def _best_detection(img):
|
||||
dets = self.engine.detect(img)
|
||||
if not dets:
|
||||
return None
|
||||
return max(dets, key=lambda d: d.score)
|
||||
|
||||
def _region(det) -> backend_pb2.FacialArea:
|
||||
if det is None:
|
||||
return backend_pb2.FacialArea()
|
||||
best = max(dets, key=lambda d: d.score)
|
||||
x1, y1, x2, y2 = best.bbox
|
||||
x1, y1, x2, y2 = det.bbox
|
||||
return backend_pb2.FacialArea(x=x1, y=y1, w=x2 - x1, h=y2 - y1)
|
||||
|
||||
det1 = _best_detection(img1)
|
||||
det2 = _best_detection(img2)
|
||||
|
||||
img1_is_real = False
|
||||
img1_score = 0.0
|
||||
img2_is_real = False
|
||||
img2_score = 0.0
|
||||
if request.anti_spoofing:
|
||||
spoof1 = self.engine.antispoof(img1, det1.bbox) if det1 is not None else None
|
||||
spoof2 = self.engine.antispoof(img2, det2.bbox) if det2 is not None else None
|
||||
if spoof1 is None or spoof2 is None:
|
||||
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
|
||||
context.set_details(
|
||||
"anti_spoofing requested but no antispoof model is loaded — "
|
||||
"install `silent-face-antispoofing` or pick a gallery entry "
|
||||
"that bundles MiniFASNet weights"
|
||||
)
|
||||
return backend_pb2.FaceVerifyResponse()
|
||||
img1_is_real, img1_score = spoof1.is_real, spoof1.score
|
||||
img2_is_real, img2_score = spoof2.is_real, spoof2.score
|
||||
# Failed liveness vetoes verification regardless of similarity.
|
||||
if not (img1_is_real and img2_is_real):
|
||||
verified = False
|
||||
|
||||
return backend_pb2.FaceVerifyResponse(
|
||||
verified=verified,
|
||||
distance=float(distance),
|
||||
threshold=float(threshold),
|
||||
confidence=float(confidence),
|
||||
model=self.model_name or self.engine_name,
|
||||
img1_area=_region(img1),
|
||||
img2_area=_region(img2),
|
||||
img1_area=_region(det1),
|
||||
img2_area=_region(det2),
|
||||
processing_time_ms=float((time.time() - start) * 1000.0),
|
||||
img1_is_real=img1_is_real,
|
||||
img1_antispoof_score=float(img1_score),
|
||||
img2_is_real=img2_is_real,
|
||||
img2_antispoof_score=float(img2_score),
|
||||
)
|
||||
|
||||
def FaceAnalyze(self, request, context):
|
||||
@@ -223,6 +257,19 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
fa.dominant_gender = attrs.dominant_gender
|
||||
for k, v in attrs.gender.items():
|
||||
fa.gender[k] = float(v)
|
||||
if request.anti_spoofing:
|
||||
bbox = (float(x), float(y), float(x + w), float(y + h))
|
||||
spoof = self.engine.antispoof(img, bbox)
|
||||
if spoof is None:
|
||||
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
|
||||
context.set_details(
|
||||
"anti_spoofing requested but no antispoof model is loaded — "
|
||||
"install `silent-face-antispoofing` or pick a gallery entry "
|
||||
"that bundles MiniFASNet weights"
|
||||
)
|
||||
return backend_pb2.FaceAnalyzeResponse()
|
||||
fa.is_real = spoof.is_real
|
||||
fa.antispoof_score = float(spoof.score)
|
||||
faces.append(fa)
|
||||
return backend_pb2.FaceAnalyzeResponse(faces=faces)
|
||||
|
||||
|
||||
@@ -41,6 +41,12 @@ class FaceAttributes:
|
||||
gender: dict[str, float] = field(default_factory=dict)
|
||||
|
||||
|
||||
@dataclass
class SpoofResult:
    """Outcome of a single MiniFASNet liveness check."""

    # True when the ensemble classifies the face as a live capture.
    is_real: bool
    # Averaged probability of the "real" class, in the range 0.0-1.0.
    score: float
|
||||
|
||||
|
||||
class FaceEngine(Protocol):
|
||||
"""Minimal interface every engine must implement."""
|
||||
|
||||
@@ -48,6 +54,121 @@ class FaceEngine(Protocol):
|
||||
def detect(self, img: np.ndarray) -> list[FaceDetection]: ...
|
||||
def embed(self, img: np.ndarray) -> np.ndarray | None: ...
|
||||
def analyze(self, img: np.ndarray) -> list[FaceAttributes]: ...
|
||||
# Optional: returns None when no antispoof model is loaded.
|
||||
def antispoof(self, img: np.ndarray, bbox: tuple[float, float, float, float]) -> SpoofResult | None: ...
|
||||
|
||||
|
||||
# ─── Antispoofer (Silent-Face MiniFASNet) ──────────────────────────────
|
||||
|
||||
class Antispoofer:
    """Liveness detector wrapping the Silent-Face MiniFASNet ensemble.

    Up to two ONNX exports can be loaded (MiniFASNetV2 with crop scale
    2.7 and MiniFASNetV1SE with crop scale 4.0). Each is an 80x80
    BGR-float32 classifier emitting 3 logits, with index 1 meaning
    "real". When both are loaded, the per-model softmax outputs are
    averaged before the argmax — the same ensembling the upstream
    `test.py` performs.

    Preprocessing follows yakhyo/face-anti-spoofing's reference
    implementation: every model receives its own scale-expanded crop
    centered on the face bbox, resized to 80x80 and fed as raw float32
    BGR (no /255, no mean/std normalization). The bbox arithmetic
    lives in `_crop_face`.

    Running with a single model also works — the missing one is
    simply skipped.
    """

    INPUT_SIZE = (80, 80)  # h, w
    REAL_CLASS_IDX = 1

    def __init__(self) -> None:
        # One tuple per loaded model: (session, crop scale, input name, output name).
        self._sessions: list[tuple[Any, float, str, str]] = []
        self.threshold: float = 0.5

    def load(self, model_paths: list[tuple[str, float]], threshold: float = 0.5) -> None:
        """Load one or more (path, scale) pairs."""
        import onnxruntime as ort

        providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
        for model_path, crop_scale in model_paths:
            sess = ort.InferenceSession(model_path, providers=providers)
            self._sessions.append(
                (
                    sess,
                    float(crop_scale),
                    sess.get_inputs()[0].name,
                    sess.get_outputs()[0].name,
                )
            )
        self.threshold = float(threshold)

    @property
    def loaded(self) -> bool:
        # True once at least one ONNX session has been created.
        return bool(self._sessions)

    def _crop_face(self, img: np.ndarray, bbox: tuple[float, float, float, float], scale: float) -> np.ndarray:
        """Cut a scale-expanded crop around `bbox` and resize to INPUT_SIZE."""
        # bbox is (x1, y1, x2, y2) in source-image coordinates.
        src_h, src_w = img.shape[:2]
        left, top, right, bottom = bbox
        box_w = max(1.0, right - left)
        box_h = max(1.0, bottom - top)

        # Shrink the requested scale so the expanded crop still fits
        # inside the source image.
        scale = min((src_h - 1) / box_h, (src_w - 1) / box_w, scale)
        half_w = box_w * scale / 2.0
        half_h = box_h * scale / 2.0

        center_x = left + box_w / 2.0
        center_y = top + box_h / 2.0

        cx1 = max(0, int(center_x - half_w))
        cy1 = max(0, int(center_y - half_h))
        cx2 = min(src_w - 1, int(center_x + half_w))
        cy2 = min(src_h - 1, int(center_y + half_h))

        cropped = img[cy1 : cy2 + 1, cx1 : cx2 + 1]
        if cropped.size == 0:
            # Degenerate bbox — fall back to the whole frame rather than crash.
            cropped = img
        out_h, out_w = self.INPUT_SIZE
        return cv2.resize(cropped, (out_w, out_h))

    @staticmethod
    def _softmax(x: np.ndarray) -> np.ndarray:
        """Row-wise softmax, shifted by the row max for numeric stability."""
        shifted = np.exp(x - np.max(x, axis=1, keepdims=True))
        return shifted / shifted.sum(axis=1, keepdims=True)

    def predict(self, img: np.ndarray, bbox: tuple[float, float, float, float]) -> SpoofResult:
        """Classify the face inside `bbox` as real or spoofed.

        Averages the softmax output of every loaded model; the verdict
        requires the "real" class to win the argmax AND its averaged
        probability to clear `self.threshold`.

        Raises RuntimeError when called before `load`.
        """
        if not self._sessions:
            raise RuntimeError("Antispoofer.predict called with no models loaded")
        accum = np.zeros((1, 3), dtype=np.float32)
        for sess, crop_scale, in_name, out_name in self._sessions:
            crop = self._crop_face(img, bbox, crop_scale).astype(np.float32)
            # HWC -> NCHW, batch of one.
            batch = np.transpose(crop, (2, 0, 1))[np.newaxis, ...]
            logits = sess.run([out_name], {in_name: batch})[0]
            accum += self._softmax(logits)
        accum /= float(len(self._sessions))
        real_prob = float(accum[0, self.REAL_CLASS_IDX])
        won_argmax = int(np.argmax(accum)) == self.REAL_CLASS_IDX
        return SpoofResult(is_real=won_argmax and real_prob >= self.threshold, score=real_prob)
|
||||
|
||||
|
||||
def _build_antispoofer(options: dict[str, str], model_dir: str | None) -> Antispoofer | None:
    """Instantiate an Antispoofer from option keys, or return None.

    Recognised options:
      antispoof_v2_onnx    — path/filename of MiniFASNetV2 (scale 2.7)
      antispoof_v1se_onnx  — path/filename of MiniFASNetV1SE (scale 4.0)
      antispoof_threshold  — real-class probability threshold, default 0.5

    Either or both model options may be present; with neither set this
    returns None and the engine runs without liveness support.
    """
    # Option key -> the crop scale that model was trained with.
    known_models = (
        ("antispoof_v2_onnx", 2.7),
        ("antispoof_v1se_onnx", 4.0),
    )
    pairs = [
        (_resolve_model_path(options[key], model_dir=model_dir), scale)
        for key, scale in known_models
        if options.get(key)
    ]
    if not pairs:
        return None
    spoofer = Antispoofer()
    spoofer.load(pairs, threshold=float(options.get("antispoof_threshold", "0.5")))
    return spoofer
|
||||
|
||||
|
||||
# ─── InsightFaceEngine ────────────────────────────────────────────────
|
||||
@@ -80,6 +201,7 @@ class InsightFaceEngine:
|
||||
self.det_size: tuple[int, int] = (640, 640)
|
||||
self.det_thresh: float = 0.5
|
||||
self._providers: list[str] = ["CPUExecutionProvider"]
|
||||
self._antispoofer: Antispoofer | None = None
|
||||
|
||||
def prepare(self, options: dict[str, str]) -> None:
|
||||
import glob
|
||||
@@ -90,6 +212,7 @@ class InsightFaceEngine:
|
||||
self.model_pack = options.get("model_pack", "buffalo_l")
|
||||
self.det_size = _parse_det_size(options.get("det_size", "640x640"))
|
||||
self.det_thresh = float(options.get("det_thresh", "0.5"))
|
||||
self._antispoofer = _build_antispoofer(options, options.get("_model_dir"))
|
||||
|
||||
pack_dir = _locate_insightface_pack(options, self.model_pack)
|
||||
if pack_dir is None:
|
||||
@@ -187,6 +310,11 @@ class InsightFaceEngine:
|
||||
out.append(attrs)
|
||||
return out
|
||||
|
||||
def antispoof(self, img: np.ndarray, bbox: tuple[float, float, float, float]) -> SpoofResult | None:
    """Run the MiniFASNet liveness check on the face inside `bbox`.

    Returns None when no antispoof model was configured, so the caller
    can surface FAILED_PRECONDITION instead of a silent fake verdict.
    """
    spoofer = self._antispoofer
    if spoofer is not None and spoofer.loaded:
        return spoofer.predict(img, bbox)
    return None
|
||||
|
||||
|
||||
# ─── OnnxDirectEngine ─────────────────────────────────────────────────
|
||||
|
||||
@@ -206,6 +334,7 @@ class OnnxDirectEngine:
|
||||
self.det_thresh: float = 0.5
|
||||
self._detector: Any = None
|
||||
self._recognizer: Any = None
|
||||
self._antispoofer: Antispoofer | None = None
|
||||
|
||||
def prepare(self, options: dict[str, str]) -> None:
|
||||
raw_det = options.get("detector_onnx", "")
|
||||
@@ -219,6 +348,7 @@ class OnnxDirectEngine:
|
||||
self.recognizer_path = _resolve_model_path(raw_rec, model_dir=model_dir)
|
||||
self.input_size = _parse_det_size(options.get("det_size", "320x320"))
|
||||
self.det_thresh = float(options.get("det_thresh", "0.5"))
|
||||
self._antispoofer = _build_antispoofer(options, model_dir)
|
||||
|
||||
# YuNet is a fixed-size detector; size is reset per detect() call to
|
||||
# match the input frame.
|
||||
@@ -286,6 +416,11 @@ class OnnxDirectEngine:
|
||||
for d in self.detect(img)
|
||||
]
|
||||
|
||||
def antispoof(self, img: np.ndarray, bbox: tuple[float, float, float, float]) -> SpoofResult | None:
    """Run the MiniFASNet liveness check on the face inside `bbox`.

    Returns None when no antispoof model was configured, so the caller
    can surface FAILED_PRECONDITION instead of a silent fake verdict.
    """
    spoofer = self._antispoofer
    if spoofer is not None and spoofer.loaded:
        return spoofer.predict(img, bbox)
    return None
|
||||
|
||||
|
||||
# ─── helpers ──────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ import sys
|
||||
import unittest
|
||||
|
||||
import cv2
|
||||
import grpc
|
||||
import numpy as np
|
||||
|
||||
sys.path.insert(0, os.path.dirname(__file__))
|
||||
@@ -39,6 +40,44 @@ OPENCV_FILES = [
|
||||
),
|
||||
]
|
||||
|
||||
# Silent-Face MiniFASNet ONNX files for antispoofing tests.
|
||||
ANTISPOOF_FILES = [
|
||||
(
|
||||
"MiniFASNetV2.onnx",
|
||||
"https://github.com/yakhyo/face-anti-spoofing/releases/download/weights/MiniFASNetV2.onnx",
|
||||
"b32929adc2d9c34b9486f8c4c7bc97c1b69bc0ea9befefc380e4faae4e463907",
|
||||
),
|
||||
(
|
||||
"MiniFASNetV1SE.onnx",
|
||||
"https://github.com/yakhyo/face-anti-spoofing/releases/download/weights/MiniFASNetV1SE.onnx",
|
||||
"ebab7f90c7833fbccd46d3a555410e78d969db5438e169b6524be444862b3676",
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
def _download_files(specs: list[tuple[str, str, str]], env_var: str, prefix: str) -> str | None:
|
||||
"""Download a list of (filename, uri, sha256) into a directory.
|
||||
|
||||
Returns the directory, or None if any download failed.
|
||||
"""
|
||||
import hashlib
|
||||
import tempfile
|
||||
import urllib.request
|
||||
|
||||
root = os.environ.get(env_var) or tempfile.mkdtemp(prefix=prefix)
|
||||
for filename, uri, sha256 in specs:
|
||||
dest = os.path.join(root, filename)
|
||||
if os.path.isfile(dest):
|
||||
if hashlib.sha256(open(dest, "rb").read()).hexdigest() == sha256:
|
||||
continue
|
||||
try:
|
||||
urllib.request.urlretrieve(uri, dest)
|
||||
except Exception:
|
||||
return None
|
||||
if hashlib.sha256(open(dest, "rb").read()).hexdigest() != sha256:
|
||||
return None
|
||||
return root
|
||||
|
||||
|
||||
def _encode(img: np.ndarray) -> str:
|
||||
_, buf = cv2.imencode(".jpg", img)
|
||||
@@ -48,14 +87,19 @@ def _encode(img: np.ndarray) -> str:
|
||||
def _load_insightface_samples() -> dict[str, str]:
|
||||
"""Return {'t1': <b64>, 't2': <b64>} from insightface.data.get_image.
|
||||
|
||||
t1 is a group photo, t2 a different one. We reuse both as
|
||||
stand-ins for "Alice photo 1/2" and "Bob".
|
||||
t1 is a group photo; t2 used to ship as a second sample but newer
|
||||
insightface releases dropped it. We fall back to `Tom_Hanks_54745`
|
||||
(also bundled) as a distinct second face.
|
||||
"""
|
||||
from insightface.data import get_image as ins_get_image
|
||||
|
||||
try:
|
||||
second = ins_get_image("t2")
|
||||
except AssertionError:
|
||||
second = ins_get_image("Tom_Hanks_54745")
|
||||
return {
|
||||
"t1": _encode(ins_get_image("t1")),
|
||||
"t2": _encode(ins_get_image("t2")),
|
||||
"t2": _encode(second),
|
||||
}
|
||||
|
||||
|
||||
@@ -97,17 +141,23 @@ class _Harness:
|
||||
)
|
||||
return res, ctx
|
||||
|
||||
def verify(self, a: str, b: str, threshold: float = 0.0):
|
||||
return self.svc.FaceVerify(
|
||||
backend_pb2.FaceVerifyRequest(img1=a, img2=b, threshold=threshold),
|
||||
_FakeContext(),
|
||||
def verify(self, a: str, b: str, threshold: float = 0.0, anti_spoofing: bool = False):
|
||||
ctx = _FakeContext()
|
||||
res = self.svc.FaceVerify(
|
||||
backend_pb2.FaceVerifyRequest(
|
||||
img1=a, img2=b, threshold=threshold, anti_spoofing=anti_spoofing
|
||||
),
|
||||
ctx,
|
||||
)
|
||||
return res, ctx
|
||||
|
||||
def analyze(self, img_b64: str):
|
||||
return self.svc.FaceAnalyze(
|
||||
backend_pb2.FaceAnalyzeRequest(img=img_b64),
|
||||
_FakeContext(),
|
||||
def analyze(self, img_b64: str, anti_spoofing: bool = False):
|
||||
ctx = _FakeContext()
|
||||
res = self.svc.FaceAnalyze(
|
||||
backend_pb2.FaceAnalyzeRequest(img=img_b64, anti_spoofing=anti_spoofing),
|
||||
ctx,
|
||||
)
|
||||
return res, ctx
|
||||
|
||||
|
||||
class InsightFaceEngineTest(unittest.TestCase):
|
||||
@@ -138,21 +188,21 @@ class InsightFaceEngineTest(unittest.TestCase):
|
||||
self.assertAlmostEqual(norm_sq, 1.0, places=2)
|
||||
|
||||
def test_verify_same_image(self):
|
||||
res = self.harness.verify(self.samples["t1"], self.samples["t1"])
|
||||
res, _ = self.harness.verify(self.samples["t1"], self.samples["t1"])
|
||||
self.assertTrue(res.verified)
|
||||
self.assertLess(res.distance, 0.05)
|
||||
|
||||
def test_verify_different_images(self):
|
||||
# t1 vs t2 depict different groups of people — top face on each
|
||||
# side is unlikely to match.
|
||||
res = self.harness.verify(self.samples["t1"], self.samples["t2"])
|
||||
res, _ = self.harness.verify(self.samples["t1"], self.samples["t2"])
|
||||
# We assert only that some numerical answer came back; the
|
||||
# matches-or-not determination depends on which face each side
|
||||
# picked and isn't a stable test assertion.
|
||||
self.assertGreaterEqual(res.distance, 0.0)
|
||||
|
||||
def test_analyze_has_age_and_gender(self):
|
||||
res = self.harness.analyze(self.samples["t1"])
|
||||
res, _ = self.harness.analyze(self.samples["t1"])
|
||||
self.assertGreater(len(res.faces), 0)
|
||||
for face in res.faces:
|
||||
self.assertGreater(face.face_confidence, 0.0)
|
||||
@@ -160,31 +210,29 @@ class InsightFaceEngineTest(unittest.TestCase):
|
||||
self.assertGreater(face.age, 0.0)
|
||||
self.assertIn(face.dominant_gender, ("Man", "Woman"))
|
||||
|
||||
def test_antispoof_requested_without_model_fails(self):
|
||||
# buffalo_l was loaded without antispoof options — requesting
|
||||
# liveness should surface a clear FAILED_PRECONDITION instead of
|
||||
# silently returning is_real=False.
|
||||
_, ctx = self.harness.verify(
|
||||
self.samples["t1"], self.samples["t1"], anti_spoofing=True
|
||||
)
|
||||
self.assertEqual(ctx.code, grpc.StatusCode.FAILED_PRECONDITION)
|
||||
self.assertIn("anti_spoofing", ctx.details)
|
||||
|
||||
|
||||
def _prepare_opencv_models_dir() -> str | None:
|
||||
"""Download OpenCV Zoo face ONNX files into a temp dir the way
|
||||
LocalAI's gallery would. Returns the directory, or None if
|
||||
downloads failed (network-restricted sandbox).
|
||||
"""
|
||||
import hashlib
|
||||
import tempfile
|
||||
import urllib.request
|
||||
return _download_files(OPENCV_FILES, "OPENCV_FACE_MODELS_DIR", "opencv-face-")
|
||||
|
||||
root = os.environ.get("OPENCV_FACE_MODELS_DIR") or tempfile.mkdtemp(
|
||||
prefix="opencv-face-"
|
||||
)
|
||||
for filename, uri, sha256 in OPENCV_FILES:
|
||||
dest = os.path.join(root, filename)
|
||||
if os.path.isfile(dest):
|
||||
if hashlib.sha256(open(dest, "rb").read()).hexdigest() == sha256:
|
||||
continue
|
||||
try:
|
||||
urllib.request.urlretrieve(uri, dest)
|
||||
except Exception:
|
||||
return None
|
||||
if hashlib.sha256(open(dest, "rb").read()).hexdigest() != sha256:
|
||||
return None
|
||||
return root
|
||||
|
||||
def _prepare_antispoof_models_dir(extra_dir: str | None = None) -> str | None:
    """Download the MiniFASNet ONNX files.

    When `extra_dir` is given, the weights land there next to any
    existing ones, so one `model_path` serves the detector/recognizer
    and the antispoof ensemble alike.
    """
    env_var = "ANTISPOOF_MODELS_DIR"
    if extra_dir is not None:
        os.environ.setdefault(env_var, extra_dir)
    return _download_files(ANTISPOOF_FILES, env_var, "antispoof-")
|
||||
|
||||
|
||||
class OnnxDirectEngineTest(unittest.TestCase):
|
||||
@@ -218,17 +266,79 @@ class OnnxDirectEngineTest(unittest.TestCase):
|
||||
self.assertGreater(len(res.embeddings), 0)
|
||||
|
||||
def test_verify_same_image(self):
|
||||
res = self.harness.verify(self.samples["t1"], self.samples["t1"], threshold=0.4)
|
||||
res, _ = self.harness.verify(self.samples["t1"], self.samples["t1"], threshold=0.4)
|
||||
self.assertTrue(res.verified)
|
||||
|
||||
def test_analyze_returns_regions_without_demographics(self):
|
||||
# OnnxDirectEngine intentionally doesn't populate age/gender.
|
||||
res = self.harness.analyze(self.samples["t1"])
|
||||
res, _ = self.harness.analyze(self.samples["t1"])
|
||||
self.assertGreater(len(res.faces), 0)
|
||||
for face in res.faces:
|
||||
self.assertEqual(face.dominant_gender, "")
|
||||
self.assertEqual(face.age, 0.0)
|
||||
|
||||
|
||||
class AntispoofingTest(unittest.TestCase):
    """End-to-end FaceVerify / FaceAnalyze with anti_spoofing=True.

    Loads the OpenCV-Zoo (Apache-2.0) face engine alongside the
    Silent-Face MiniFASNet ensemble. Real photos from insightface's
    bundled samples are expected to come back with non-zero antispoof
    scores in the valid [0, 1] range.

    NOTE: this class only exercises the real-photo path; the
    spoof-fixture path (a fake flipping the verdict) is covered by the
    Ginkgo e2e suite, which pins a known upstream spoof image.
    """

    @classmethod
    def setUpClass(cls):
        # Reuse one directory for both detector/recognizer + antispoof
        # weights so a single LoadModel options block points at all of them.
        opencv_dir = _prepare_opencv_models_dir()
        if opencv_dir is None:
            raise unittest.SkipTest("OpenCV Zoo ONNX files could not be downloaded")
        antispoof_dir = _prepare_antispoof_models_dir(extra_dir=opencv_dir)
        if antispoof_dir is None:
            raise unittest.SkipTest("MiniFASNet ONNX files could not be downloaded")

        # Antispoof only needs a single real-face sample; `t1` ships in
        # insightface.data across every release.
        from insightface.data import get_image as ins_get_image

        cls.samples = {"t1": _encode(ins_get_image("t1"))}
        cls.harness = _Harness(BackendServicer())
        load = cls.harness.load(
            [
                "engine:onnx_direct",
                "detector_onnx:face_detection_yunet_2023mar.onnx",
                "recognizer_onnx:face_recognition_sface_2021dec.onnx",
                "antispoof_v2_onnx:MiniFASNetV2.onnx",
                "antispoof_v1se_onnx:MiniFASNetV1SE.onnx",
            ],
            model_path=opencv_dir,
        )
        if not load.success:
            raise unittest.SkipTest(f"LoadModel failed: {load.message}")

    def test_verify_returns_per_image_liveness(self):
        res, ctx = self.harness.verify(
            self.samples["t1"], self.samples["t1"], threshold=0.4, anti_spoofing=True
        )
        self.assertIsNone(ctx.code, f"FaceVerify error: {ctx.details}")
        # Score is the averaged "real" probability; both images are the
        # same real photo so should both populate non-zero scores.
        self.assertGreater(res.img1_antispoof_score, 0.0)
        self.assertGreater(res.img2_antispoof_score, 0.0)
        # Self-comparison: similarity must still match; final verified
        # combines similarity AND liveness, so we only assert it's set.
        self.assertIsInstance(res.verified, bool)

    def test_analyze_populates_is_real_and_score(self):
        res, ctx = self.harness.analyze(self.samples["t1"], anti_spoofing=True)
        self.assertIsNone(ctx.code, f"FaceAnalyze error: {ctx.details}")
        self.assertGreater(len(res.faces), 0)
        for face in res.faces:
            # The antispoof score is a probability — must sit in [0, 1].
            self.assertGreaterEqual(face.antispoof_score, 0.0)
            self.assertLessEqual(face.antispoof_score, 1.0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
||||
Reference in New Issue
Block a user