Implementing Liveness Face Detection on the Frontend

Reposted from: https://juejin.cn/post/7145732134630588447
Author: wangpeng1478

Preface

Most web implementations of liveness face detection have the frontend record a short video and the backend call a third-party API to verify it. In this post we implement the whole feature on the frontend alone.

Creating the Face Model

We load TensorFlow's pre-trained face landmark detection model (MediaPipe FaceMesh), which predicts 468 3D face landmarks (478 when refineLandmarks is enabled, which adds iris points) and infers the approximate geometry of the face.
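The article doesn't show its imports; a minimal setup sketch, assuming the npm packages from the TensorFlow.js face-landmarks-detection README:

// Assumed imports; with runtime 'mediapipe', the @mediapipe/face_mesh package
// (or the equivalent <script> tags) must also be available.
import * as faceLandmarksDetection from '@tensorflow-models/face-landmarks-detection';
import '@mediapipe/face_mesh';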

async createDetector() {
  const model = faceLandmarksDetection.SupportedModels.MediaPipeFaceMesh;
  const detectorConfig = {
    maxFaces: 1,           // maximum number of faces to detect
    refineLandmarks: true, // refines the landmark coordinates around the eyes and lips, and outputs additional landmarks around the irises
    runtime: 'mediapipe',
    solutionPath: 'https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh', // path to the WASM binaries and model files
  };
  this.detector = await faceLandmarksDetection.createDetector(model, detectorConfig);
}

Face Detection

async renderPrediction() {
  const video = this.$refs['video'];
  const canvas = this.$refs['canvas'];
  const context = canvas.getContext('2d');
  context.clearRect(0, 0, canvas.width, canvas.height);
  const faces = await this.detector.estimateFaces(video, {
    flipHorizontal: false, // whether to mirror the result horizontally
  });
  if (faces.length > 0) {
    this.log(`Face detected`);
  } else {
    this.log(`No face detected`);
  }
}
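The article also doesn't show how the camera is wired up. A minimal sketch, assuming a Vue component with video and canvas refs plus width/height data (the same properties the checks below rely on):

async startCamera() {
  // request the user's camera and bind the stream to the video element
  const stream = await navigator.mediaDevices.getUserMedia({
    video: { width: this.width, height: this.height },
    audio: false,
  });
  const video = this.$refs['video'];
  video.srcObject = stream;
  await video.play();
  // run detection once per animation frame
  const loop = async () => {
    await this.renderPrediction();
    requestAnimationFrame(loop);
  };
  loop();
}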

Feature Detection

Face feature extraction means judging an action from specific facial features (the action checks below are for reference only; in practice, several feature points should be combined to judge any single action).

Moving the Face Closer or Farther

Take the ratio of the face box to the full frame over 4 frames; check that the sequence is monotonically increasing or decreasing, then compare the first and last frames' ratios against a threshold to decide whether the face is moving closer or farther away.

isFarAndNear(face) {
  // face bounding box as a percentage of the full frame
  const proportion = this.GetPercent(face.box.width * face.box.height, this.width * this.height);
  this.isFarArr.push(proportion);
  // evaluate the change across 4 frames
  if (this.isFarArr.length > 4) {
    this.isFarArr.shift();
    // monotonically increasing or decreasing
    if (this.Increment(this.isFarArr) || this.Decrease(this.isFarArr)) {
      const first = this.isFarArr[0];
      const last = this.isFarArr[this.isFarArr.length - 1];
      const diff = this.GetPercent(first - last, first + last);
      if (diff <= -5) {
        this.log(`[Action] Moving closer`, `info`);
      }
      if (diff >= 5) {
        this.log(`[Action] Moving away`, `primary`);
      }
    }
  }
},
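The helpers GetPercent, getDistance, Increment and Decrease are not shown in the article; a plausible sketch of what they compute, judging from the call sites:

// Percentage of a relative to b (negative when a is negative)
GetPercent(a, b) {
  return (a / b) * 100;
},
// Euclidean distance between two points
getDistance(x1, y1, x2, y2) {
  return Math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2);
},
// True if the array is strictly increasing
Increment(arr) {
  return arr.every((v, i) => i === 0 || v > arr[i - 1]);
},
// True if the array is strictly decreasing
Decrease(arr) {
  return arr.every((v, i) => i === 0 || v < arr[i - 1]);
},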

Opening the Mouth

Take 2 frames of the ratio of the distance between keypoints [0, 17] (upper and lower lip) to the distance between [10, 152] (forehead to chin); check that it is increasing, compare the first and last frames, and judge mouth opening against a threshold.

isOpenMouth(face, ctx) {
  const featureIndex1 = [0, 17];   // upper and lower lip
  const featureLocation1 = [];
  const featureIndex2 = [10, 152]; // forehead and chin
  const featureLocation2 = [];

  (face.keypoints || []).forEach((element, index) => {
    if (featureIndex1.includes(index)) {
      featureLocation1.push([element.x, element.y]);
    }
    if (featureIndex2.includes(index)) {
      featureLocation2.push([element.x, element.y]);
    }
  });

  // ratio of the [0, 17] lip distance to the [10, 152] face height
  const proportion = this.GetPercent(
    this.getDistance(
      featureLocation1[0][0],
      featureLocation1[0][1],
      featureLocation1[1][0],
      featureLocation1[1][1],
    ),
    this.getDistance(
      featureLocation2[0][0],
      featureLocation2[0][1],
      featureLocation2[1][0],
      featureLocation2[1][1],
    ),
  );
  this.isOpenMouthArr.push(proportion);

  // evaluate the change across 2 frames
  if (this.isOpenMouthArr.length > 2) {
    this.isOpenMouthArr.shift();
    if (this.Increment(this.isOpenMouthArr)) {
      const first = this.isOpenMouthArr[0];
      const last = this.isOpenMouthArr[this.isOpenMouthArr.length - 1];
      const diff = this.GetPercent(first - last, first + last);
      if (diff <= -5) {
        this.log(`[Action] Mouth opened`, `info`);
      }
    }
  }
}
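A quick worked example of the threshold logic, with illustrative numbers: if the lip gap is about 8% of the face height with the mouth closed and about 25% with it open, then diff = GetPercent(8 - 25, 8 + 25) ≈ -51.5, well below the -5 threshold, so the mouth-open action fires.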

Blinking

Using the distance between left-eye keypoints [159, 144] and right-eye keypoints [385, 374]: if the distance stays below a threshold for 4 consecutive frames, a blink is detected.

isWink(face, ctx) {
  const leftEye = [159, 144];  // upper and lower eyelid of the left eye
  const leftEyeLocation = [];
  const rightEye = [385, 374]; // upper and lower eyelid of the right eye
  const rightEyeLocation = [];
  (face.keypoints || []).forEach((element, index) => {
    if (leftEye.includes(index)) {
      leftEyeLocation.push([element.x, element.y]);
    }
    if (rightEye.includes(index)) {
      rightEyeLocation.push([element.x, element.y]);
    }
  });
  const leftProportion = this.getDistance(
    leftEyeLocation[0][0],
    leftEyeLocation[0][1],
    leftEyeLocation[1][0],
    leftEyeLocation[1][1],
  );
  const rightProportion = this.getDistance(
    rightEyeLocation[0][0],
    rightEyeLocation[0][1],
    rightEyeLocation[1][0],
    rightEyeLocation[1][1],
  );
  // note: a fixed 5px threshold is sensitive to camera resolution and face size
  if (leftProportion <= 5 || rightProportion <= 5) {
    this.isWinkArr.push([leftProportion, rightProportion]);
    // 4 consecutive frames count as one blink
    if (this.isWinkArr.length >= 4) {
      this.log(`[Action] Blink`, `info`);
      this.isWinkArr = [];
    }
  } else {
    this.isWinkArr = [];
  }
}

Shaking the Head Left and Right

Using the distance between left-face keypoints [195, 93] and right-face keypoints [195, 323]: take 4 frames of the difference between the two distances and judge a left or right turn from its magnitude and sign.

isShakingHisHead(face, ctx) {
  const leftFace = [195, 93];   // left-face keypoints
  const leftFaceLocation = [];
  const rightFace = [195, 323]; // right-face keypoints
  const rightFaceLocation = [];

  (face.keypoints || []).forEach((element, index) => {
    if (leftFace.includes(index)) {
      leftFaceLocation.push([element.x, element.y]);
    }
    if (rightFace.includes(index)) {
      rightFaceLocation.push([element.x, element.y]);
    }
  });

  const leftProportion = this.getDistance(
    leftFaceLocation[0][0],
    leftFaceLocation[0][1],
    leftFaceLocation[1][0],
    leftFaceLocation[1][1],
  );
  const rightProportion = this.getDistance(
    rightFaceLocation[0][0],
    rightFaceLocation[0][1],
    rightFaceLocation[1][0],
    rightFaceLocation[1][1],
  );

  // relative difference: roughly -40 when turned left, +40 when turned right
  const diff = this.GetPercent(leftProportion - rightProportion, leftProportion + rightProportion);
  this.isShakingHisHeadArr.push(diff);

  // evaluate the change across 4 frames
  if (this.isShakingHisHeadArr.length > 4) {
    this.isShakingHisHeadArr.shift();
    // empirical thresholds: a left turn holds diff near -40, a right turn near +40
    const isL = this.isShakingHisHeadArr.every(e => e <= -40);
    const isR = this.isShakingHisHeadArr.every(e => e >= 40);
    if (isL) {
      this.log(`[Action] Turned left`, `info`);
    }
    if (isR) {
      this.log(`[Action] Turned right`, `info`);
    }
  }
}
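Putting it together: a minimal sketch of how the per-frame handler from the face detection section might dispatch to each check (same assumed Vue component as above):

async renderPrediction() {
  const video = this.$refs['video'];
  const canvas = this.$refs['canvas'];
  const ctx = canvas.getContext('2d');
  ctx.clearRect(0, 0, canvas.width, canvas.height);
  const faces = await this.detector.estimateFaces(video, { flipHorizontal: false });
  if (faces.length > 0) {
    const face = faces[0];
    this.isFarAndNear(face);
    this.isOpenMouth(face, ctx);
    this.isWink(face, ctx);
    this.isShakingHisHead(face, ctx);
  }
}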

Documentation

TensorFlow documentation
GitHub repository