需求:浏览器通过模型检测前方是否有人(距离和正脸),检测到之后拍照随机保存一帧
实现步骤:
- 获取浏览器的摄像头权限
- 创建video标签并通过video标签展示摄像头影像
- 创建canvas标签并通过canvas标签绘制摄像头影像并展示
- 将canvas的当前帧转成图片展示保存
pnpm install @vladmandic/face-api 下载依赖
pnpm install @vladmandic/face-api
下载model模型
将下载的model模型放到项目的public文件中 如下图
创建video和canvas标签
<video ref="videoRef" style="display: none"></video>
<template v-if="!picture || picture == ''">
<canvas ref="canvasRef" width="400" height="400"></canvas>
</template>
<template v-else>
<img ref="image" :src="picture" alt="" />
</template>
</div>
.video {
width: 400px;
height: 400px;
border-radius: 50%;
overflow: hidden;
position: fixed;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
.video_box {
position: fixed;
width: 400px;
height: 400px;
border-radius: 50%;
overflow: hidden;
}
@keyframes moveToTopLeft {
0% {
right: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
100% {
right: -68px;
top: -68px;
transform: scale(0.5);
}
}
.video_box {
animation: moveToTopLeft 2s ease forwards;
}
介绍分析
video 类选择器 让视频流居中
picture变量 判断是否转成照片
video_box视频流的某一帧转成照片后 动态移动到屏幕右上角
主要逻辑代码 主要逻辑代码 主要逻辑代码!!!
import
import * as faceApi from '@vladmandic/face-api'
const videoRef = ref() // <video> element that receives the raw camera stream (hidden)
const options = ref(null) // SsdMobilenetv1Options instance, populated in init()
const canvasRef = ref(null) // <canvas> element the video frames are drawn onto
let timeout = null // handle of the detection loop's setTimeout, cleared when video pauses
// 初始化人脸识别
const init = async () => {
await faceApi.nets.ssdMobilenetv1.loadFromUri("/models") //人脸检测
// await faceApi.nets.tinyFaceDetector.loadFromUri("/models") //人脸检测 人和摄像头距离打开
await faceApi.nets.faceLandmark68Net.loadFromUri("/models") //特征检测 人和摄像头距离必须打开
// await faceApi.nets.faceRecognitionNet.loadFromUri("/models") //识别人脸
// await faceApi.nets.faceExpressionNet.loadFromUri("/models") //识别表情,开心,沮丧,普通
// await faceApi.loadFaceLandmarkModel("/models");
options.value = new faceApi.SsdMobilenetv1Options({
minConfidence: 0.5, // 0.1 ~ 0.9
});
await cameraOptions()
}
// 打开摄像头
const cameraOptions = async() => {
let constraints = {
video: true
}
// 如果不是通过loacalhost或者通过https访问会将报错捕获并提示
try {
if (navigator.mediaDevices) {
navigator.mediaDevices.getUserMedia(constraints).then((MediaStream) => {
// 返回参数
videoRef.value.srcObject = MediaStream;
videoRef.value.play();
recognizeFace()
}).catch((error) => {
console.log(error);
});
} else {
console.log('浏览器不支持开启摄像头,请更换浏览器')
}
} catch (err) {
console.log('非https访问')
}
}
// 检测人脸
const recognizeFace = async () => {
if (videoRef.value.paused) return clearTimeout(timeout);
canvasRef.value.getContext('2d', { willReadFrequently: true }).drawImage(videoRef.value, 0, 0, 400, 400);
// 直接检测人脸 灵敏较高
// const results = await new faceApi.DetectAllFacesTask(canvasRef.value, options.value).withFaceLandmarks();
// if (results.length > 0) {
// photoShoot()
// }
// 计算人与摄像头距离和是否正脸
const results = await new faceApi.detectSingleFace(canvasRef.value, options.value).withFaceLandmarks()
if (results) {
// 计算距离
const { positions } = results.landmarks;
const leftPoint = positions[0];
const rightPoint = positions[16];
// length 可以代替距离的判断 距离越近 length值越大
const length = Math.sqrt(
Math.pow(leftPoint.x - rightPoint.x, 2) +
Math.pow(leftPoint.y - rightPoint.y, 2),
);
// 计算是否正脸
const { roll, pitch, yaw } = results.angle
//roll水平角度 pitch上下角度 yaw 扭头角度
console.log(roll, pitch, yaw, length)
if (roll >= -10 && roll <= 10 && pitch >= -10 && pitch <= 10 && yaw>= -20 && yaw <= 20 && length >= 90 && length <= 110) {
photoShoot()
}
}
timeout = setTimeout(() => {
return recognizeFace()
}, 0)
}
// Object URL of the captured photo; null/empty while still detecting.
const picture = ref(null)
// Capture the current canvas frame, shut the camera down, and expose the
// frame as an object URL for the <img> element.
const photoShoot = () => {
  // Snapshot the canvas as a base64 PNG data URL.
  const dataUrl = canvasRef.value.toDataURL("image/png");
  // Stop EVERY track of the stream, not just the first one, so the camera
  // is fully released (the original only stopped getTracks()[0]).
  videoRef.value.srcObject.getTracks().forEach((track) => track.stop())
  videoRef.value.pause()
  if (dataUrl) {
    // Convert the base64 snapshot into a File, then into an object URL.
    const blob = dataURLtoBlob(dataUrl);
    const file = blobToFile(blob, "imgName");
    picture.value = window.URL.createObjectURL(file)
  } else {
    console.log('canvas生成失败')
  }
}
/**
 * Convert a base64 data URL into a Blob.
 * @param {string} dataurl - data URL, e.g. "data:image/png;base64,...."
 * @returns {Blob} blob whose MIME type is taken from the data URL header
 */
const dataURLtoBlob = (dataurl) => {
  const [header, base64] = dataurl.split(',');
  const mime = header.match(/:(.*?);/)[1];
  const binary = atob(base64);
  const bytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; i += 1) {
    bytes[i] = binary.charCodeAt(i);
  }
  return new Blob([bytes], {
    type: mime
  });
}
/**
* 生成文件信息
* theBlob 文件
* fileName 文件名字
*/
const blobToFile = (theBlob, fileName) => {
theBlob.lastModifiedDate = new Date().toLocaleDateString();
theBlob.name = fileName;
return theBlob;
}
// True when `number` lies within the inclusive interval [start, end].
const isInRange = (number, start, end) => !(number < start || number > end)
export { init, videoRef, canvasRef, timeout, picture }
<video ref="videoRef" style="display: none"></video>
<template v-if="!picture || picture == ''">
<canvas ref="canvasRef" width="400" height="400"></canvas>
</template>
<template v-else>
<img ref="image" :src="picture" alt="" />
</template>
</div>
.video {
width: 400px;
height: 400px;
border-radius: 50%;
overflow: hidden;
position: fixed;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
.video_box {
position: fixed;
width: 400px;
height: 400px;
border-radius: 50%;
overflow: hidden;
}
@keyframes moveToTopLeft {
0% {
right: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
100% {
right: -68px;
top: -68px;
transform: scale(0.5);
}
}
.video_box {
animation: moveToTopLeft 2s ease forwards;
}
介绍分析
video 类选择器 让视频流居中
picture变量 判断是否转成照片
video_box视频流的某一帧转成照片后 动态移动到屏幕右上角
主要逻辑代码 主要逻辑代码 主要逻辑代码!!!
import
import * as faceApi from '@vladmandic/face-api'
const videoRef = ref() // <video> element that receives the raw camera stream (hidden)
const options = ref(null) // SsdMobilenetv1Options instance, populated in init()
const canvasRef = ref(null) // <canvas> element the video frames are drawn onto
let timeout = null // handle of the detection loop's setTimeout, cleared when video pauses
// 初始化人脸识别
const init = async () => {
await faceApi.nets.ssdMobilenetv1.loadFromUri("/models") //人脸检测
// await faceApi.nets.tinyFaceDetector.loadFromUri("/models") //人脸检测 人和摄像头距离打开
await faceApi.nets.faceLandmark68Net.loadFromUri("/models") //特征检测 人和摄像头距离必须打开
// await faceApi.nets.faceRecognitionNet.loadFromUri("/models") //识别人脸
// await faceApi.nets.faceExpressionNet.loadFromUri("/models") //识别表情,开心,沮丧,普通
// await faceApi.loadFaceLandmarkModel("/models");
options.value = new faceApi.SsdMobilenetv1Options({
minConfidence: 0.5, // 0.1 ~ 0.9
});
await cameraOptions()
}
// 打开摄像头
// Open the camera, attach the stream to the <video> element, and start the
// recognition loop. getUserMedia exists only on secure contexts (https or
// localhost); elsewhere navigator.mediaDevices is undefined.
// NOTE: the original wrapped a .then() chain in try/catch — the catch could
// never fire, since getUserMedia rejections were already handled by .catch().
// Awaiting the promise lets one try/catch handle every failure mode.
const cameraOptions = async () => {
  const constraints = {
    video: true
  }
  if (!navigator.mediaDevices) {
    // Unsupported browser, or an insecure (non-https, non-localhost) origin.
    console.log('浏览器不支持开启摄像头,请更换浏览器')
    return
  }
  try {
    const stream = await navigator.mediaDevices.getUserMedia(constraints)
    videoRef.value.srcObject = stream;
    videoRef.value.play();
    recognizeFace()
  } catch (error) {
    // Permission denied, device in use, or no camera available.
    console.log(error);
  }
}
// 检测人脸
// Detection loop: draw the current video frame onto the canvas, run
// single-face detection with landmarks, and take a photo when the face is
// within the target distance band and roughly facing the camera.
// Reschedules itself via setTimeout until the video is paused.
const recognizeFace = async () => {
  if (videoRef.value.paused) return clearTimeout(timeout);
  canvasRef.value.getContext('2d', { willReadFrequently: true }).drawImage(videoRef.value, 0, 0, 400, 400);
  // Alternative: detect all faces (more sensitive, fires on any face):
  // const results = await faceApi.detectAllFaces(canvasRef.value, options.value).withFaceLandmarks();
  // if (results.length > 0) photoShoot()
  // detectSingleFace is a factory function — calling it with `new` was incorrect.
  const results = await faceApi.detectSingleFace(canvasRef.value, options.value).withFaceLandmarks()
  if (results) {
    // Distance proxy: pixel distance between the two outer jaw landmarks
    // (points 0 and 16). The closer the face, the larger this value.
    const { positions } = results.landmarks;
    const leftPoint = positions[0];
    const rightPoint = positions[16];
    const length = Math.hypot(
      leftPoint.x - rightPoint.x,
      leftPoint.y - rightPoint.y,
    );
    // Pose angles: roll = head tilt, pitch = up/down, yaw = left/right turn.
    const { roll, pitch, yaw } = results.angle
    console.log(roll, pitch, yaw, length)
    // Use the isInRange helper (defined below) instead of a raw 8-clause condition.
    if (
      isInRange(roll, -10, 10) &&
      isInRange(pitch, -10, 10) &&
      isInRange(yaw, -20, 20) &&
      isInRange(length, 90, 110)
    ) {
      photoShoot()
    }
  }
  timeout = setTimeout(() => recognizeFace(), 0)
}
// Object URL of the captured photo; null/empty while still detecting.
const picture = ref(null)
// Capture the current canvas frame, shut the camera down, and expose the
// frame as an object URL for the <img> element.
const photoShoot = () => {
  // Snapshot the canvas as a base64 PNG data URL.
  const dataUrl = canvasRef.value.toDataURL("image/png");
  // Stop EVERY track of the stream, not just the first one, so the camera
  // is fully released (the original only stopped getTracks()[0]).
  videoRef.value.srcObject.getTracks().forEach((track) => track.stop())
  videoRef.value.pause()
  if (dataUrl) {
    // Convert the base64 snapshot into a File, then into an object URL.
    const blob = dataURLtoBlob(dataUrl);
    const file = blobToFile(blob, "imgName");
    picture.value = window.URL.createObjectURL(file)
  } else {
    console.log('canvas生成失败')
  }
}
/**
 * Convert a base64 data URL into a Blob.
 * @param {string} dataurl - data URL, e.g. "data:image/png;base64,...."
 * @returns {Blob} blob whose MIME type is taken from the data URL header
 */
const dataURLtoBlob = (dataurl) => {
  const [header, base64] = dataurl.split(',');
  const mime = header.match(/:(.*?);/)[1];
  const binary = atob(base64);
  const bytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; i += 1) {
    bytes[i] = binary.charCodeAt(i);
  }
  return new Blob([bytes], {
    type: mime
  });
}
/**
* 生成文件信息
* theBlob 文件
* fileName 文件名字
*/
const blobToFile = (theBlob, fileName) => {
theBlob.lastModifiedDate = new Date().toLocaleDateString();
theBlob.name = fileName;
return theBlob;
}
// True when `number` lies within the inclusive interval [start, end].
const isInRange = (number, start, end) => !(number < start || number > end)
export { init, videoRef, canvasRef, timeout, picture }