一、录制视频
javascript
import * as faceapi from "face-api.js";
// Component state & refs for camera recording and periodic face detection.
const [initVideo, setInitVideo] = useState<boolean>(false); // whether the camera was initialized successfully
const videoRef = useRef<any>(null); // <video> element showing the live camera stream
const mediaRecorderRef = useRef<any>(null); // active MediaRecorder instance
const [recording, setRecording] = useState<boolean>(false); // whether recording has started
const videoBlob = useRef<any>(null); // final recorded video Blob (assembled in MediaRecorder onstop)
const [isModelLoaded, setIsModelLoaded] = useState(false); // face-detection model load state
const chunksRef = useRef<any>([]); // accumulated MediaRecorder data chunks
const intervalDetectFace = useRef<any>(null); // interval id for the periodic face-detection loop
const isDetectingRef = useRef(false); // re-entrancy guard: a detection pass is currently in flight
const isFaceModalOpenRef = useRef(false); // whether the "face not detected" alert modal is open
const streamRef = useRef<MediaStream | null>(null); // camera MediaStream; ref mirror of the stream state for use in callbacks
const startRecordTimeoutRef = useRef<number | null>(null); // pending timeout id delaying the recording start
const uploadTimeoutRef = useRef<number | null>(null); // pending timeout id deferring the answer-video upload
const faceCountDownTimerRef = useRef<number | null>(null); // countdown timer id — presumably used by countDownFace(); not visible here, confirm
const modalAlertRef = useRef<any>(null); // imperative handle to the alert-modal component (handleShow/handleClose)
const [currentFaceCountDown, setCurrentFaceCountDown] = useState<boolean>(false); // face countdown active (pauses face detection)
javascript
// 相机初始化
const setupCamera = async () => {
try {
const mediaStream = await navigator.mediaDevices.getUserMedia({
video: {
facingMode: "user",
width: { ideal: 320, max: 320 },
height: { ideal: 240, max: 240 },
frameRate: { ideal: 15, max: 15 }
},
audio: false
});
setStream(mediaStream);
streamRef.current = mediaStream;
console.log("=====初始化摄像头成功=====");
setInitVideo(true);
// 将视频流设置到video元素
if (videoRef.current) {
videoRef.current.srcObject = mediaStream;
}
} catch (err: any) {
console.error("获取摄像头失败:", err);
setInitVideo(false);
setRecording(false);
}
};
// Start recording after a short (700ms) delay. Any previously scheduled start
// is cancelled first so only the most recent request takes effect.
const startRecording = () => {
  const pending = startRecordTimeoutRef.current;
  if (pending) {
    window.clearTimeout(pending);
    startRecordTimeoutRef.current = null;
  }
  startRecordTimeoutRef.current = window.setTimeout(() => {
    startRecordTimeoutRef.current = null;
    startRecordingDelay();
  }, 700);
};
// Actual recording start (runs ~700ms after startRecording is invoked).
// Picks the best supported MediaRecorder configuration, wires up the
// data/stop handlers, starts the recorder, and flips the recording flag.
const startRecordingDelay = () => {
  console.log("=====开始录制=====");
  const currentStream = streamRef.current || stream;
  if (!currentStream) {
    console.log("没有可用的视频流");
    return;
  }
  chunksRef.current = [];
  // Candidate configs in preference order; the final empty entry lets the
  // browser choose its own defaults as a last resort.
  const candidates: MediaRecorderOptions[] = [
    { mimeType: "video/webm;codecs=vp8,opus", videoBitsPerSecond: 250000 },
    { mimeType: "video/webm", videoBitsPerSecond: 250000 },
    {}
  ];
  let recorder: MediaRecorder | null = null;
  for (const options of candidates) {
    // Prefer MediaRecorder.isTypeSupported over relying solely on
    // constructor exceptions for capability detection.
    if (
      options.mimeType &&
      typeof MediaRecorder.isTypeSupported === "function" &&
      !MediaRecorder.isTypeSupported(options.mimeType)
    ) {
      console.log("不支持指定的MIME类型:", options.mimeType);
      continue;
    }
    try {
      recorder = new MediaRecorder(currentStream, options);
      break;
    } catch (e) {
      console.error("不支持指定的MIME类型:", e);
    }
  }
  if (!recorder) {
    console.log("无法创建 MediaRecorder");
    return;
  }
  mediaRecorderRef.current = recorder;
  try {
    recorder.ondataavailable = (event: any) => {
      if (event.data && event.data.size > 0) {
        chunksRef.current.push(event.data);
      }
    };
    recorder.onstop = () => {
      // Assemble all recorded chunks into a single Blob for later upload.
      const blob = new Blob(chunksRef.current, { type: "video/webm" });
      videoBlob.current = blob;
    };
    recorder.start();
    setRecording(true);
    // Notify the parent, if it supplied a callback.
    props.changeRecord?.(true);
  } catch (error) {
    console.log(error);
  }
};
// Stop recording: stop the MediaRecorder (its onstop handler assembles the
// final Blob) and schedule the answer-video upload shortly afterwards.
const stopRecording = async () => {
console.log("=====停止录制=====");
// Only stop when the recorder is actually running.
if (mediaRecorderRef.current && recording && mediaRecorderRef.current.state === "recording") {
mediaRecorderRef.current.stop();
setRecording(false);
}
// Cancel any previously scheduled upload so at most one is pending.
if (uploadTimeoutRef.current) {
window.clearTimeout(uploadTimeoutRef.current);
uploadTimeoutRef.current = null;
}
// NOTE(review): the 100ms delay races with the recorder's asynchronous
// onstop event — videoBlob.current may not be populated yet when
// updateAnswerVideo runs. Confirm the upload reads the Blob later, or
// consider triggering the upload from the onstop handler instead.
uploadTimeoutRef.current = window.setTimeout(() => {
uploadTimeoutRef.current = null;
updateAnswerVideo(props.currentQuestionId);
}, 100);
};
javascript
// Face detection: run a single detection pass on the current video frame.
// Guarded against overlapping runs; the work is wrapped in a tf.js memory
// scope so intermediate tensors get released.
const detectFaces = async () => {
  if (isDetectingRef.current) return;
  isDetectingRef.current = true;
  const tf = (faceapi as any)?.tf;
  tf?.engine?.().startScope?.();
  try {
    const video = videoRef.current;
    if (!video || !video.srcObject) return;
    // Landmark extraction is deliberately skipped — it is far more expensive
    // than plain detection and is not needed here.
    const detectorOptions = new faceapi.TinyFaceDetectorOptions({
      scoreThreshold: 0.3,
      inputSize: 128
    });
    const detection = await faceapi.detectSingleFace(video, detectorOptions);
    if (!detection) {
      // No face in frame: show the warning modal.
      handleShowDEtectFace();
    } else if (isFaceModalOpenRef.current) {
      // Face is back: dismiss the open warning modal.
      isFaceModalOpenRef.current = false;
      modalAlertRef.current.handleClose();
    }
  } catch (error) {
    console.error("人脸检测错误:", error);
  } finally {
    tf?.engine?.().endScope?.();
    isDetectingRef.current = false;
  }
};
// Show the "no face detected" alert modal (at most one open at a time).
const handleShowDEtectFace = () => {
  if (isFaceModalOpenRef.current) return;
  isFaceModalOpenRef.current = true;
  const confirmAction = {
    key: "confirm",
    primary: true,
    text: "确定",
    style: { background: "#00a2e9" },
    onClick: () => {
      // Dismiss the modal, then start the face countdown.
      modalAlertRef.current.handleClose();
      isFaceModalOpenRef.current = false;
      setCurrentFaceCountDown(true);
      countDownFace();
    }
  };
  modalAlertRef.current.handleShow({
    title: "操作提示",
    content: "请保持人脸在视频中,认真作答 ",
    actions: [confirmAction]
  });
};
useEffect(() => {
// Load the TinyFaceDetector model weights once on mount; weights are
// served from the app's "./models" directory.
const loadModels = async () => {
await faceapi.nets.tinyFaceDetector.loadFromUri("./models");
setIsModelLoaded(true);
};
loadModels().catch(console.error);
// Before leaving, remember to clear the recorded video Blob, countdown
// timers, face-detection state, etc.
// NOTE(review): this effect returns no cleanup function — confirm that
// teardown happens elsewhere (e.g. on route exit).
}, []);
// Drive the periodic face-detection loop. It runs only while the model is
// loaded, recording is active, no face countdown is in progress, and video
// recording is allowed; otherwise any open warning modal is closed and the
// loop is stopped.
useEffect(() => {
  const stopLoop = () => {
    if (intervalDetectFace.current) {
      clearInterval(intervalDetectFace.current);
      intervalDetectFace.current = null;
    }
  };
  if (!isModelLoaded || !recording || currentFaceCountDown || !props.canRecordVideo) {
    if (isFaceModalOpenRef.current) {
      isFaceModalOpenRef.current = false;
      modalAlertRef.current.handleClose();
    }
    stopLoop();
    return;
  }
  stopLoop();
  // 5s cadence (lowered from 2s) to reduce CPU usage.
  intervalDetectFace.current = setInterval(() => {
    detectFaces();
  }, 5000);
  // Fix: clear the interval on unmount/dependency change — the original
  // effect returned no cleanup, leaking the timer after unmount.
  return stopLoop;
}, [isModelLoaded, recording, currentFaceCountDown, props.canRecordVideo]);