React录制视频和人脸识别

一、录制视频

JavaScript 代码:
    import * as faceapi from "face-api.js";

    // Component state & refs for camera recording and face detection.
	const [initVideo, setInitVideo] = useState<boolean>(false);
	const videoRef = useRef<any>(null); // the <video> element showing the camera preview
	const mediaRecorderRef = useRef<any>(null); // active MediaRecorder instance
	const [recording, setRecording] = useState<boolean>(false); // whether recording has started
	const videoBlob = useRef<any>(null); // final recorded video Blob (assembled in onstop)
	const [isModelLoaded, setIsModelLoaded] = useState(false); // face-detection model load state
    const chunksRef = useRef<any>([]); // recorded data chunks collected via ondataavailable
	const intervalDetectFace = useRef<any>(null); // face-detection polling timer id
	const isDetectingRef = useRef(false); // guards against overlapping detection passes
	const isFaceModalOpenRef = useRef(false); // whether the "no face detected" dialog is open
	const streamRef = useRef<MediaStream | null>(null); // live camera MediaStream
	const startRecordTimeoutRef = useRef<number | null>(null); // timer id for the delayed recording start
	const uploadTimeoutRef = useRef<number | null>(null); // timer id for the post-stop upload
	const faceCountDownTimerRef = useRef<number | null>(null); // presumably the face-countdown timer id — not used in this excerpt, confirm
	const modalAlertRef = useRef<any>(null); // handle to the alert-modal component (handleShow/handleClose)
	const [currentFaceCountDown, setCurrentFaceCountDown] = useState<boolean>(false); // true while the face countdown runs (suspends detection)
JavaScript 代码:
	// Initialize the camera: request a low-resolution front-facing stream
	// and attach it to the <video> element for live preview.
	const setupCamera = async () => {
		const constraints: MediaStreamConstraints = {
			video: {
				facingMode: "user",
				width: { ideal: 320, max: 320 },
				height: { ideal: 240, max: 240 },
				frameRate: { ideal: 15, max: 15 }
			},
			audio: false
		};
		try {
			const cameraStream = await navigator.mediaDevices.getUserMedia(constraints);
			setStream(cameraStream);
			streamRef.current = cameraStream;
			console.log("=====初始化摄像头成功=====");
			setInitVideo(true);
			// Wire the stream into the preview element if it is mounted.
			if (videoRef.current) {
				videoRef.current.srcObject = cameraStream;
			}
		} catch (err: any) {
			console.error("获取摄像头失败:", err);
			setInitVideo(false);
			setRecording(false);
		}
	};

	// Schedule recording to begin after a short (700ms) delay,
	// cancelling any previously pending start request first.
	const startRecording = () => {
		const pendingStart = startRecordTimeoutRef.current;
		if (pendingStart) {
			window.clearTimeout(pendingStart);
			startRecordTimeoutRef.current = null;
		}
		startRecordTimeoutRef.current = window.setTimeout(() => {
			startRecordTimeoutRef.current = null;
			startRecordingDelay();
		}, 700);
	};

	// Create a MediaRecorder for the current camera stream and start capturing.
	// Picks the best supported container/codec via MediaRecorder.isTypeSupported
	// (preferred vp8 webm -> plain webm -> browser default) instead of the
	// previous triple-nested try/catch cascade.
	const startRecordingDelay = () => {
		console.log("=====开始录制=====");
		const currentStream = streamRef.current || stream;
		if (!currentStream) {
			console.log("没有可用的视频流");
			return;
		}

		chunksRef.current = [];

		if (typeof MediaRecorder === "undefined") {
			console.log("您的浏览器不支持MediaRecorder或指定的配置");
			return;
		}

		// Most specific supported mime type wins; undefined means "browser default".
		const preferredType = ["video/webm;codecs=vp8,opus", "video/webm"].find(
			type => MediaRecorder.isTypeSupported && MediaRecorder.isTypeSupported(type)
		);
		const options: MediaRecorderOptions = preferredType
			? { mimeType: preferredType, videoBitsPerSecond: 250000 }
			: {};

		try {
			mediaRecorderRef.current = new MediaRecorder(currentStream, options);
		} catch (e) {
			console.error("不支持指定的MIME类型:", e);
			// Last resort: let the browser choose everything.
			try {
				mediaRecorderRef.current = new MediaRecorder(currentStream);
			} catch (e2) {
				console.log("无法创建 MediaRecorder");
				return;
			}
		}

		try {
			mediaRecorderRef.current.ondataavailable = (event: BlobEvent) => {
				if (event.data && event.data.size > 0) {
					chunksRef.current.push(event.data);
				}
			};
			// Assemble the final Blob once recording stops.
			mediaRecorderRef.current.onstop = () => {
				videoBlob.current = new Blob(chunksRef.current, { type: "video/webm" });
			};
			// Surface recorder failures instead of dropping them silently (the
			// original registered no onerror handler).
			mediaRecorderRef.current.onerror = (event: Event) => {
				console.error("MediaRecorder 错误:", event);
			};

			mediaRecorderRef.current.start();
			setRecording(true);
			props.changeRecord && props.changeRecord(true);
		} catch (error) {
			console.log(error);
		}
	};

	// Stop the active recording (if any) and schedule the answer-video
	// upload shortly afterwards so the onstop Blob assembly can run first.
	const stopRecording = async () => {
		console.log("=====停止录制=====");
		const recorder = mediaRecorderRef.current;
		if (recorder && recording && recorder.state === "recording") {
			recorder.stop();
			setRecording(false);
		}

		const pendingUpload = uploadTimeoutRef.current;
		if (pendingUpload) {
			window.clearTimeout(pendingUpload);
			uploadTimeoutRef.current = null;
		}
		uploadTimeoutRef.current = window.setTimeout(() => {
			uploadTimeoutRef.current = null;
			updateAnswerVideo(props.currentQuestionId);
		}, 100);
	};
JavaScript 代码:
	// Run a single face-detection pass on the current video frame.
	const detectFaces = async () => {
		// Re-entrancy guard: skip if a previous pass is still in flight.
		if (isDetectingRef.current) return;
		isDetectingRef.current = true;
		const tf = (faceapi as any)?.tf;
		tf?.engine?.().startScope?.();
		try {
			const video = videoRef.current;
			if (!video || !video.srcObject) return;

			const detectorOptions = new faceapi.TinyFaceDetectorOptions({
				scoreThreshold: 0.3,
				inputSize: 128
			});
			// Landmarks (.withFaceLandmarks) are intentionally skipped — that step is very slow.
			const detection = await faceapi.detectSingleFace(video, detectorOptions);

			if (detection) {
				// Face is back in frame: dismiss the warning dialog if open.
				if (isFaceModalOpenRef.current) {
					isFaceModalOpenRef.current = false;
					modalAlertRef.current.handleClose();
				}
			} else {
				handleShowDEtectFace();
			}
		} catch (error) {
			console.error("人脸检测错误:", error);
		} finally {
			tf?.engine?.().endScope?.();
			isDetectingRef.current = false;
		}
	};

	// Show a warning dialog when no face is detected in the video.
	const handleShowDEtectFace = () => {
		if (isFaceModalOpenRef.current) return;
		isFaceModalOpenRef.current = true;
		// Confirm handler: close the dialog and start the face countdown.
		const onConfirm = () => {
			modalAlertRef.current.handleClose();
			isFaceModalOpenRef.current = false;
			setCurrentFaceCountDown(true);
			countDownFace();
		};
		modalAlertRef.current.handleShow({
			title: "操作提示",
			content: "请保持人脸在视频中,认真作答 ",
			actions: [
				{
					key: "confirm",
					primary: true,
					text: "确定",
					style: { background: "#00a2e9" },
					onClick: onConfirm
				}
			]
		});
	};

	useEffect(() => {
		// Load the face-detection model once on mount.
		let cancelled = false;
		const loadModels = async () => {
			await faceapi.nets.tinyFaceDetector.loadFromUri("./models");
			// BUG FIX: guard against setting state after the component has
			// unmounted — the async load can resolve after unmount.
			if (!cancelled) {
				setIsModelLoaded(true);
			}
		};
		loadModels().catch(console.error);
		// NOTE(review): recorded blob, countdowns and face-modal state should
		// also be cleared before leaving the page.
		return () => {
			cancelled = true;
		};
	}, []);

	// Poll face detection every 5s while recording is active and allowed.
	useEffect(() => {
		if (!isModelLoaded || !recording || currentFaceCountDown || !props.canRecordVideo) {
			// Detection disabled: close any open warning and stop polling.
			if (isFaceModalOpenRef.current) {
				isFaceModalOpenRef.current = false;
				modalAlertRef.current.handleClose();
			}
			if (intervalDetectFace.current) {
				clearInterval(intervalDetectFace.current);
				intervalDetectFace.current = null;
			}
			return;
		}
		if (intervalDetectFace.current) {
			clearInterval(intervalDetectFace.current);
		}
		intervalDetectFace.current = setInterval(() => {
			detectFaces();
		}, 5000); // 5s instead of 2s to reduce CPU load
		// BUG FIX: return a cleanup so the interval is cleared on unmount /
		// dependency change — the original effect leaked the timer when the
		// component unmounted while recording.
		return () => {
			if (intervalDetectFace.current) {
				clearInterval(intervalDetectFace.current);
				intervalDetectFace.current = null;
			}
		};
	}, [isModelLoaded, recording, currentFaceCountDown, props.canRecordVideo]);
相关推荐
小李子呢02112 小时前
前端八股Vue---Vue2和Vue3的区别,set up的用法
前端·javascript·vue.js
邂逅星河浪漫2 小时前
【银行内网开发-管理端】Vue管理端+Auth后台开发+Nginx配置+Linux部署(详细解析)
linux·javascript·css·vue.js·nginx·html·前后端联调
星空椰2 小时前
JavaScript 进阶基础:函数、作用域与常用技巧总结
开发语言·前端·javascript
奔跑的呱呱牛2 小时前
@giszhc/vue-page-motion:Vue3 路由动画怎么做才“丝滑”?(附在线示例)
前端·javascript·vue.js
EasyDSS3 小时前
私有化视频会议平台/智能会议管理系统EasyDSS一站式视频云平台重构企业数字化协作底座
重构·音视频
gCode Teacher 格码致知4 小时前
Javascript提高:小数精度和随机数-由Deepseek产生
开发语言·javascript·ecmascript
倾颜5 小时前
React 19 源码怎么读:目录结构、包关系、调试方式与主线问题
react.js
冴羽6 小时前
请愿书:Node.js 核心代码不应该包含 AI 代码!
前端·javascript·node.js
mmmmm123426 小时前
深入 DOM 查询底层:HTMLCollection 动态原理与 querySelectorAll 静态快照解析
前端·javascript
淸湫6 小时前
前端JavaScript:数据类型、实例对象 、内置对象、构造函数之间的关系
javascript