视频解析图片识别:人脸采集并切图(video-clip-images,基于 face-api.js)

video-clip-images

采集视频中的人脸并截取

  • Demo是通过face-api实现的。具体内容可前往Github:face-api
  • 注: 返回大部分使用的都是base64
  • Demo地址放在了码云上:video-clip-images

目录结构

video-clip-images/
├── face-api.js-master/
│ ├── weights/ 模型
│ │ └── ...
├── js/ 脚本
│ └── ...
└── index.html

第一步加载模型

// Load the TinyFaceDetector model weights; detection calls below require
// this to have completed first.
const weightsUri = "./face-api.js-master/weights";
await faceapi.nets.tinyFaceDetector.loadFromUri(weightsUri);

第二步 通过传入的url 获取每一秒的图片

 async getVideoFace(url) {
 const video = document.createElement("video");
 const canvas = document.createElement("canvas");
 video.src = url;
 await new Promise((resolve) => {
 video.addEventListener("loadedmetadata", () => {
 resolve();
 });
 });
 const duration = video.duration;
 const ctx = canvas.getContext("2d");
 canvas.width = video.videoWidth;
 canvas.height = video.videoHeight;
 const frameData = [];
 for (let i = 0; i < duration; i++) {
 video.currentTime = i;
 await new Promise((resolve) => {
 video.addEventListener("seeked", function handler() {
 video.removeEventListener("seeked", handler);
 ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
 const base64 = canvas.toDataURL("image/jpeg");
 frameData.push({
 base64,
 second: i,
 });
 resolve();
 });
 });
 }
 return frameData;
 }

第三步 处理每一秒的图片并裁剪

  • detectFrame获取人物在图中的位置
  • getClipImage获取裁剪后的图片
     getClipImage(box, image) {
     const newCanvas = document.createElement("canvas");
     const newCtx = newCanvas.getContext("2d");
     newCanvas.width = box.width;
     newCanvas.height = box.height;
     newCtx.drawImage(
     image,
     box.x,
     box.y,
     box.width,
     box.height,
     0,
     0,
     box.width,
     box.height
     );
     const base64Data = newCanvas.toDataURL("image/png");
     let img = document.createElement("img");
     img.src = base64Data;
     return base64Data;
     }
    
  • 通过第二步获取的data使用detectFrame获取box
  • 通过filter过滤box为空也就是没有获取到人脸。
  • 最后使用getClipImage获取到裁剪后的图片
 async install(url) {
 let data = await this.getVideoFace(url);
 const detectFrame = async (img) => {
 let box;
 const detections = await faceapi.detectAllFaces(
 img,
 new faceapi.TinyFaceDetectorOptions()
 );
 detections.forEach((detection) => {
 box = detection.box;
 });
 return box;
 };
 data = data.map((item) => {
 return new Promise((resolve, reject) => {
 let img = new Image();
 img.onload = async () => {
 item.box = await detectFrame(img);
 item.img = img;
 resolve(item);
 };
 img.src = item.base64;
 });
 });
 data = await Promise.all(data);
 console.log(`本次处理耗时:${this.numb}秒`);
 clearInterval(this.time);
 return data
 .filter((i) => Boolean(i.box))
 .map((item) => {
 item.clipImage = this.getClipImage(item.box, item.img);
 return item;
 });
 }

报告!菜坤后端要Blob

  • 工资分我一份!!!!!
  • 处理裁剪好的 base64:调用 base64ToBlob(item.clipImage)
     // Run the pipeline on a sample video: render every cropped face on
     // the page and also attach a Blob version for the backend upload.
     new ClipImages("./2025419-450082.mp4").then((data) => {
       for (const item of data) {
         const img = document.createElement("img");
         img.src = item.clipImage;
         document.body.appendChild(img);
         item.clipImageBlob = base64ToBlob(item.clipImage);
       }
       console.log(data);
     });
    
  • base64ToBlob代码片段
    /**
     * Convert a base64 string (with or without a data-URL prefix) into a
     * Blob of the given MIME type.
     *
     * @param {string} base64 - raw base64 or a full "data:...;base64,..." URL
     * @param {string} [contentType="image/png"] - MIME type for the Blob
     * @returns {Blob} decoded binary data
     */
    function base64ToBlob(base64, contentType = "image/png") {
      // Strip an optional data-URL prefix; with no comma, indexOf yields -1
      // and slice(0) keeps the whole string unchanged.
      const payload = base64.slice(base64.indexOf(",") + 1);
      // Decode the base64 payload and map each character to its byte value.
      const binary = atob(payload);
      const bytes = Uint8Array.from(binary, (ch) => ch.charCodeAt(0));
      return new Blob([bytes], { type: contentType });
    }
    

效果图

作者:小泽沐优声原文地址:https://www.cnblogs.com/ooo51o/p/18836320

%s 个评论

要回复文章请先登录注册