[Frontend Notes] Pure Front-End Camera Image Recognition and Training | Linear Model Inference | TensorFlow.js

I have kept Markdown notes ever since I first touched front-end development in my sophomore year, and since Juejin is currently running an event, I started this column. I will tidy up and post small business-related tips and key points of front-end knowledge. Some parts are bound to be less thorough than others; feedback is welcome.

This article covers how TensorFlow.js is used for both data prediction and image prediction. Data prediction was already covered in detail in my previous article: juejin.cn/post/730056... , so this article focuses on training and inference for image recognition: multi-object detection, single-object recognition, and detection from the camera. The code is commented throughout and mostly relies on TensorFlow basics; if anything is unclear, refer to my previous article or the official documentation. Before diving in, here is the final result.

This is the final output of the data prediction demo.

This is the final output of the image prediction demo.

The source code was put together in my spare time during an internship two years ago, when Vue 2 was still mainstream, so the demos are built with Vue 2. Although Vue 2 is rarely used for new projects now, the basic ideas still apply. Source code: gitee.com/Electrolux/...

1 Prediction (linear model)

1.0 Demo - path('/ai1')

Source code: gitee.com/Electrolux/...

The route is /ai1. Visiting it shows the result in the screenshot below.

Below are the key code snippets.

1.1 Importing the packages

javascript
import * as tf from "@tensorflow/tfjs";        // "@tensorflow/tfjs": "^3.20.0"
import * as tfvis from "@tensorflow/tfjs-vis"; // "@tensorflow/tfjs-vis": "^1.5.1"

// Vue component state
data() {
  return {
    data: null,
    examples: null,
    model: null,
    tensorData: null,
    predictData: null,
  };
},
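A quick way to confirm the packages are wired up correctly is to log the library version and the active backend. This is a small sanity-check sketch, not part of the original demo:

js
// Sanity check: log the tfjs version and which backend (webgl / cpu) is active
console.log("tfjs version:", tf.version.tfjs);
tf.ready().then(() => console.log("backend:", tf.getBackend()));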

1.2 Creating the model

js
createModel() {
  // Create a sequential model
  this.model = tf.sequential();
  // Add a single hidden layer
  // e.g. model.add(tf.layers.dense({ units: 50, activation: 'sigmoid' })) would use a sigmoid activation
  this.model.add(tf.layers.dense({ inputShape: [1], units: 1, useBias: true }));
  // Add an output layer
  this.model.add(tf.layers.dense({ units: 1, useBias: true }));
  return this.model;
},
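To double-check the layer shapes, tf.LayersModel exposes summary(), which prints the architecture to the console. A minimal sketch, assuming the createModel() method above:

js
// Print the layers, their output shapes and parameter counts
const model = this.createModel();
model.summary();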

1.3 Data preprocessing

js
dataPre() {
  // tf.util.shuffle(this.data);
  // Convert to tensors: build two arrays, one for the input examples and
  // one for the true output values (called labels in machine learning).
  // Note: each x must be a plain number so it fits the [N, 1] tensor shape below.
  this.data = [
    { x: 201, y: 1 },
    { x: 1,   y: 0 },
    { x: 21,  y: 1 },
    { x: 11,  y: 1 },
  ];
  console.log(this.data, "data to train on");
  // var inputs = this.data.map((d) => d.horsepower);
  // const labels = this.data.map((d) => d.mpg);
  const inputs = this.data.map((d) => d.x);
  const labels = this.data.map((d) => d.y);
  const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
  const labelTensor = tf.tensor2d(labels, [labels.length, 1]);
  // Normalize the data to the 0-1 range with min-max scaling
  const inputMax = inputTensor.max();
  const inputMin = inputTensor.min();
  const labelMax = labelTensor.max();
  const labelMin = labelTensor.min();
  const normalizedInputs = inputTensor.sub(inputMin).div(inputMax.sub(inputMin));
  const normalizedLabels = labelTensor.sub(labelMin).div(labelMax.sub(labelMin));
  // Keep the min/max values so predictions can be un-normalized later
  this.predictData = { inputMin, inputMax, labelMax, labelMin };
  return { inputs: normalizedInputs, labels: normalizedLabels };
},
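Min-max scaling maps each value v into the 0-1 range via (v - min) / (max - min), and a prediction is mapped back with the inverse, pred * (max - min) + min. A small sketch of both directions as reusable helpers (the function names are my own, not from the repo):

js
// Hypothetical helpers for min-max scaling and its inverse, using tf.js tensor ops
function normalize(tensor, min, max) {
  return tensor.sub(min).div(max.sub(min)); // (v - min) / (max - min)  ->  [0, 1]
}

function unNormalize(tensor, min, max) {
  return tensor.mul(max.sub(min)).add(min); // v * (max - min) + min  ->  original scale
}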

1.4 Training the model

js
async trainModel(model, inputs, labels) {
  this.model.compile({
    optimizer: tf.train.adam(),
    loss: tf.losses.meanSquaredError,
    metrics: ["mse"],
  });
  const batchSize = 28;
  const epochs = 50;
  return await this.model.fit(inputs, labels, {
    batchSize,
    epochs,
    shuffle: true,
    // Render the training curves (loss / mse) with tfjs-vis while fitting
    callbacks: tfvis.show.fitCallbacks(
      { name: "Training Performance" },
      ["loss", "mse"],
      { height: 200, callbacks: ["onEpochEnd"] }
    ),
  });
},
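model.fit resolves with a History object whose history.loss array holds the loss per epoch, so the caller can check how training went. A small usage sketch, to be run inside an async method:

js
// Example usage: await the training call and read the final loss from the History object
const history = await this.trainModel(this.model, inputs, labels);
const losses = history.history.loss;
console.log("final loss:", losses[losses.length - 1]);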

1.5 Putting it together

js
// Note: pass a function to addEventListener, not the result of calling it
document.addEventListener("DOMContentLoaded", () => this.run());

async run() {
  // 1. Create the model
  this.createModel();
  // 2. Preprocess the data and convert it into tensors, split into inputs and labels
  const tensorData = this.dataPre();
  const { inputs, labels } = tensorData;
  // 3. Train the model (await it so "Done Training" logs after training actually finishes)
  await this.trainModel(this.model, inputs, labels);
  console.log("Done Training");
},
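Since this code lives in a Vue 2 component, an equivalent and slightly simpler option is to call run() from the mounted lifecycle hook instead of listening for DOMContentLoaded. A minimal sketch:

js
// Vue 2 lifecycle hook: the component's DOM is ready here
mounted() {
  this.run();
},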

1.6 Single-point prediction

js
In the template: <button id="load-data" @click="singleTestModel">predict Data</button>

singleTestModel() {
  const inputs = [300];
  const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
  // predict() returns a tensor; alert() shows its string representation
  alert(this.model.predict(inputTensor.reshape([1, 1])));
}
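Note that the model was trained on normalized values, so a raw input like 300 really should be scaled with the min/max saved in this.predictData before predicting, and the output scaled back afterwards. A hedged sketch (the method name is illustrative, not from the repo):

js
singleTestModelNormalized() {
  const { inputMin, inputMax, labelMin, labelMax } = this.predictData;
  const raw = tf.tensor2d([300], [1, 1]);
  // Scale the input the same way the training data was scaled
  const normIn = raw.sub(inputMin).div(inputMax.sub(inputMin));
  // Un-normalize the prediction back to the label's original range
  const pred = this.model.predict(normIn).mul(labelMax.sub(labelMin)).add(labelMin);
  alert(pred.dataSync()[0]);
},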

1.7 Multi-point prediction

js
testModel(model, inputData, normalizationData) {
      const { inputMax, inputMin, labelMin, labelMax } = this.predictData;
        console.log(inputMax, inputMin, labelMin, labelMax,normalizationData)
      // Generate predictions for a uniform range of numbers between 0 and 1;
      // We un-normalize the data by doing the inverse of the min-max scaling
      // that we did earlier.
      const [xs, preds] = tf.tidy(() => {
        const xs = tf.linspace(0, 1, 100);
        const preds = model.predict(xs.reshape([100, 1]));

        const unNormXs = xs.mul(inputMax.sub(inputMin)).add(inputMin);

        const unNormPreds = preds.mul(labelMax.sub(labelMin)).add(labelMin);

        // Un-normalize the data
        return [unNormXs.dataSync(), unNormPreds.dataSync()];
      });

      const predictedPoints = Array.from(xs).map((val, i) => {
        return { x: val, y: preds[i] };
      });

      const originalPoints = inputData.map((d) => ({
        x: d.x,
        y: d.y,
      }));

      tfvis.render.scatterplot(
        { name: "Model Predictions vs Original Data" },
        { values: [originalPoints, predictedPoints], series: ["original", "predicted"] },
        {
          xLabel: "x",
          yLabel: "y",
          height: 300,
        }
      );
    },
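testModel can be wired to a button just like the single-point version. A small usage sketch (the handler name is illustrative):

js
// In the template: <button @click="multiTestModel">predict curve</button>
multiTestModel() {
  // this.data and this.predictData were filled in by dataPre()
  this.testModel(this.model, this.data, this.predictData);
},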

1.8 Plotting examples

js
// Plotting examples; see @tensorflow/tfjs-vis for all available chart types
    async runExample() {
      // Load and plot the original input data that we are going to train on.
      //   const data = await this.getData();
      //   var values = data.map((d) => ({
      //     x: d.horsepower,
      //     y: d.mpg,
      //   }));
      //   console.log(values, "values");
      // Scatter plot
      var values = [
        {
          x: 20,
          y: 20,
        },
        {
          x: 165,
          y: 30,
        },
        {
          x: 165,
          y: 30,
        },
        {
          x: 165,
          y: 30,
        },
      ];

      //   tfvis.render.scatterplot(
      //     { name: "Horsepower v MPG" },
      //     { values },
      //     {
      //       xLabel: "x axis",
      //       yLabel: "y axis",
      //       height: 300,
      //     }
      //   );

      // Bar chart
      //   const data = [
      //     { index: 0, value: 50 },
      //     { index: 1, value: 100 },
      //     { index: 2, value: 150 },
      //   ];
      //   const surface = { name: "Bar chart", tab: "Charts" };
      //   tfvis.render.barchart(surface, data);

      // Confusion matrix
      //   const data = {
      //     values: [
      //       [4, 2, 8],
      //       [1, 7, 2],
      //       [3, 3, 20],
      //     ],
      //   };
      //   // Render to visor
      //   const surface = {
      //     name: "Confusion Matrix with Excluded Diagonal",
      //     tab: "Charts",
      //   };
      //   tfvis.render.confusionMatrix(surface, data, {
      //     shadeDiagonal: false,
      //   });

      // Line chart
      const series1 = Array(100)
        .fill(0)
        .map((y) => Math.random() * 100 + 50)
        .map((y, x) => ({ x, y }));
      const data = { values: [series1] };

      // Render to visor
      const surface = { name: "Zoomed Line Chart", tab: "Charts" };
      tfvis.render.linechart(surface, data, { zoomToFit: true });
    },

1.9 Saving and loading the model

js
<button id="load-data" @click="save">Save</button>
<button id="load-data" @click="load">Load model</button>

async save() {
  // Local storage (browser only)
  await this.model.save("localstorage://my-model-1");
  // To download the model as files instead: await this.model.save('downloads://my-model');
  alert("Model saved");
},
async load() {
  // Local storage (browser only), stored under tensorflowjs_models/my-model-1/model_topology
  const MODEL_URL = "localstorage://my-model-1";
  const inputs = [300];
  const inputTensor = tf.tensor2d(inputs, [inputs.length, 1]);
  // Load the layers model and run a prediction
  const model = await tf.loadLayersModel(MODEL_URL);
  // To load a graph model instead:
  // const model = await loadGraphModel(MODEL_URL);
  alert(model.predict(inputTensor.reshape([1, 1])));
},
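Besides localstorage://, TensorFlow.js also understands indexeddb:// (larger quota) and plain HTTP(S) URLs pointing at a model.json. A hedged sketch (the URL is a placeholder, not a real endpoint):

js
async loadFromOtherSources() {
  // IndexedDB gives more room than localStorage for larger models
  await this.model.save("indexeddb://my-model-1");
  // Load a model hosted on a server (placeholder URL; expects model.json plus weight files)
  const remoteModel = await tf.loadLayersModel("https://example.com/models/my-model/model.json");
  return remoteModel;
},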

1.10 Defining a network (example)

Below is a minimal example of defining a convolutional network.

js
const model = tf.sequential();

  const IMAGE_WIDTH = 28;
  const IMAGE_HEIGHT = 28;
  const IMAGE_CHANNELS = 1;

  // The first layer is a convolutional layer and must declare the input tensor shape
  // The input is [28,28,1]: a 28x28 grayscale image with a single color channel
  // C1:feature maps 8@24x24
  model.add(tf.layers.conv2d({
    inputShape: [IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS], // shape of the input tensor
    kernelSize: 5, // convolution kernel size
    filters: 8, // number of kernels (output channels)
    strides: 1, // kernel stride
    activation: 'relu', // activation function
    kernelInitializer: 'varianceScaling' // kernel weight initialization
  }));

  // The second layer is a max-pooling layer
  // S2:feature maps 8@12x12
  model.add(tf.layers.maxPooling2d({
    poolSize: [2, 2], // pooling window size
    strides: [2, 2] // window stride
  }));

  // The third layer is another convolutional layer
  // C3 : feature maps 16@8x8
  model.add(tf.layers.conv2d({
    kernelSize: 5,
    filters: 16,
    strides: 1,
    activation: 'relu',
    kernelInitializer: 'varianceScaling'
  }));

  // The fourth layer is another max-pooling layer
  // S4 : feature maps 16@4x4
  model.add(tf.layers.maxPooling2d({
    poolSize: [2, 2],
    strides: [2, 2]
  }));

  // The fifth layer flattens the multi-dimensional tensor into 1D so it can feed the following dense layers
  // C5 : feature maps 256@1x1
  model.add(tf.layers.flatten());

  // Optionally add an 84-unit fully connected layer
/*   model.add(tf.layers.dense({
    units:84,
    activation:'relu',
    useBias:true,
    name:'full-connection-layer'
  })) */

  // The sixth layer is the output layer with 10 classes; the softmax outputs can be read as probabilities
  // OUTPUT
  const NUM_OUTPUT_CLASSES = 10;
  model.add(tf.layers.dense({
    units: NUM_OUTPUT_CLASSES,
    kernelInitializer: 'varianceScaling',
    activation: 'softmax'
  }));


  // Choose an optimizer, loss function and accuracy metric,
  // then compile and return the model
  const optimizer = tf.train.adam();
  model.compile({
    optimizer: optimizer,
    loss: 'categoricalCrossentropy',
    metrics: ['accuracy'],
  });
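The compiled model expects image tensors of shape [batch, 28, 28, 1] and one-hot labels of length 10. Here is a quick shape smoke test with random tensors (not real MNIST data, just to confirm everything is wired up):

js
// Smoke test with random tensors to verify the input/output shapes
const xs = tf.randomNormal([16, 28, 28, 1]); // 16 fake 28x28 grayscale images
const ys = tf.oneHot(tf.randomUniform([16], 0, 10, "int32"), 10).toFloat(); // 16 fake one-hot labels
model.fit(xs, ys, { epochs: 1, batchSize: 8 })
  .then((h) => console.log("smoke-test loss:", h.history.loss[0]));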

2 Image recognition

2.1 Single-object recognition - path('/ai4')

Source code: gitee.com/Electrolux/...

The main idea is to turn the canvas ImageData into a vector for training; the remaining steps are essentially the same as for the linear model.

Demo

vue
<template>
  <div>
    <div style="display: flex; flex-direction: row; padding: 10px" id="vueapp">
      <img src="" alt="" class="test1" style="width: 200px" />
      <div class="card">
        <div class="card-header">此处写数字</div>
        <div class="card-body">
          <canvas
            ref="drawCanvas"
            width="200"
            height="200"
            @mousedown="canvasMouseDownHandler"
            @mousemove="canvasMouseMoveHandler"
            @mouseup="canvasMouseUpHandler"
            style="border-style: dashed; display: block"
          ></canvas>
          <div style="text-align: center">
            <button
              class="btn btn-primary"
              style="margin-top: 10px"
              @click="btnClearCanvasClickedHandler"
            >
              Clear
            </button>
          </div>
        </div>
        <div class="card-header">图像数据预览</div>
        <div class="card-body" style="text-align: center; background-color: black">
          <canvas
            width="28"
            height="28"
            style="border-style: solid; border-color: white"
            ref="previewCanvas"
            class="test"
          ></canvas>
        </div>
      </div>
      <div class="card" style="margin-left: 10px">
        <div class="card-header">训练</div>
        <div class="card-body">
          Target digit:
          <input type="text" v-model="targetNum" />
          <button class="btn btn-primary" @click="btnTrainClickedHandler">训练</button>

          <div>
            <div v-html="trainStatus"></div>
          </div>
        </div>
        <div class="card-header">识别</div>
        <div class="card-body">
          <button class="btn btn-primary" @click="btnPredictClickedHandler">预测</button>
          <div>{{ result }}</div>
        </div>
      </div>
    </div>
  </div>
</template>

<script>
// Object detection model
import * as cocossd from "@tensorflow-models/coco-ssd";
// Question answering model (unused here)
// import * as mobilenet from "@tensorflow-models/qna";

// import "https://unpkg.com/@tensorflow/tfjs"
import * as tf from "@tensorflow/tfjs";
export default {
  data() {
    return {
      targetNum: 0,
      trainStatus: "",
      result: "",
    };
  },

  mounted() {
    //
    let c2d = (this.drawCanvasContext2d = this.$refs.drawCanvas.getContext("2d"));
    c2d.lineWidth = 20;
    c2d.lineCap = "round";
    c2d.lineJoin = "round";

    this.previewCanvasContext2d = this.$refs.previewCanvas.getContext("2d");

    this.loadOrCreateModel();
  },

  methods: {
    // Step 1 (called from mounted): load or create the model
    async loadOrCreateModel() {
      try {
        this.model = await tf.loadLayersModel("localstorage://mymodel");
      } catch (e) {
        console.warn("Can not load model from LocalStorage, so we create a new model");

        this.model = tf.sequential({
          layers: [
            tf.layers.inputLayer({ inputShape: [784] }),
            tf.layers.dense({ units: 10 }),
            tf.layers.softmax(),
          ],
        });
      }

      this.model.compile({
        optimizer: "sgd",
        loss: "categoricalCrossentropy",
        metrics: ["accuracy"],
      });
    },

    getImageData() {
      let imageData = this.previewCanvasContext2d.getImageData(0, 0, 28, 28);
      // console.log(imageData,"imageData")

      let pixelData = [];

      let color;
      for (let i = 0; i < imageData.data.length; i += 4) {
        color = (imageData.data[i] + imageData.data[i + 1] + imageData.data[i + 2]) / 3;
        pixelData.push(Math.round((255 - color) / 255));
      }

      // Blob lets us work with binary data directly from JS; uncommenting the block below downloads the preview image when predicting
      // document.querySelector('.test').toBlob(function(blob) {
      //   var a = document.createElement("a");
      //   var body = document.getElementsByTagName("body");
      //   document.body.appendChild(a);
      //   a.download = "img" + ".jpg";
      //   a.href = window.URL.createObjectURL(blob);

      //   a.click();
      //   body.removeChild("a");
      // });

      return pixelData;
    },

    // Step 2: training, one sample per click
    async btnTrainClickedHandler(e) {
      let data = this.getImageData();
  

      // Label processing: one-hot encode the target digit into a length-10 vector
      let targetTensor = tf.oneHot(parseInt(this.targetNum), 10);

      let self = this;
      // Train on one example at a time
      console.log("Start training");
      await this.model.fit(tf.tensor([data]), tf.tensor([targetTensor.arraySync()]), {
        epochs: 30,
        callbacks: {
          onEpochEnd(epoch, logs) {
            console.log(epoch, logs);
            self.trainStatus = `<div>Step: ${epoch}</div><div>Loss: ${logs.loss}</div>`;
          },
        },
      });
      self.trainStatus = `<div style="color: green;">Training complete</div>`;
      console.log("Completed");

      await this.model.save("localstorage://mymodel");
    },

    
    async btnPredictClickedHandler(e) {
      let data = this.getImageData();

      let predictions = await this.model.predict(tf.tensor([data]));
      this.result = predictions.argMax(1).arraySync()[0];
    },

    // Handwriting canvas handlers
    canvasMouseDownHandler(e) {
      this.drawing = true;
      this.drawCanvasContext2d.beginPath();
      this.drawCanvasContext2d.moveTo(e.offsetX, e.offsetY);
    },

    canvasMouseMoveHandler(e) {
      // Only draw while the mouse button is held down; otherwise the line would follow every mouse move
      if (this.drawing) {
        this.drawCanvasContext2d.lineTo(e.offsetX, e.offsetY);
        this.drawCanvasContext2d.stroke();
      }
    },

    canvasMouseUpHandler(e) {
      this.drawing = false;

      this.previewCanvasContext2d.fillStyle = "white";
      this.previewCanvasContext2d.fillRect(0, 0, 28, 28);
      this.previewCanvasContext2d.drawImage(this.$refs.drawCanvas, 0, 0, 28, 28);
    },

    btnClearCanvasClickedHandler(e) {
      this.drawCanvasContext2d.clearRect(
        0,
        0,
        this.$refs.drawCanvas.width,
        this.$refs.drawCanvas.height
      );
    },
  },
};
</script>

<style lang="scss" scoped></style>
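As an alternative to the manual pixel loop in getImageData(), tf.browser.fromPixels can read the 28x28 preview canvas straight into a tensor. A hedged sketch that roughly mirrors the loop above (continuous values instead of the 0/1 rounding, and it reads a single channel, which is fine for black-on-white strokes); the method name is my own:

js
// Hypothetical alternative to getImageData(): read the preview canvas as a tensor
getImageTensor() {
  return tf.tidy(() =>
    tf.browser
      .fromPixels(this.$refs.previewCanvas, 1) // [28, 28, 1], values 0-255
      .toFloat()
      .div(255)            // scale to 0-1
      .sub(1)
      .mul(-1)             // invert: white background -> 0, dark strokes -> 1
      .reshape([1, 784])   // flatten to match the dense [784] input layer
  );
},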

2.2 Multi-object detection from the camera - path('/ai3')

Source code: gitee.com/Electrolux/...

The route is /ai3. Above we turned the image into a canvas, used its ImageData to build the training inputs, and then followed the same flow as the linear model.

This part adds navigator.mediaDevices.getUserMedia to grab the camera stream, converts each frame from the canvas into an image, calls the coco-ssd model's classifyModel.detect(image), and draws the resulting scores and classes. (No model is trained here; that kind of training is better done on the server.)

vue
<template>
  <div>
    <h1>TensorFlow.js Object Detection</h1>
    <video width="400" height="300"></video>
    <p></p>
    <img width="400" height="300" />
    1
    <div>
     
      <canvas id="canvas" width="400" height="300"></canvas>
    </div>


    
  </div>
</template>

<script>
// Object detection model
import * as cocossd from "@tensorflow-models/coco-ssd";
// Question answering model (unused here)
// import * as mobilenet from "@tensorflow-models/qna";

// import "https://unpkg.com/@tensorflow/tfjs"
import * as tf from "@tensorflow/tfjs";
export default {
  // Camera object detection in the browser (coco-ssd, MobileNet-based)

  mounted() {
    const video = document.querySelector("video");
    const image = document.querySelector("img");
    const status = document.querySelector("p");

    const canvas = document.createElement("canvas");
    const ctx = canvas.getContext("2d");

    var classifyModel;
    main();
    // Step 1: load the model and the camera
    async function main() {
      console.log("Loading model...");
      // Load the coco-ssd model once here, instead of on every frame
      classifyModel = await cocossd.load();
      console.log("Model loaded");
      const stream = await navigator.mediaDevices.getUserMedia({ video: true });
      video.srcObject = stream;
      await video.play();

      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;

      refresh();
    }
    // Step 2: grab a frame from the camera, run detection and draw the results
    async function refresh() {
      ctx.drawImage(video, 0, 0);
      // Render the current frame into the <img> element
      image.src = canvas.toDataURL("image/png");
      var predictions = await classifyModel.detect(image);
      // Guard against frames where nothing is detected
      let className = predictions[0] ? predictions[0].class : "nothing detected yet";
      let percentage = Math.floor(100 * (predictions[0] ? predictions[0].score : 0));
      status.innerHTML = percentage + "%" + " " + className;

      let result = predictions
      const c = document.getElementById("canvas");
      const context = c.getContext("2d");
      context.drawImage(image, 0, 0);
      context.font = "10px Arial";
      
      console.log("number of detections: ", result.length);
      for (let i = 0; i < result.length; i++) {
        context.beginPath();
        context.rect(...result[i].bbox);
        context.lineWidth = 1;
        context.strokeStyle = "green";
        context.fillStyle = "green";
        context.stroke();
        context.fillText(
          result[i].score.toFixed(3) + " " + result[i].class,
          result[i].bbox[0],
          result[i].bbox[1] > 10 ? result[i].bbox[1] - 5 : 10
        );
      }

      setTimeout(refresh, 100);
    }

    // Step 3: detect a single image; the template needs <img width="400" height="300" src="image1.png" class="single"/>
    // async function refresh() {
    //   const predictions = await classifyModel.detect(document.querySelector(".single"));
    //   console.log("Single-image detection: ", predictions);
    // }

    

  },
};
</script>

<style lang="scss" scoped></style>
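coco-ssd's detect() also accepts a video element directly, so the canvas → dataURL → img round trip can be dropped. A hedged alternative loop using requestAnimationFrame (it assumes the same video, canvas and classifyModel variables as the component above; the function name is illustrative):

js
// Alternative loop: run detection straight on the <video> element
async function refreshFromVideo() {
  const predictions = await classifyModel.detect(video);
  const context = document.getElementById("canvas").getContext("2d");
  context.drawImage(video, 0, 0); // draw the current frame at its native size
  context.font = "10px Arial";
  context.strokeStyle = "green";
  context.fillStyle = "green";
  for (const p of predictions) {
    context.strokeRect(...p.bbox); // bbox is [x, y, width, height]
    context.fillText(`${p.score.toFixed(3)} ${p.class}`, p.bbox[0], Math.max(p.bbox[1] - 5, 10));
  }
  requestAnimationFrame(refreshFromVideo); // schedule the next frame
}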

2.3 Multi-object detection from a photo - path('/ai2')

Source code: gitee.com/Electrolux/...

The idea is much the same as above, except there is no navigator.mediaDevices.getUserMedia stream to read and convert: the image source is handed straight to the SSD model. If you want to draw the results, it is still best to use a separate canvas element.

vue
<template>
  <div>
    <h1>TensorFlow.js Object Detection</h1>
    <select id="base_model">
      <option value="lite_mobilenet_v2">SSD Lite Mobilenet V2</option>
      <option value="mobilenet_v1">SSD Mobilenet v1</option>
      <option value="mobilenet_v2">SSD Mobilenet v2</option>
    </select>
    <button type="button" id="run">Run</button>
    <button type="button" id="toggle">Toggle Image</button>
    <div>
      <img id="image" />
      <canvas id="canvas" width="600" height="399"></canvas>
    </div>
  </div>
</template>

<script>
import "@tensorflow/tfjs-backend-cpu";
import "@tensorflow/tfjs-backend-webgl";

import * as cocoSsd from "@tensorflow-models/coco-ssd";

import imageURL from "./image1.jpg";
import image2URL from "./image2.jpg";

export default {
  // Detection using a pre-trained model
  async mounted() {
    let modelPromise;

    await (modelPromise = cocoSsd.load());

    const button = document.getElementById("toggle");
    button.onclick = () => {
      image.src = image.src.endsWith(imageURL) ? image2URL : imageURL;
    };

    const select = document.getElementById("base_model");
    select.onchange = async (event) => {
      const model = await modelPromise;
      model.dispose();
      modelPromise = cocoSsd.load({
        base: event.srcElement.options[event.srcElement.selectedIndex].value,
      });
    };

    const image = document.getElementById("image");
    image.src = imageURL;

    const runButton = document.getElementById("run");
    runButton.onclick = async () => {
      const model = await modelPromise;
      console.log("model loaded");
      console.time("predict1");
      const result = await model.detect(image);
      console.log(result, "detection results");
      console.timeEnd("predict1");

      const c = document.getElementById("canvas");
      const context = c.getContext("2d");
      context.drawImage(image, 0, 0);
      context.font = "10px Arial";

      console.log("number of detections: ", result.length);
      for (let i = 0; i < result.length; i++) {
        context.beginPath();
        context.rect(...result[i].bbox);
        context.lineWidth = 1;
        context.strokeStyle = "green";
        context.fillStyle = "green";
        context.stroke();
        context.fillText(
          result[i].score.toFixed(3) + " " + result[i].class,
          result[i].bbox[0],
          result[i].bbox[1] > 10 ? result[i].bbox[1] - 5 : 10
        );
      }
    };
  },
};
</script>

<style lang="scss" scoped></style>
开发语言·javascript·ecmascript