Preface
In day-to-day work we often run into requirements like cropping a user-uploaded image to use as an avatar, or cropping part of an uploaded image and saving it. If you have never done this before it can feel like you have no idea where to start, so in this post I will implement it step by step with Canvas.
Approach
- Draw the uploaded image onto a Canvas.
- Draw a mask layer on top of it using Canvas's fillRect.
- Record the selection coordinates from the user's mouse events.
- Use Canvas's globalCompositeOperation='destination-out' and globalCompositeOperation='destination-over' to make the selected area appear highlighted.
- Display the cropped image on a second Canvas.
Implementation
First we prepare some DOM structure.
html
<style>
.canvas-container ,.canvas-container2{
display: none;
border: 1px solid wheat;
}
body{
display: flex;
flex-flow: column;
}
</style>
<div>
<input type="file" accept="image/*" id="imageFile">
</div>
<div class="canvas-container">
<canvas id="can"></canvas>
</div>
<div class="canvas-container2">
<canvas id="can2"></canvas>
</div>
Then we grab these DOM elements inside a script tag.
js
const oContainer=document.querySelector('.canvas-container')
const oContainer2=document.querySelector('.canvas-container2')
const oImageFile=document.querySelector('#imageFile')
const oCan=document.getElementById('can')
const oCan2=document.getElementById('can2')
const ctx=oCan.getContext('2d')
const ctx2=oCan2.getContext('2d')
Here, ctx is used to draw the uploaded image and ctx2 is used to draw the cropped result.
First we have to get hold of the image uploaded through the input element, so we create an init function and listen for the input's change event; to read the file we use the FileReader class.
js
const oImage=new Image()
const init=()=>{
oImageFile.addEventListener('change',handleFileInput,false)
oCan.addEventListener('mousedown',handleCanvasMousedown,false)
}
init()
function handleFileInput(e){
const file=e.target.files[0]
const reader=new FileReader();
reader.readAsDataURL(file);
reader.onload=function(e){
const data=e.target.result;
oImage.src=data;
console.log(oImage)
oImage.onload=function(){
const {width,height}=this;
console.log(width)
}
}
}
Here we use FileReader to read the file picked up by the input element; e.target.result is the image as a base64 data URL. We create an Image instance, assign that data URL to oImage.src, and listen for oImage's onload event, at which point we can read the image's width and height.
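As a side note, if you never need the base64 string itself, URL.createObjectURL is a lighter-weight way to get the file into an Image. A minimal sketch of that variant (handleFileInputObjectURL is a hypothetical name, not part of the code above):
js
// Hypothetical alternative to handleFileInput that uses an object URL instead of FileReader.
function handleFileInputObjectURL(e){
    const file = e.target.files[0]
    if (!file) return
    const url = URL.createObjectURL(file)
    oImage.onload = function(){
        URL.revokeObjectURL(url) // release the blob URL once the image has loaded
        console.log(this.width, this.height)
    }
    oImage.src = url
}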
After that we only need to draw the uploaded image with ctx, so we create a generateCanvas function that sizes the container and the canvas and makes them visible.
js
function generateCanvas (container,canvas,width,height){
container.style.width=width + 'px';
container.style.height=height + 'px';
canvas.width=width;
canvas.height=height;
container.style.display='block'
}
Then we call this function inside handleFileInput.
js
const oImage=new Image()
const init=()=>{
oImageFile.addEventListener('change',handleFileInput,false)
oCan.addEventListener('mousedown',handleCanvasMousedown,false)
}
init()
function handleFileInput(e){
const file=e.target.files[0]
const reader=new FileReader();
reader.readAsDataURL(file);
reader.onload=function(e){
const data=e.target.result;
oImage.src=data;
console.log(oImage)
oImage.onload=function(){
const {width,height}=this;
console.log(width)
generateCanvas(oContainer,oCan,width,height)
ctx.drawImage(oImage,0,0,width,height)
}
}
}
Finally, a call to ctx.drawImage draws the uploaded image onto the canvas.
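For reference, drawImage has three call forms: the 5-argument form above scales the whole image to the given size, and the 9-argument form (which drawScreenShot uses later) copies a source rectangle into a destination rectangle. The values below are only illustrative:
js
// 3 arguments: draw at (dx, dy) at the image's natural size
ctx.drawImage(oImage, 0, 0)
// 5 arguments: draw at (dx, dy), scaled to dWidth x dHeight
ctx.drawImage(oImage, 0, 0, width, height)
// 9 arguments: copy (sx, sy, sWidth, sHeight) from the source
// into (dx, dy, dWidth, dHeight) on the canvas
ctx.drawImage(oImage, 0, 0, width, height, 0, 0, width, height)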
Step two: time to create the mask layer.
We create a drawImageMask method.
js
function drawImageMask(x,y,width,height,opcity){
ctx.fillStyle=`rgba(0,0,0,${opcity})`
ctx.fillRect(x,y,width,height)
}
As before, we call it inside handleFileInput.
js
const MASK_OPCITY=0.5
function handleFileInput(e){
const file=e.target.files[0]
const reader=new FileReader();
reader.readAsDataURL(file);
reader.onload=function(e){
const data=e.target.result;
oImage.src=data;
console.log(oImage)
oImage.onload=function(){
const {width,height}=this;
console.log(width)
generateCanvas(oContainer,oCan,width,height)
ctx.drawImage(oImage,0,0,width,height)
drawImageMask(0,0,width,height,MASK_OPCITY)
}
}
}
Next we need to listen for the user's mouse events, record the starting mouse position and the current position, and work out rectX and rectY (the width and height of the selection).
js
const oContainer=document.querySelector('.canvas-container')
const oContainer2=document.querySelector('.canvas-container2')
const oImageFile=document.querySelector('#imageFile')
const oCan=document.getElementById('can')
const oCan2=document.getElementById('can2')
const ctx=oCan.getContext('2d')
const ctx2=oCan2.getContext('2d')
const oImage=new Image()
let initPoint=[]
const MASK_OPCITY=0.5
const init=()=>{
oImageFile.addEventListener('change',handleFileInput,false)
oCan.addEventListener('mousedown',handleCanvasMousedown,false)
}
init()
function handleFileInput(e){
const file=e.target.files[0]
const reader=new FileReader();
reader.readAsDataURL(file);
reader.onload=function(e){
const data=e.target.result;
oImage.src=data;
console.log(oImage)
oImage.onload=function(){
const {width,height}=this;
console.log(width)
generateCanvas(oContainer,oCan,width,height)
ctx.drawImage(oImage,0,0,width,height)
drawImageMask(0,0,width,height,MASK_OPCITY)
}
}
}
function handleCanvasMousedown(e){
initPoint=[e.offsetX,e.offsetY]
oCan.addEventListener('mousemove',handleCanvasMousemove,false)
oCan.addEventListener('mouseup',handleCanvasMouseup,false)
}
function handleCanvasMousemove(e){
const endX=e.offsetX;
const endY=e.offsetY;
const rectX= endX - initPoint[0];
const rectY = endY - initPoint [1];
const {width ,height} =oCan;
}
function handleCanvasMouseup(e){
oCan.removeEventListener('mousemove',handleCanvasMousemove,false)
oCan.removeEventListener('mouseup',handleCanvasMouseup,false)
}
The new code here listens for the mousedown, mousemove and mouseup events and, on every mousemove, computes and records the coordinate deltas. (One caveat about dragging up or to the left is sketched below.)
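One thing the code above does not handle: if the user drags up or to the left, rectX and rectY become negative, and getImageData will later throw on a negative width or height. A possible fix is to normalize the rectangle before using it; normalizeRect below is a hypothetical helper, not part of the original code:
js
// Hypothetical helper: turn a start point plus (possibly negative) deltas
// into a rect with a top-left origin and positive width/height.
function normalizeRect(startX, startY, deltaX, deltaY){
    return [
        deltaX < 0 ? startX + deltaX : startX, // left edge
        deltaY < 0 ? startY + deltaY : startY, // top edge
        Math.abs(deltaX),                      // width
        Math.abs(deltaY)                       // height
    ]
}
// e.g. in handleCanvasMousemove:
// screenShotData = normalizeRect(initPoint[0], initPoint[1], rectX, rectY)
For a similar reason, listening for mouseup on document instead of the canvas avoids losing the event when the mouse is released outside the canvas.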
Now we can build the highlighted selection. After every mousemove we need to clear the canvas and redraw the mask, and the highlight logic itself is wrapped in a drawScreenShot method.
js
function drawScreenShot(canWidth,canHeight,rectWidth,rectHeight){
ctx.globalCompositeOperation='destination-out';
ctx.fillStyle='#000';
ctx.fillRect(...initPoint,rectWidth,rectHeight);
ctx.globalCompositeOperation='destination-over';
ctx.drawImage(oImage,0,0,canWidth,canHeight,0,0,canWidth,canHeight)
}
This function receives the canvas width and height plus the selection's width and height, i.e. the delta between the mousedown point and the current mouse position.
js
ctx.globalCompositeOperation='destination-out';
ctx.fillStyle='#000';
ctx.fillRect(...initPoint,rectWidth,rectHeight);
With 'destination-out', the existing canvas content is kept only where it does not overlap the newly drawn shape, so this fillRect punches a hole in the mask over the selection. The fillStyle color does not matter here; any opaque color will do.
js
ctx.globalCompositeOperation='destination-over';
ctx.drawImage(oImage,0,0,canWidth,canHeight,0,0,canWidth,canHeight)
These two lines draw new content behind the existing canvas content: the original image is painted behind the (now holed) mask, so only the area punched out by ctx.fillRect shows the image at full brightness, which is the highlight effect.
Then we call this function on every mousemove.
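Note that globalCompositeOperation is sticky canvas state and its default is 'source-over'. The code here happens to keep working because the canvas is cleared on every mousemove, but it is safer to restore the default once drawScreenShot is done, for example:
js
// after the destination-over drawImage call in drawScreenShot:
ctx.globalCompositeOperation = 'source-over' // restore the default compositing mode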
js
function handleCanvasMousemove(e){
const endX=e.offsetX;
const endY=e.offsetY;
const rectX= endX - initPoint[0];
const rectY = endY - initPoint [1];
const {width ,height} =oCan;
screenShotData = [initPoint[0],initPoint[1],rectX,rectY]
ctx.clearRect(0,0,width,height);
drawImageMask(0,0,width,height,MASK_OPCITY)
drawScreenShot(width,height,rectX,rectY)
}
Finally, we just need to put the cropped image onto ctx2.
We create a drawScreenShotImage function.
js
// screenShotData = [startX, startY, rectWidth, rectHeight], declared at the top level (let screenShotData = []) and filled in handleCanvasMousemove
function drawScreenShotImage(screenShotData){
const data=ctx.getImageData(...screenShotData);
generateCanvas(oContainer2,oCan2,screenShotData[2],screenShotData[3])
ctx2.clearRect(0,0,screenShotData[2],screenShotData[3]) // clear the result canvas from its own origin
ctx2.putImageData(data,0,0)
}
Here getImageData reads the pixel data of the selected area from ctx, and putImageData then paints it onto ctx2.
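An alternative worth knowing: a canvas can itself be the source of drawImage, so the same copy could be done with the 9-argument form instead of getImageData/putImageData. A sketch under the same screenShotData shape (drawScreenShotImageViaDrawImage is a hypothetical name):
js
// Hypothetical variant that copies canvas-to-canvas with drawImage.
function drawScreenShotImageViaDrawImage([sx, sy, sw, sh]){
    generateCanvas(oContainer2, oCan2, sw, sh)
    // copy the selected rect from the first canvas onto the second one
    ctx2.drawImage(oCan, sx, sy, sw, sh, 0, 0, sw, sh)
}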
drawScreenShotImage is called on mouseup.
js
function handleCanvasMouseup(e){
oCan.removeEventListener('mousemove',handleCanvasMousemove,false)
oCan.removeEventListener('mouseup',handleCanvasMouseup,false)
drawScreenShotImage(screenShotData)
}
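If you actually need to upload the cropped result (the avatar scenario from the preface), the second canvas can be serialized with toDataURL or toBlob. A minimal sketch, assuming a hypothetical '/upload' endpoint:
js
// Hypothetical export step: turn the cropped canvas into a Blob and POST it.
oCan2.toBlob(function(blob){
    const formData = new FormData()
    formData.append('avatar', blob, 'avatar.png') // field name and filename are placeholders
    fetch('/upload', { method: 'POST', body: formData })
}, 'image/png')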
Full source code
html
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
<style>
.canvas-container ,.canvas-container2{
display: none;
border: 1px solid wheat;
}
body{
display: flex;
flex-flow: column;
}
</style>
</head>
<body>
<div>
<input type="file" accept="image/*" id="imageFile">
</div>
<div class="canvas-container">
<canvas id="can"></canvas>
</div>
<div class="canvas-container2">
<canvas id="can2"></canvas>
</div>
<script>
const oContainer=document.querySelector('.canvas-container')
const oContainer2=document.querySelector('.canvas-container2')
const oImageFile=document.querySelector('#imageFile')
const oCan=document.getElementById('can')
const oCan2=document.getElementById('can2')
const ctx=oCan.getContext('2d')
const ctx2=oCan2.getContext('2d')
const oImage=new Image()
let initPoint=[]
let screenShotData=[];
const MASK_OPCITY=0.5
const init=()=>{
oImageFile.addEventListener('change',handleFileInput,false)
oCan.addEventListener('mousedown',handleCanvasMousedown,false)
}
init()
function handleFileInput(e){
const file=e.target.files[0]
const reader=new FileReader();
reader.readAsDataURL(file);
reader.onload=function(e){
const data=e.target.result;
oImage.src=data;
console.log(oImage)
oImage.onload=function(){
const {width,height}=this;
console.log(width)
generateCanvas(oContainer,oCan,width,height)
ctx.drawImage(oImage,0,0,width,height)
drawImageMask(0,0,width,height,MASK_OPCITY)
}
}
}
function handleCanvasMousedown(e){
initPoint=[e.offsetX,e.offsetY]
oCan.addEventListener('mousemove',handleCanvasMousemove,false)
oCan.addEventListener('mouseup',handleCanvasMouseup,false)
}
function handleCanvasMousemove(e){
const endX=e.offsetX;
const endY=e.offsetY;
const rectX= endX - initPoint[0];
const rectY = endY - initPoint [1];
const {width ,height} =oCan;
screenShotData = [initPoint[0],initPoint[1],rectX,rectY]
ctx.clearRect(0,0,width,height);
drawImageMask(0,0,width,height,MASK_OPCITY)
drawScreenShot(width,height,rectX,rectY)
}
function handleCanvasMouseup(e){
oCan.removeEventListener('mousemove',handleCanvasMousemove,false)
oCan.removeEventListener('mouseup',handleCanvasMouseup,false)
drawScreenShotImage(screenShotData)
}
function generateCanvas (container,canvas,width,height){
container.style.width=width + 'px';
container.style.height=height + 'px';
canvas.width=width;
canvas.height=height;
container.style.display='block'
}
function drawImageMask(x,y,width,height,opcity){
ctx.fillStyle=`rgba(0,0,0,${opcity})`
ctx.fillRect(x,y,width,height)
}
function drawScreenShot(canWidth,canHeight,rectWidth,rectHeight){
ctx.globalCompositeOperation='destination-out';
ctx.fillStyle='#000';
ctx.fillRect(...initPoint,rectWidth,rectHeight);
ctx.globalCompositeOperation='destination-over';
ctx.drawImage(oImage,0,0,canWidth,canHeight,0,0,canWidth,canHeight)
}
function drawScreenShotImage(screenShotData){
const data=ctx.getImageData(...screenShotData);
generateCanvas(oContainer2,oCan2,screenShotData[2],screenShotData[3])
ctx2.clearRect(0,0,screenShotData[2],screenShotData[3])
ctx2.putImageData(data,0,0)
}
</script>
</body>
</html>