概述:后端服务(Node + Express)、前端(Vue + spark-md5)
一、后端服务
1、创建后端项目
bash
mkdir upload-server
cd upload-server
npm init -y
npm install express cors multer fs-extra
2、 后端完整代码 server.js
js
const express = require('express');
const cors = require('cors');
const multer = require('multer');
const fse = require('fs-extra');
const path = require('path');
const app = express();
// CORS for the separate front-end dev server + raised body limits for large JSON payloads
app.use(cors());
app.use(express.json({ limit: '100mb' }));
app.use(express.urlencoded({ extended: true, limit: '100mb' }));
// Storage layout (paths/naming must match what the front end sends)
const UPLOAD_DIR = path.resolve(__dirname, 'upload'); // temp chunk dir: one subdir per file hash, one file per chunk index
const MERGE_DIR = path.resolve(__dirname, 'merged'); // final merged files, named `${fileHash}${ext}`
const CHUNK_SIZE = 2 * 1024 * 1024; // 2MB chunk size — NOTE(review): unused in this file (slicing happens client-side); kept for reference
// Create both directories at startup so request handlers never race on creation
fse.ensureDirSync(UPLOAD_DIR);
fse.ensureDirSync(MERGE_DIR);
// multer keeps each uploaded chunk in memory (req.file.buffer); it is written
// to disk manually in /uploadchunk, after the per-file chunk dir is ensured
const storage = multer.memoryStorage();
const upload = multer({
storage,
limits: { fileSize: 5 * 1024 * 1024 } // 5MB cap per chunk — above the 2MB front-end chunk size, leaves headroom
});
/**
* 1. 查询已上传的分片(断点续传/秒传核心)
*/
/**
 * 1. Check upload status for a file (instant-upload / resume core).
 * Body: { fileHash, fileName }.
 * Responds with:
 *   - shouldUpload=false when the merged file already exists (instant upload)
 *   - shouldUpload=true plus the list of chunk filenames already stored
 */
app.post('/checkfile', async (req, res) => {
  try {
    const { fileHash, fileName } = req.body;
    if (!fileHash || !fileName) {
      return res.status(400).json({ code: -1, msg: '参数缺失' });
    }
    // FIX: fileHash comes from the client and is used as a filesystem path
    // segment below — reject anything that is not a plain 32-char hex MD5
    // to block path traversal (e.g. fileHash = "../../etc").
    if (!/^[a-f0-9]{32}$/i.test(fileHash)) {
      return res.status(400).json({ code: -1, msg: '文件 hash 非法' });
    }
    const ext = path.extname(fileName); // extname is traversal-safe (extension only)
    const filePath = path.resolve(MERGE_DIR, `${fileHash}${ext}`);
    // Instant upload: merged file already exists, nothing to transfer.
    if (fse.existsSync(filePath)) {
      return res.json({ code: 0, uploadedChunks: [], shouldUpload: false });
    }
    // Resume: report which chunk indices are already on disk.
    const chunkDir = path.resolve(UPLOAD_DIR, fileHash);
    let uploadedChunks = [];
    if (fse.existsSync(chunkDir)) {
      uploadedChunks = await fse.readdir(chunkDir);
    }
    res.json({ code: 0, uploadedChunks, shouldUpload: true });
  } catch (error) {
    console.error('checkfile 错误:', error);
    res.status(500).json({ code: -1, msg: '服务器错误' });
  }
});
/**
* 2. 上传分片(修复核心:手动处理存储,避免 multer 时序问题)
*/
/**
 * 2. Upload a single chunk.
 * multer (memory storage) parses the multipart body; the chunk buffer is
 * written to disk manually so the per-file directory is created first,
 * avoiding the ordering problem of disk-storage multer destinations.
 * Fields: chunk (file), fileHash, chunkIndex.
 */
app.post('/uploadchunk', upload.single('chunk'), async (req, res) => {
  try {
    const { fileHash, chunkIndex } = req.body;
    const chunk = req.file; // multer-parsed file; payload is in chunk.buffer
    if (!fileHash || chunkIndex === undefined || !chunk) {
      return res.status(400).json({ code: -1, msg: '参数缺失' });
    }
    // FIX: both values become filesystem path segments — validate to block
    // path traversal (e.g. fileHash = "../../etc", chunkIndex = "../x").
    const index = Number(chunkIndex);
    if (!/^[a-f0-9]{32}$/i.test(fileHash) || !Number.isInteger(index) || index < 0) {
      return res.status(400).json({ code: -1, msg: '参数非法' });
    }
    // Ensure the per-file chunk directory exists, then persist the chunk
    // under its numeric index so /mergefile can read them in order.
    const chunkDir = path.resolve(UPLOAD_DIR, fileHash);
    await fse.ensureDir(chunkDir);
    const chunkPath = path.resolve(chunkDir, index.toString());
    await fse.writeFile(chunkPath, chunk.buffer);
    res.json({ code: 0, msg: '分片上传成功' });
  } catch (error) {
    console.error('uploadchunk 错误:', error);
    res.status(500).json({ code: -1, msg: '分片上传失败', error: error.message });
  }
});
/**
* 3. 合并所有分片
*/
/**
 * 3. Merge all chunks into the final file.
 * Body: { fileHash, fileName, chunkCount }.
 * Fixes over the previous version:
 *  - validate fileHash (path traversal) and chunkCount (must be a positive int)
 *  - verify every chunk exists BEFORE opening the write stream, so a missing
 *    chunk no longer leaks an open stream and a partial merged file
 *  - await the write stream's flush (end callback fires after 'finish')
 *    before deleting the chunk dir and responding — previously the response
 *    could be sent while the merged file was still being written
 */
app.post('/mergefile', async (req, res) => {
  try {
    const { fileHash, fileName, chunkCount } = req.body;
    const count = Number(chunkCount);
    if (!fileHash || !fileName || !Number.isInteger(count) || count <= 0) {
      return res.status(400).json({ code: -1, msg: '参数缺失' });
    }
    if (!/^[a-f0-9]{32}$/i.test(fileHash)) {
      return res.status(400).json({ code: -1, msg: '文件 hash 非法' });
    }
    const chunkDir = path.resolve(UPLOAD_DIR, fileHash);
    const ext = path.extname(fileName);
    const filePath = path.resolve(MERGE_DIR, `${fileHash}${ext}`);
    if (!fse.existsSync(chunkDir)) {
      return res.status(400).json({ code: -1, msg: '分片目录不存在' });
    }
    // Check completeness first — nothing is written until all chunks exist.
    for (let i = 0; i < count; i++) {
      if (!fse.existsSync(path.resolve(chunkDir, i.toString()))) {
        return res.status(400).json({ code: -1, msg: `分片 ${i} 缺失` });
      }
    }
    // Append chunks sequentially into one write stream (end: false keeps it open).
    const writeStream = fse.createWriteStream(filePath);
    try {
      for (let i = 0; i < count; i++) {
        const chunkPath = path.resolve(chunkDir, i.toString());
        await new Promise((resolve, reject) => {
          const readStream = fse.createReadStream(chunkPath);
          readStream.on('end', resolve);
          readStream.on('error', reject);
          readStream.pipe(writeStream, { end: false });
        });
      }
      // end(cb) invokes cb after 'finish', i.e. once all data is flushed.
      await new Promise((resolve, reject) => {
        writeStream.on('error', reject);
        writeStream.end(resolve);
      });
    } catch (streamErr) {
      writeStream.destroy(); // don't leak the fd on a failed merge
      throw streamErr;
    }
    // Only now is it safe to discard the chunks.
    await fse.remove(chunkDir);
    res.json({ code: 0, msg: '文件合并成功', url: `/merged/${fileHash}${ext}` });
  } catch (error) {
    console.error('mergefile 错误:', error);
    res.status(500).json({ code: -1, msg: '合并失败', error: error.message });
  }
});
// 静态资源访问合并后的文件
app.use('/merged', express.static(MERGE_DIR));
const PORT = 3000;
app.listen(PORT, () => {
console.log(`后端服务启动成功:http://localhost:${PORT}`);
console.log(`分片存储目录:${UPLOAD_DIR}`);
console.log(`合并后文件目录:${MERGE_DIR}`);
});
3、启动后端:
node server.js
二、前端服务
1、.vue文件
vue
<template>
<div id="app" style="max-width: 800px;margin: 50px auto;">
<h2>Vue2 大文件分片上传(断点续传)</h2>
<input type="file" @change="handleFileChange">
<button :disabled="!file || uploading" style="margin-left: 10px;" @click="handleUpload">
{{ uploading ? '上传中...' : '开始上传' }}
</button>
<!-- Progress bar: inner div width is bound dynamically to totalProgress -->
<div v-if="totalProgress > 0" style="margin-top: 20px;">
<div>总进度:{{ totalProgress.toFixed(2) }}%</div>
<div style="height:5px;background:#eee;border-radius:3px;">
<div
:style="{
height: '100%',
background: '#42b983',
width: totalProgress + '%',
transition: '0.3s'
}"
/>
</div>
</div>
<!-- Status area: uploaded chunk indices + success/error message -->
<div style="margin-top: 20px;color: #333;">
<p v-if="uploadedChunkList.length">已上传分片:{{ uploadedChunkList.join(',') }}</p>
<p v-if="msg" :style="{color: msg.includes('成功') ? 'green' : 'red'}">{{ msg }}</p>
</div>
</div>
</template>
<script>
import SparkMD5 from 'spark-md5'
import axios from 'axios'
export default {
  name: 'App',
  data() {
    return {
      file: null, // currently selected File object
      fileHash: '', // 32-char hex MD5 of the whole file; empty while computing
      CHUNK_SIZE: 2 * 1024 * 1024, // 2MB per chunk — keep in sync with the backend
      chunkList: [], // Blob slices of the current file
      uploadedChunkList: [], // chunk indices (as strings) already on the server
      uploading: false,
      totalProgress: 0, // 0–100
      msg: '',
      MAX_CONCURRENT: 3 // max chunks uploading simultaneously
    }
  },
  methods: {
    // 1. File selected: remember it and compute its MD5 fingerprint.
    async handleFileChange(e) {
      const file = e.target.files[0]
      if (!file) return
      this.file = file
      // FIX: clear the previous file's hash so handleUpload cannot start
      // with a stale fingerprint while the new one is still being computed.
      this.fileHash = ''
      this.msg = '正在计算文件指纹...'
      try {
        this.fileHash = await this.getFileHash(file)
        this.msg = `文件:${file.name},hash:${this.fileHash.slice(0, 10)}...`
      } catch (error) {
        // FIX: a FileReader failure used to surface as an unhandled rejection.
        console.error('hash 计算失败:', error)
        this.msg = '❌ 文件指纹计算失败,请重新选择文件'
      }
    },
    /**
     * 2. Incremental MD5 over the whole file, read in 2MB slices so large
     * files never need to fit in memory at once (full-file hash avoids
     * collisions that sampled hashing could produce).
     * @param {File} file
     * @returns {Promise<string>} hex MD5 digest
     */
    getFileHash(file) {
      return new Promise((resolve, reject) => {
        const spark = new SparkMD5.ArrayBuffer()
        const fileReader = new FileReader()
        const chunkSize = 2 * 1024 * 1024
        let offset = 0
        const loadNext = () => {
          fileReader.readAsArrayBuffer(file.slice(offset, offset + chunkSize))
        }
        fileReader.onload = (e) => {
          spark.append(e.target.result)
          offset += e.target.result.byteLength
          if (offset < file.size) {
            loadNext()
          } else {
            resolve(spark.end())
          }
        }
        fileReader.onerror = reject
        loadNext()
      })
    },
    // 3. Main flow: check server state → slice → upload missing chunks → merge.
    async handleUpload() {
      if (!this.file) return alert('请选择文件')
      // FIX: the upload button is enabled as soon as a file is chosen, so the
      // user could click before getFileHash resolved and every request would
      // carry an empty hash — refuse to start until the fingerprint is ready.
      if (!this.fileHash) {
        this.msg = '正在计算文件指纹,请稍后再试'
        return
      }
      this.uploading = true
      this.totalProgress = 0
      this.msg = ''
      try {
        // 1) ask the server which chunks (if any) it already has
        const { data } = await axios.post('http://localhost:3000/checkfile', {
          fileHash: this.fileHash,
          fileName: this.file.name
        })
        // Instant upload: merged file already exists on the server.
        if (!data.shouldUpload) {
          this.msg = '✅ 秒传成功:文件已存在'
          this.uploading = false
          this.totalProgress = 100
          return
        }
        // Normalize to strings so index comparison below can't miss on type.
        this.uploadedChunkList = data.uploadedChunks.map(String)
        // 2) slice the file
        this.chunkList = this.createChunks(this.file)
        const total = this.chunkList.length
        let uploadedCount = this.uploadedChunkList.length
        this.totalProgress = (uploadedCount / total) * 100
        // 3) keep only the chunks the server is missing
        const needUploadChunks = this.chunkList
          .map((chunk, index) => ({ chunk, index }))
          .filter(item => !this.uploadedChunkList.includes(item.index.toString()))
        // 4) upload with bounded concurrency
        await this.concurrentUpload(needUploadChunks, total, (count) => {
          uploadedCount += count
          this.totalProgress = (uploadedCount / total) * 100
        })
        // 5) tell the server all chunks are present and can be merged
        const mergeRes = await axios.post('http://localhost:3000/mergefile', {
          fileHash: this.fileHash,
          fileName: this.file.name,
          chunkCount: this.chunkList.length
        })
        if (mergeRes.data.code === 0) {
          this.msg = '✅ 上传 + 合并完成!'
          this.totalProgress = 100
        } else {
          this.msg = `❌ 合并失败:${mergeRes.data.msg || '未知错误'}`
        }
      } catch (error) {
        console.error('上传错误:', error)
        this.msg = `❌ 上传失败:${error.message || '未知错误'}`
      } finally {
        this.uploading = false
      }
    },
    /**
     * Upload chunks in batches of MAX_CONCURRENT. Each chunk gets two
     * attempts (one automatic retry after 1s); if any chunk still fails,
     * the whole call throws after all batches have settled.
     * (FIX: removed leftover debug console.log.)
     * @param {{chunk: Blob, index: number}[]} chunks chunks still missing on the server
     * @param {number} total total chunk count (kept for API symmetry)
     * @param {(n: number) => void} onProgress called per batch with its success count
     */
    async concurrentUpload(chunks, total, onProgress) {
      const results = []
      for (let i = 0; i < chunks.length; i += this.MAX_CONCURRENT) {
        const batch = chunks.slice(i, i + this.MAX_CONCURRENT)
        const batchPromises = batch.map(async({ chunk, index }) => {
          const formData = new FormData()
          formData.append('chunk', chunk)
          formData.append('fileHash', this.fileHash)
          formData.append('chunkIndex', index)
          // Two attempts: initial try + one retry after a 1s pause.
          for (let retry = 0; retry < 2; retry++) {
            try {
              await axios.post('http://localhost:3000/uploadchunk', formData, {
                headers: { 'Content-Type': 'multipart/form-data' }
              })
              return { success: true, index }
            } catch (e) {
              console.warn(`分片 ${index} 上传失败,重试 ${retry + 1}`)
              if (retry === 1) throw e
              await new Promise(resolve => setTimeout(resolve, 1000))
            }
          }
        })
        const batchResults = await Promise.allSettled(batchPromises)
        results.push(...batchResults)
        // Progress advances by however many of this batch succeeded.
        const successCount = batchResults.filter(r => r.status === 'fulfilled' && r.value.success).length
        onProgress(successCount)
      }
      // Surface any permanently-failed chunk as a single error.
      const failed = results.filter(r => r.status === 'rejected' || !r.value?.success)
      if (failed.length > 0) {
        throw new Error(`有 ${failed.length} 个分片上传失败`)
      }
    },
    // Slice the file into CHUNK_SIZE-byte Blobs (last one may be shorter).
    createChunks(file) {
      const chunks = []
      let start = 0
      while (start < file.size) {
        const end = Math.min(start + this.CHUNK_SIZE, file.size)
        chunks.push(file.slice(start, end))
        start = end
      }
      return chunks
    }
  }
}
</script>
2、安装依赖
bash
npm install axios spark-md5 --save
3、启动服务
npm run serve
总结:
- 查询列表(文件是否已上传/已传切片index)-文件hash唯一性(spark-md5获取)
- 开始上传,过滤已上传的切片数
- 剩余切片分批次并行上传(第一次失败,自动重试 1 次,第二次失败,抛错)
- 全部切片上传成功后,调用合并接口(通知后端可以合并切片了)
秒传:文件完整存在 → 直接跳过上传
断点续传:只传缺失分片 → 断网 / 刷新可恢复(hash查询,再过滤)
并发控制:分批次,防止同时发大量请求导致崩溃
备注:按以上步骤,直接可以实践操作