oss存储分片的简单思路

思路:

前端:1.计算要分片的文件,分片总数

2.调用后端初始化分片信息

3.查询分片进度

4.上传分片

后端:1.初始化分片信息(redis)(文件名/分片总数/已上传分片数/上传时间)

2.检查分片数(已上传哪些分片)

3.上传分片(通过SHA-256算法计算分片校验值,与前端传来的校验值比对)

4.合并分片(校验分片是否全部上传,如果有未上传的返回给前端)

5.上传到oss

前端代码:仅供参考

javascript 复制代码
// Complete chunked-upload entry point: init -> check progress -> upload missing chunks -> merge.
async function uploadFile() {
    const fileInput = document.getElementById('fileInput');
    const file = fileInput.files[0];
    if (!file) {
        alert('请选择文件');
        return;
    }

    const fileName = file.name;
    const chunkSize = 1024 * 10; // 10KB per chunk
    const totalChunks = Math.ceil(file.size / chunkSize);

    // 1. Initialize the upload session on the server; it returns the fileId
    //    used by all subsequent requests.
    const initResponse = await fetch('/hospital/file2/init', {
        method: 'POST',
        headers: {
            'Content-Type': 'application/x-www-form-urlencoded',
        },
        body: `fileName=${encodeURIComponent(fileName)}&totalChunks=${totalChunks}`
    });

    if (!initResponse.ok) {
        console.error('初始化失败');
        alert('上传初始化失败');
        return;
    }

    const fileId = await initResponse.text();
    console.log('文件ID:', fileId);

    // 2. Ask which chunks are already on the server (resume support).
    //    BUG FIX: the original silently skipped uploading ALL chunks when this
    //    request failed, yet still attempted the merge; abort instead.
    const progressResponse = await fetch(`/hospital/file2/progress/${fileId}`);
    if (!progressResponse.ok) {
        console.error('查询进度失败');
        alert('上传初始化失败');
        return;
    }
    const progressData = await progressResponse.json();
    const uploadedChunks = progressData.uploadedChunkIndexes || [];
    console.log(`已上传 ${uploadedChunks.length}/${totalChunks} 个分片`);

    // 3. Upload every missing chunk, up to 3 attempts per chunk.
    //    BUG FIX: the original duplicated the uploadChunk call in a nested
    //    retry loop (1 attempt + 3 retries = 4 tries); this single loop
    //    keeps the intended limit of 3 attempts in one place.
    const alreadyUploaded = new Set(uploadedChunks);
    const maxAttempts = 3;
    for (let i = 0; i < totalChunks; i++) {
        if (alreadyUploaded.has(i)) {
            continue;
        }
        let succeeded = false;
        for (let attempt = 1; attempt <= maxAttempts && !succeeded; attempt++) {
            try {
                await uploadChunk(file, i, fileId, chunkSize);
                console.log(`分片 ${i} 上传成功`);
                succeeded = true;
            } catch (error) {
                console.error(`分片 ${i} 上传失败`, error);
                if (attempt === maxAttempts) {
                    alert(`分片 ${i} 上传失败,请重新上传`);
                    return;
                }
            }
        }
    }

    // 4. Ask the server to merge all chunks into the final file.
    try {
        const result = await mergeFile(fileId);
        console.log('文件上传成功:', result);
        alert('文件上传成功');
    } catch (error) {
        console.error('合并失败', error);
        alert('文件合并失败');
    }
}

// Slice out chunk #chunkIndex from the file, hash it, and POST it as
// multipart form data. Throws on a non-2xx response; resolves with the
// server's JSON body (the updated uploaded-chunk list) on success.
async function uploadChunk(file, chunkIndex, fileId, chunkSize) {
    const start = chunkIndex * chunkSize;
    const end = Math.min(start + chunkSize, file.size);
    const chunk = file.slice(start, end);

    // SHA-256 digest lets the server verify the chunk arrived intact.
    const checksum = await calculateHash(chunk);

    const formData = new FormData();
    formData.append('chunk', chunk);
    formData.append('chunkIndex', chunkIndex);
    formData.append('chunkChecksum', checksum);
    formData.append('fileId', fileId);

    const response = await fetch('/hospital/file2/upload', { method: 'POST', body: formData });
    if (response.ok) {
        return await response.json();
    }
    const errorText = await response.text();
    throw new Error(`上传失败: ${errorText}`);
}

// Ask the backend to assemble all uploaded chunks into the final file.
// Resolves with the server's JSON result; throws with a descriptive
// message on failure (HTTP 400 carries the list of missing chunk indexes).
async function mergeFile(fileId) {
    const response = await fetch('/hospital/file2/merge', {
        method: 'POST',
        headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
        body: `fileId=${fileId}`
    });

    if (response.ok) {
        return await response.json();
    }

    // A 400 means some chunks are missing; the body is the index list.
    if (response.status === 400) {
        const missingChunks = await response.json();
        throw new Error(`缺少分片: ${JSON.stringify(missingChunks)}`);
    }

    const errorText = await response.text();
    throw new Error(`合并失败: ${errorText}`);
}

// 计算文件分片的SHA-256哈希
function calculateHash(chunk) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.readAsArrayBuffer(chunk);
        reader.onload = async () => {
            try {
                const arrayBuffer = reader.result;
                const hashBuffer = await crypto.subtle.digest('SHA-256', arrayBuffer);
                const hashArray = Array.from(new Uint8Array(hashBuffer));
                const hashHex = hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
                resolve(hashHex);
            } catch (error) {
                reject(error);
            }
        };
        reader.onerror = () => {
            reject(new Error('读取文件分片失败'));
        };
    });
}

// Fetch the current upload progress for a file id.
// Returns the parsed progress object, or null on any failure
// (non-2xx status, network error, or bad JSON).
async function checkProgress(fileId) {
    try {
        const response = await fetch(`/hospital/file2/progress/${fileId}`);
        if (!response.ok) {
            return null;
        }
        return await response.json();
    } catch (error) {
        console.error('查询进度失败', error);
        return null;
    }
}

// Pause upload (example stub).
function pauseUpload() {
    // Sets a flag that the upload loop is presumably meant to poll.
    // NOTE(review): `isUploadPaused` is never declared anywhere in this
    // listing — this assignment creates an implicit global (and throws in
    // strict mode); declare `let isUploadPaused = false;` at module scope.
    // Also, uploadFile() never checks this flag, so pausing currently has
    // no effect — confirm intended wiring.
    isUploadPaused = true;
}

// Resume upload (example stub): clear the pause flag, then report the
// server-side progress for the given file id.
async function resumeUpload(fileId) {
    isUploadPaused = false;
    const progressData = await checkProgress(fileId);
    if (!progressData) {
        return;
    }
    console.log('继续上传,进度:', progressData.progress + '%');
    // Continue uploading based on the reported progress.
}

后端代码:仅供参考

java 复制代码
 @RestController
 @RequestMapping("/file2")
 @Slf4j
 public class File2Controller {
     // Redis key prefix for per-upload session hashes.
     private static final String FILE_UPLOAD_PREFIX = "file_upload:";

     @Autowired
     private RedisTemplate<String, Object> redisTemplate;

     // Local directory the merged file is written to.
     @Value("${my.config.savePath}")
     private String uploadPath;

     /**
      * Initialize a chunked-upload session: allocate a file id and store the
      * session metadata (file name, total chunk count, start time, uploaded
      * counter) in a Redis hash with a 24-hour TTL.
      *
      * @param fileName    original file name supplied by the client
      * @param totalChunks number of chunks the client will upload
      * @return 200 with the generated file id used by all subsequent calls
      */
     @PostMapping("/init")
     public ResponseEntity<?> initUpload(@RequestParam("fileName") String fileName,
                                         @RequestParam("totalChunks") Integer totalChunks) {
         String fileId = UUID.randomUUID().toString();
         String key = FILE_UPLOAD_PREFIX + fileId;

         Map<String, Object> metadata = new HashMap<>();
         metadata.put("fileName", fileName);
         metadata.put("totalChunks", totalChunks);
         metadata.put("uploadTime", System.currentTimeMillis());
         metadata.put("uploadedChunks", 0);

         redisTemplate.opsForHash().putAll(key, metadata);
         // Expire abandoned sessions after 24 hours.
         redisTemplate.expire(key, 24, TimeUnit.HOURS);

         return ResponseEntity.ok(fileId);
     }

     /**
      * Report upload progress for a session: total chunks, uploaded count,
      * the uploaded chunk indexes, and a percentage.
      *
      * @param fileId session id returned by /init
      * @return 200 with the progress map, or 404 if the session is unknown
      */
     @GetMapping("/progress/{fileId}")
     public ResponseEntity<?> getUploadProgress(@PathVariable String fileId) {
         String key = FILE_UPLOAD_PREFIX + fileId;

         if (!redisTemplate.hasKey(key)) {
             return ResponseEntity.status(HttpStatus.NOT_FOUND)
                     .body("File not found");
         }

         Map<Object, Object> metadata = redisTemplate.opsForHash().entries(key);
         Integer totalChunks = (Integer) metadata.get("totalChunks");

         // FIX: derive the uploaded count from the actual chunk_* hash fields
         // instead of the stored "uploadedChunks" counter — the counter could
         // drift when a chunk was uploaded more than once.
         List<Integer> uploadedChunkIndexes = collectChunkIndexes(metadata.keySet());
         int uploadedChunks = uploadedChunkIndexes.size();

         Map<String, Object> response = new HashMap<>();
         response.put("totalChunks", totalChunks);
         response.put("uploadedChunks", uploadedChunks);
         response.put("uploadedChunkIndexes", uploadedChunkIndexes);
         // FIX: guard against division by zero when totalChunks is 0 or null.
         double progress = (totalChunks == null || totalChunks == 0)
                 ? 0.0
                 : (uploadedChunks * 100.0) / totalChunks;
         response.put("progress", progress);

         return ResponseEntity.ok(response);
     }

     /**
      * Store one chunk. The chunk's SHA-256 digest is recomputed server-side
      * and compared against the client-supplied checksum before storing.
      *
      * @param chunk         the chunk bytes (multipart part)
      * @param chunkIndex    zero-based chunk index
      * @param chunkChecksum client-computed SHA-256 hex digest of the chunk
      * @param fileId        session id returned by /init
      * @return 200 with the uploaded-chunk index list; 400 on checksum
      *         mismatch; 404 if the session is unknown or expired
      */
     @PostMapping("/upload")
     public ResponseEntity<?> uploadFile(@RequestParam("chunk") MultipartFile chunk,
                                         @RequestParam("chunkIndex") Integer chunkIndex,
                                         @RequestParam("chunkChecksum") String chunkChecksum,
                                         @RequestParam("fileId") String fileId) throws Exception {

         String key = FILE_UPLOAD_PREFIX + fileId;

         if (!redisTemplate.hasKey(key)) {
             return ResponseEntity.status(HttpStatus.NOT_FOUND)
                     .body("File not initialized or expired");
         }

         byte[] chunkBytes = chunk.getBytes();
         String actualChecksum = calculateHash(chunkBytes);

         if (!chunkChecksum.equals(actualChecksum)) {
             return ResponseEntity.status(HttpStatus.BAD_REQUEST)
                     .body("Chunk checksum does not match");
         }

         // BUG FIX: the original unconditionally put + incremented, so
         // re-uploading the same chunk inflated the "uploadedChunks" counter.
         // putIfAbsent stores the chunk only once, and the counter is bumped
         // only when the chunk was newly stored.
         Boolean newlyStored = redisTemplate.opsForHash()
                 .putIfAbsent(key, "chunk_" + chunkIndex, chunkBytes);
         if (Boolean.TRUE.equals(newlyStored)) {
             redisTemplate.opsForHash().increment(key, "uploadedChunks", 1);
         }

         List<Integer> uploadedChunks =
                 collectChunkIndexes(redisTemplate.opsForHash().keys(key));

         Map<String, Object> response = new HashMap<>();
         response.put("fileId", fileId);
         response.put("uploadedChunks", uploadedChunks);

         return ResponseEntity.ok(response);
     }

     /**
      * Merge all chunks into the final file on disk and drop the session.
      *
      * @param fileId session id returned by /init
      * @return 200 with the download URL; 400 with the list of missing chunk
      *         indexes if the upload is incomplete; 404 if session unknown
      * @throws IOException if writing the merged file fails
      */
     @PostMapping("/merge")
     public ResponseEntity<?> mergeFile(@RequestParam("fileId") String fileId) throws IOException {
         String key = FILE_UPLOAD_PREFIX + fileId;

         Map<Object, Object> allData = redisTemplate.opsForHash().entries(key);

         if (allData.isEmpty()) {
             return ResponseEntity.status(HttpStatus.NOT_FOUND)
                     .body("File not found");
         }

         String fileName = (String) allData.get("fileName");
         Integer totalChunks = (Integer) allData.get("totalChunks");

         // Report any missing chunks back to the client as HTTP 400.
         List<Integer> missingChunks = new ArrayList<>();
         for (int i = 0; i < totalChunks; i++) {
             if (!allData.containsKey("chunk_" + i)) {
                 missingChunks.add(i);
             }
         }

         if (!missingChunks.isEmpty()) {
             return ResponseEntity.status(HttpStatus.BAD_REQUEST)
                     .body(missingChunks);
         }

         // SECURITY FIX: fileName comes from the client; strip any directory
         // components so a name like "../../evil" cannot escape uploadPath.
         String safeName = new File(fileName).getName();
         File outputFile = new File(uploadPath, safeName);
         try (FileOutputStream fos = new FileOutputStream(outputFile)) {
             for (int i = 0; i < totalChunks; i++) {
                 fos.write((byte[]) allData.get("chunk_" + i));
             }
         }

         // The session is complete; free the Redis hash.
         redisTemplate.delete(key);

         return ResponseEntity.ok()
                 .body(Collections.singletonMap("url", "/download/" + safeName));
     }

     /**
      * Extract chunk indexes from Redis hash field names of the form
      * "chunk_&lt;n&gt;".
      */
     private static List<Integer> collectChunkIndexes(Set<Object> fieldNames) {
         List<Integer> indexes = new ArrayList<>();
         for (Object field : fieldNames) {
             String name = field.toString();
             if (name.startsWith("chunk_")) {
                 indexes.add(Integer.parseInt(name.substring("chunk_".length())));
             }
         }
         return indexes;
     }

     /**
      * Hex-encoded SHA-256 digest of a chunk, used to verify integrity
      * against the client-supplied checksum.
      *
      * @param fileChunk raw chunk bytes
      * @return lowercase hex digest string
      * @throws Exception if the SHA-256 algorithm is unavailable
      */
     public static String calculateHash(byte[] fileChunk) throws Exception {
         MessageDigest md = MessageDigest.getInstance("SHA-256");
         byte[] hash = md.digest(fileChunk);
         StringBuilder hexString = new StringBuilder(hash.length * 2);
         for (byte b : hash) {
             hexString.append(String.format("%02x", b));
         }
         return hexString.toString();
     }
 }
相关推荐
凌冰_2 小时前
IDEA2025 搭建Web并部署到Tomcat运行Servlet+Thymeleaf
java·servlet·tomcat
Seven972 小时前
剑指offer-53、表达数值的字符串
java
木楚2 小时前
在idea中构建传统ssm框架的步骤和方式
java·ide·intellij-idea
董世昌412 小时前
JavaScript 中 undefined 和 not defined 的区别
java·服务器·javascript
Lisonseekpan2 小时前
Kafka、ActiveMQ、RabbitMQ、RocketMQ对比
java·后端·kafka·rabbitmq·rocketmq·activemq
我是华为OD~HR~栗栗呀2 小时前
(华为od)21届-Python面经
java·前端·c++·python·华为od·华为·面试
夕阳下的一片树叶9132 小时前
后端java遇到的问题
java·开发语言
CodeAmaz2 小时前
RocketMQ整体工作流程_详解
java·rocketmq·rocketmq整体流程
刘一说2 小时前
ES6+核心特性全面浅析
java·前端·es6