Large files: instant upload, chunked upload, and resumable upload

Chunked upload of large files

I. Features

1. The web client uploads the file to the server in chunks across multiple threads, then asks the server to merge the chunks, completing the chunked upload of a large file (a client-side sketch follows this list).

2. A large file that has already been uploaded is "instantly uploaded": the server recognizes it by MD5 and no data is transferred again.

3. If the service exits abnormally during an upload, the upload is resumed from the chunks that are still missing.
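Below is a minimal client-side sketch (illustration only, not code from the project) of the preparation this flow assumes: compute the file's MD5 for the /big/checkMd5 call, then walk the file in fixed-size chunks for /big/upload. The 5 MB chunk size is an assumption, chosen because S3 multipart uploads require every part except the last to be at least 5 MB.

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class ChunkClientSketch {

    // Assumed chunk size: 5 MB (S3 multipart parts other than the last must be >= 5 MB).
    static final long CHUNK_SIZE = 5L * 1024 * 1024;

    /** Hex-encoded MD5 of the whole file, sent to /big/checkMd5 to detect instant upload. */
    static String md5Hex(Path file) throws Exception {
        MessageDigest md = MessageDigest.getInstance("MD5");
        try (InputStream in = Files.newInputStream(file)) {
            byte[] buf = new byte[8192];
            for (int n; (n = in.read(buf)) != -1; ) {
                md.update(buf, 0, n);
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : md.digest()) {
            hex.append(String.format("%02x", b));
        }
        return hex.toString();
    }

    /** Walks the file chunk by chunk; each chunk would be POSTed to /big/upload. */
    static void forEachChunk(Path file) throws Exception {
        long size = Files.size(file);
        int chunkCount = (int) ((size + CHUNK_SIZE - 1) / CHUNK_SIZE);
        try (InputStream in = Files.newInputStream(file)) {
            for (int chunkNum = 1; chunkNum <= chunkCount; chunkNum++) {
                long remaining = size - (long) (chunkNum - 1) * CHUNK_SIZE;
                byte[] chunk = in.readNBytes((int) Math.min(CHUNK_SIZE, remaining));
                // POST `chunk` to /big/upload with chunkNum and chunk.length here,
                // skipping chunk numbers that /big/checkMd5 reported as already uploaded (resume).
            }
        }
    }
}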

II. Flow Diagram

III. Demo

1. Chunked upload

2. Instant upload

3. Resumable upload


IV. Code Structure

V. Tech Stack

1. Spring Boot

2. AWS S3 object storage (client construction is sketched below)

3. MySQL
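The snippets in section VI call getClient() to obtain an S3 client, but that wiring is not included in the post. A minimal sketch, assuming the AWS SDK for Java v1 (the same SDK the snippets use) and externally supplied endpoint and credentials:

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3ClientFactory {

    /**
     * Builds an AmazonS3 client (AWS SDK for Java v1). Endpoint, region and credentials
     * are placeholders; the real project presumably reads them from configuration.
     */
    public static AmazonS3 buildClient(String endpoint, String region,
                                       String accessKey, String secretKey) {
        return AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region))
                .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
                // path-style access is commonly required for S3-compatible storage backends
                .withPathStyleAccessEnabled(true)
                .build();
    }
}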

VI. Code Snippets

 @ApiOperation(value = "大文件上传")
    @PostMapping(value = "/big/upload",consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    public RestResp<String> uploadChunk(
            HttpServletRequest request,
            BigFileChunkFo bigFileChunkFo, MultipartFile file) {
        log.debug("分片上传参数:{}",bigFileChunkFo);
        String result = fileManager.uploadChunk(request,bigFileChunkFo,file);
        return RestResp.success(result);
    }



    @ApiOperation(value = "检查是否上传")
    @PostMapping("/big/checkMd5")
    public RestResp<BigFileCheckDto> checkMd5(
            HttpServletRequest request,
            @RequestBody BigFileCheckFo fileCheckFo) {
        BigFileCheckDto bigFileCheckDto = fileManager.checkMd5(request,fileCheckFo);
        return RestResp.success(bigFileCheckDto);
    }

    @ApiOperation(value = "大文件合并")
    @PostMapping("/big/merge")
    public RestResp<String> merge(
            HttpServletRequest request,
            @RequestBody BigFileMergeFo bigFileMergeFo) {
        log.debug("文件合并:{}",bigFileMergeFo);
        String result = fileManager.merge(request,bigFileMergeFo);
        return RestResp.success(result);
    }

    /**
     * Copies an object to a new key within the same bucket.
     */
    @Override
    public String copyFile(String bucketName, String sourceFileKey, String targetFileKey) throws Exception {
        log.info("bucketName:{}, sourceFileKey:{}, targetFileKey:{}", bucketName, sourceFileKey, targetFileKey);
        CopyObjectRequest copyObjectRequest = new CopyObjectRequest();
        copyObjectRequest.setSourceBucketName(bucketName);
        copyObjectRequest.setDestinationBucketName(bucketName);
        copyObjectRequest.setSourceKey(sourceFileKey);
        copyObjectRequest.setDestinationKey(targetFileKey);
        getClient().copyObject(copyObjectRequest);
        return this.getObjectPrefixUrl(bucketName) + targetFileKey;
    }

    /**
     * Uploads one chunk as a part of an S3 multipart upload.
     *
     * @param bigFileChunkFo chunk parameters from the client (bucket, chunk number, chunk size)
     * @param bigFile        the upload record holding the file key and multipart upload id
     * @param multipartFile  the chunk payload
     * @return the PartETag returned by S3, persisted for the final merge
     * @throws Exception if reading the chunk or calling S3 fails
     */
    @Override
    public PartETag uploadChunk(BigFileChunkFo bigFileChunkFo, BigFile bigFile, MultipartFile multipartFile) throws Exception {
        // bucket name
        String bucketName = bigFileChunkFo.getBucketName();
        // current chunk number (used as the S3 part number)
        int chunkNum = bigFileChunkFo.getChunkNum();
        // size of this chunk in bytes
        long chunkSize = bigFileChunkFo.getChunkSize();
        // buffer the chunk in memory and upload it as one part
        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(IOUtils
                .toByteArray(multipartFile.getInputStream()));
        UploadPartRequest uploadRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(bigFile.getFileKey())
                .withUploadId(bigFile.getUploadId())
                .withInputStream(byteArrayInputStream)
                .withPartNumber(chunkNum)
                .withPartSize(chunkSize);
        UploadPartResult uploadResult = getClient().uploadPart(uploadRequest);
        return uploadResult.getPartETag();
    }

    /**
     * Initiates an S3 multipart upload and returns its upload id.
     *
     * @param bigFileCheckFo check request carrying the target bucket
     * @param fileKey        object key the merged file will be stored under
     * @return the S3 multipart upload id
     */
    @Override
    public String getUploadId(BigFileCheckFo bigFileCheckFo, String fileKey) {
        String bucketName = bigFileCheckFo.getBucketName();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, fileKey);
        log.info("initRequest:{}", initRequest);
        InitiateMultipartUploadResult initResponse = getClient().initiateMultipartUpload(initRequest);
        return initResponse.getUploadId();
    }

    /**
     * Completes the multipart upload, merging all uploaded chunks into the final object.
     *
     * @param bigFile          the upload record (bucket, file key, upload id)
     * @param bigFileChunkList the persisted chunk records with their part numbers and ETags
     * @return the URL of the merged object
     * @throws Exception if the S3 call fails
     */
    @Override
    public String merge(BigFile bigFile, List<BigFileChunk> bigFileChunkList) throws Exception {
        String bucketName = bigFile.getBucketName();
        List<PartETag> partETagList = bigFileChunkList
                .stream()
                .map(bigFileChunk -> new PartETag(bigFileChunk.getChunkNum(), bigFileChunk.getETag()))
                .collect(Collectors.toList());
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, bigFile.getFileKey(),
                bigFile.getUploadId(), partETagList);
        log.info("compRequest:{}", compRequest);
        getClient().completeMultipartUpload(compRequest);
        return this.getObjectPrefixUrl(bucketName) + bigFile.getFileKey();
    }
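One thing the merge path has to guarantee (not shown above) is that every chunk is present and that the parts are listed in ascending part-number order; S3 rejects CompleteMultipartUpload with an InvalidPartOrder error otherwise. A hypothetical guard, using the same PartETag type as the snippet above:

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

import com.amazonaws.services.s3.model.PartETag;

public class MergeGuardSketch {

    /** Minimal chunk view; the real project uses the BigFileChunk entity. */
    public static class Chunk {
        final int chunkNum;
        final String eTag;

        public Chunk(int chunkNum, String eTag) {
            this.chunkNum = chunkNum;
            this.eTag = eTag;
        }
    }

    /**
     * Verifies that all expected chunks were uploaded, then builds the PartETag list
     * in ascending part-number order for CompleteMultipartUploadRequest.
     */
    public static List<PartETag> toOrderedParts(List<Chunk> chunks, long expectedCount) {
        if (chunks.size() != expectedCount) {
            throw new IllegalStateException(
                    "missing chunks: have " + chunks.size() + ", expected " + expectedCount);
        }
        return chunks.stream()
                .sorted(Comparator.comparingInt((Chunk c) -> c.chunkNum))
                .map(c -> new PartETag(c.chunkNum, c.eTag))
                .collect(Collectors.toList());
    }
}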

VII. Table Design

CREATE TABLE `hfle_big_file` (
  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key',
  `md5` varchar(128) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'file MD5',
  `status` int NOT NULL DEFAULT '1' COMMENT 'upload status: 1 = uploading, 2 = complete',
  `access_key` varchar(64) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'accessKey',
  `chunk_count` bigint DEFAULT NULL COMMENT 'total number of chunks',
  `file_name` varchar(240) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'file name',
  `file_size` bigint DEFAULT NULL COMMENT 'file size',
  `bucket_name` varchar(64) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'target bucket',
  `file_type` varchar(128) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'file type',
  `file_key` varchar(128) COLLATE utf8mb4_general_ci NOT NULL COMMENT 'unique object key',
  `url` varchar(256) COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'uploaded file URL',
  `upload_id` varchar(128) COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'S3 multipart upload id',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
  `creator` bigint NOT NULL DEFAULT '-1' COMMENT 'creator',
  `modifier` bigint NOT NULL DEFAULT '-1' COMMENT 'last modifier',
  `modified_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'last modified time',
  PRIMARY KEY (`id`) USING BTREE,
  UNIQUE KEY `uniq_md5_access_key` (`access_key`, `md5`, `bucket_name`, `file_key`)
) ENGINE = InnoDB AUTO_INCREMENT = 47 DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_general_ci COMMENT = 'large file upload records';


CREATE TABLE `hfle_big_chunck` (
  `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'primary key',
  `big_file_id` bigint DEFAULT NULL COMMENT 'id of the parent hfle_big_file record',
  `chunk_num` bigint DEFAULT NULL COMMENT 'chunk number (S3 part number)',
  `e_tag` varchar(64) COLLATE utf8mb4_general_ci DEFAULT NULL COMMENT 'S3 part ETag',
  `chunk_size` bigint DEFAULT NULL COMMENT 'chunk size',
  `create_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
  `creator` bigint NOT NULL DEFAULT '-1' COMMENT 'creator',
  `modifier` bigint NOT NULL DEFAULT '-1' COMMENT 'last modifier',
  `modified_time` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'last modified time',
  PRIMARY KEY (`id`) USING BTREE,
  UNIQUE KEY `uniq_chunk_num` (`big_file_id`, `chunk_num`)
) ENGINE = InnoDB AUTO_INCREMENT = 1542 DEFAULT CHARSET = utf8mb4 COLLATE = utf8mb4_general_ci COMMENT = 'large file upload chunk records';

VIII. Startup URL

http://localhost:9999

IX. Source Code Download

Source code download
