Recently a project called for uploading very large files, tens to hundreds of GB, with resumable (break-point) upload support, backed by MinIO for storage.
First, why not just upload through the MinIO web console? A few reasons:
1. The console requires logging in with the access key / secret key, and those credentials absolutely must not be handed out to everyone.
2. The console's login cookie expires, so a sufficiently large file can hit a timeout mid-upload.
3. The console has no resumable upload.
So I built chunked upload on top of MinIO, with resume support. The overall approach:
1. The frontend calls an init endpoint and gets back a unique identifier for the batch, which you can think of as a unique ID generated for this file.
2. The frontend slices the file into chunks and uploads them through the upload endpoint concurrently.
3. When the backend receives a chunk, it uploads it straight to MinIO and stores the chunk's MD5 digest in the database.
4. Once every chunk is uploaded, the frontend calls a merge endpoint that composes the chunks into the complete file.
5. Resume: when the first chunk is uploaded, the backend checks its MD5; if the digest is already in the database, the file was uploaded before, so the original batch ID is returned and the frontend resumes from chunk 2 onward. For every incoming chunk the backend looks up its MD5: if found, that chunk already uploaded successfully and is skipped; otherwise it is handled as a normal upload.
Enough talk, here's the code:
<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="utf-8" />
<title>MinIO 分片上传测试页</title>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style>
body { font-family: system-ui, Arial, sans-serif; margin: 24px; }
.row { margin-bottom: 12px; }
label { display: inline-block; min-width: 160px; }
input[type="number"] { width: 140px; }
button { margin-right: 8px; }
#log { white-space: pre-wrap; border: 1px solid #ddd; padding: 12px; min-height: 180px; background: #fafafa; }
progress { width: 420px; height: 18px; }
</style>
</head>
<body>
<h2>MinIO chunked upload (frontend slices, backend merges)</h2>
<div class="row">
<label>Choose file:</label>
<input id="fileInput" type="file" />
</div>
<div class="row">
<label>Chunk size (MB):</label>
<input id="chunkSizeMb" type="number" min="5" value="64" />
</div>
<div class="row">
<label>Concurrent uploads:</label>
<input id="concurrency" type="number" min="1" value="2" />
</div>
<div class="row">
<button id="btnStart">Start upload</button>
<button id="btnAbort" disabled>Cancel upload</button>
</div>
<div class="row">
<label>Overall progress:</label>
<progress id="overallProgress" value="0" max="100"></progress>
<span id="overallText">0%</span>
</div>
<h3>Log</h3>
<div id="log"></div>
<script>
const logEl = document.getElementById('log');
const fileInput = document.getElementById('fileInput');
const chunkSizeMbEl = document.getElementById('chunkSizeMb');
const concurrencyEl = document.getElementById('concurrency');
const btnStart = document.getElementById('btnStart');
const btnAbort = document.getElementById('btnAbort');
const overallProgress = document.getElementById('overallProgress');
const overallText = document.getElementById('overallText');
let abortFlag = false;
let currentPrefix = '';
let currentUploadId = '';
/**
* Append a line to the log
*/
function log(msg) {
const time = new Date().toLocaleTimeString();
logEl.textContent += `[${time}] ${msg}\n`;
logEl.scrollTop = logEl.scrollHeight;
}
/**
* Call the backend init endpoint to obtain the uploadId and prefix
*/
async function initUpload(fileName) {
const params = new URLSearchParams({ fileName });
const resp = await fetch('/api/datacenter/minio/multipart/init', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: params.toString()
});
if (!resp.ok) throw new Error('init request failed: ' + resp.status);
const data = await resp.json();
if (data.code !== 0) throw new Error('init returned an error: ' + (data.msg || ''));
return data.data; // { uploadId, prefix, bucket }
}
/**
* Upload a single chunk
*/
async function uploadChunk(prefix, partNumber, blob, contentType, uploadId, fileName, totalPart) {
const fd = new FormData();
fd.append('prefix', prefix);
fd.append('partNumber', String(partNumber));
if (contentType) fd.append('contentType', contentType);
fd.append('chunk', blob, `part-${partNumber}`);
fd.append('uploadId', uploadId);
fd.append('fileName', fileName);
fd.append('totalPart', String(totalPart));
const resp = await fetch('/api/datacenter/minio/multipart/upload', { method: 'POST', body: fd });
const json = await resp.json();
if (!resp.ok || json.code !== 0) {
throw new Error('chunk upload failed: ' + (json.msg || resp.status));
}
return json.data;
}
/**
* Chunk upload with retries: up to 3 attempts, after which the chunk is skipped
*/
async function uploadChunkWithRetry(prefix, partNumber, blob, contentType, uploadId, fileName, totalPart, maxRetries = 3) {
let attempt = 0;
while (attempt < maxRetries) {
try {
const data = await uploadChunk(prefix, partNumber, blob, contentType, uploadId, fileName, totalPart);
return data;
} catch (err) {
attempt++;
log(`chunk #${partNumber} upload failed (attempt ${attempt}): ${err.message}`);
if (attempt < maxRetries) {
await new Promise(r => setTimeout(r, 1000 * attempt));
}
}
}
log(`chunk #${partNumber} failed ${maxRetries} times in a row, skipping`);
return null;
}
/**
* Ask the backend to merge all chunks into the final object
*/
async function completeUpload(prefix, fileName, contentType, uploadId) {
const params = new URLSearchParams({ prefix, fileName, contentType, uploadId });
const resp = await fetch('/api/datacenter/minio/multipart/complete', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: params.toString()
});
const json = await resp.json();
if (!resp.ok || json.code !== 0) {
throw new Error('merge failed: ' + (json.msg || resp.status));
}
return json.data; // { bucket, object, url }
}
/**
* Cancel the upload and delete every chunk under the prefix
*/
async function abortUpload(prefix) {
const params = new URLSearchParams({ prefix });
const resp = await fetch('/api/datacenter/minio/multipart/abort', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: params.toString()
});
const json = await resp.json();
if (!resp.ok || json.code !== 0) {
throw new Error('abort failed: ' + (json.msg || resp.status));
}
}
/**
* Slice the file and upload the chunks with limited concurrency
*/
async function startUpload() {
abortFlag = false;
const file = fileInput.files?.[0];
if (!file) { log('Please choose a file first'); return; }
// MinIO's compose requires every part except the last to be at least 5 MiB, so floor at 5 MB
const chunkSize = Math.max(5, Number(chunkSizeMbEl.value) || 5) * 1024 * 1024;
const concurrency = Math.max(1, Number(concurrencyEl.value) || 1);
log(`starting upload: file=${file.name} size=${file.size}B chunkSize=${chunkSize} concurrency=${concurrency}`);
// 1. init
const init = await initUpload(file.name);
currentPrefix = init.prefix;
currentUploadId = init.uploadId;
btnAbort.disabled = false;
log(`init ok: prefix=${init.prefix}`);
// 2. 切片
const parts = [];
let start = 0; let partNumber = 1;
while (start < file.size) {
const end = Math.min(file.size, start + chunkSize);
const blob = file.slice(start, end);
parts.push({ partNumber, blob });
start = end; partNumber++;
}
overallProgress.value = 0; overallProgress.max = parts.length; overallText.textContent = `0/${parts.length}`;
// 3. Upload the first chunk synchronously to detect a resumable session
let completed = 0;
if (parts.length > 0) {
const first = parts[0];
log(`uploading chunk 1 to check whether this is a resumed session...`);
// Note: use currentPrefix/currentUploadId here
const res = await uploadChunkWithRetry(currentPrefix, first.partNumber, first.blob, file.type, currentUploadId, file.name, parts.length);
if (!res) {
log('chunk 1 failed to upload, stopping');
return;
}
if (res.uploadId && res.uploadId !== currentUploadId) {
log(`resume detected, switching uploadId: ${currentUploadId} -> ${res.uploadId}`);
log(`switching prefix: ${currentPrefix} -> ${res.prefix}`);
currentUploadId = res.uploadId;
currentPrefix = res.prefix;
} else {
log(`chunk 1 uploaded`);
}
completed++;
overallProgress.value = completed; overallText.textContent = `${completed}/${parts.length}`;
}
// 4. Upload the remaining chunks concurrently
let idx = 1; // start from chunk 2
const failedParts = [];
async function worker() {
while (idx < parts.length && !abortFlag) {
const current = parts[idx++];
// Always read the latest currentPrefix/currentUploadId: resume detection may have switched them
const ok = await uploadChunkWithRetry(currentPrefix, current.partNumber, current.blob, file.type, currentUploadId, file.name, parts.length);
if (ok) {
completed++;
overallProgress.value = completed; overallText.textContent = `${completed}/${parts.length}`;
log(`chunk done: #${current.partNumber}`);
} else {
failedParts.push(current.partNumber);
}
}
}
const workers = Array.from({ length: concurrency }, () => worker());
await Promise.all(workers);
if (abortFlag) { log('upload cancelled'); return; }
// 5. Complete the merge
if (failedParts.length) {
log(`${failedParts.length} chunk(s) failed to upload: ${failedParts.join(', ')}; attempting the merge anyway, the backend may report failure`);
}
const result = await completeUpload(currentPrefix, file.name, file.type, currentUploadId);
log(`merge complete: bucket=${result.bucket} object=${result.object}`);
log(`access URL: ${result.url}`);
btnAbort.disabled = true;
}
btnStart.addEventListener('click', () => {
startUpload().catch(err => { log('error: ' + err.message); btnAbort.disabled = true; });
});
btnAbort.addEventListener('click', () => {
abortFlag = true;
btnAbort.disabled = true;
if (currentPrefix) {
abortUpload(currentPrefix).then(() => log('cancelled and cleaned up chunks')).catch(err => log('abort failed: ' + err.message));
}
});
</script>
</body>
</html>
A few notes on the frontend:
1. The first chunk must be uploaded synchronously: wait for its response before launching the concurrent workers, because that response may switch uploadId and prefix over to an earlier session.
2. Don't set the concurrency too high. Browsers reportedly cap concurrent HTTP/1.1 connections at around 6 per origin; I haven't verified this myself.
3. Keep chunks at 5 MB or more: the backend merges with MinIO's composeObject, which requires every source part except the last to be at least 5 MiB. That's why the page floors the chunk size at 5 MB.
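Resume detection keys off each chunk's MD5, which the server recomputes from the raw bytes (DigestUtil.md5Hex in the controller below). For reference, here is a standalone JDK-only equivalent, useful if you ever need to precompute digests outside the browser. This is a hypothetical helper, not part of the project, and HexFormat needs Java 17+:

import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.HexFormat;

public class ChunkDigest {
public static void main(String[] args) throws Exception {
// Read one chunk's bytes and print the same lowercase hex MD5 the server stores
byte[] chunk = Files.readAllBytes(Path.of(args[0]));
byte[] md5 = MessageDigest.getInstance("MD5").digest(chunk);
System.out.println(HexFormat.of().formatHex(md5));
}
}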
Backend database tables: one records each uploaded chunk, the other records merge results; once a row lands in the merge table, the whole file has been uploaded and merged successfully. Given the lookups below (by md5, and by upload_id plus part), a unique index on (upload_id, part) and an index on md5 are worth adding in practice; the race-safe save() sketch later in this post relies on the unique index.
create table "t_large_file_upload"
(
id bigint not null,
file_name varchar(512) not null,
upload_id varchar(255) not null,
part integer not null,
flag smallint default 1 not null,
total_part integer not null,
prefix varchar(255) not null,
md5 varchar(128),
constraint _large_file_PRIMARY_KEY primary key (id)
) tablespace pg_default;
comment on table t_large_file_upload is '大文件分片上传记录';
comment on column t_large_file_upload.file_name is '文件名称';
comment on column t_large_file_upload.upload_id is '上传的批次id,也是存储路径';
comment on column t_large_file_upload.part is '分片编号';
comment on column t_large_file_upload.flag is '上传成功标识默认1成功';
comment on column t_large_file_upload.total_part is '总分片数';
comment on column t_large_file_upload.prefix is '路径前缀';
comment on column t_large_file_upload.md5 is '分片md5值';
alter table "t_large_file_upload" owner to postgres;
create table "t_large_file_compose"
(
id bigint not null,
prefix varchar(512) not null,
file_name varchar(255) not null,
upload_id varchar(255) not null,
total_part integer not null,
constraint file_compose_PRIMARY_KEY primary key (id)
) tablespace pg_default;
comment on table t_large_file_compose is '大文件上传合并结果';
comment on column t_large_file_compose.prefix is '存储的key前缀';
comment on column t_large_file_compose.file_name is '合并结果文件名称';
comment on column t_large_file_compose.upload_id is '上传批次';
comment on column t_large_file_compose.total_part is '总分片数';
alter table "t_large_file_compose" owner to postgres;
Controller layer:
package com.pig4cloud.pigx.data.center.controller;
import cn.hutool.core.util.IdUtil;
import cn.hutool.crypto.digest.DigestUtil;
import com.alibaba.fastjson.JSON;
import com.pig4cloud.pigx.common.core.util.CommonBuilder;
import com.pig4cloud.pigx.common.core.util.HttpRequestUtil;
import com.pig4cloud.pigx.common.core.util.R;
import com.pig4cloud.pigx.data.center.common.config.properties.MinioProperties;
import com.pig4cloud.pigx.data.center.common.util.MinioUtils;
import com.pig4cloud.pigx.data.center.entity.LargeFileCompose;
import com.pig4cloud.pigx.data.center.entity.LargeFileUpload;
import com.pig4cloud.pigx.data.center.service.LargeFileComposeService;
import com.pig4cloud.pigx.data.center.service.LargeFileUploadService;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import javax.annotation.PostConstruct;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
@RestController
@RequestMapping("/minio/multipart")
@Slf4j
public class MinioMultipartController {
@Autowired
private MinioProperties minioProperties;
@Autowired
private LargeFileUploadService largeFileUploadService;
@Autowired
private LargeFileComposeService largeFileComposeService;
private final static String PART = ".part";
/**
* Initialize the MinIO client on startup
*/
@PostConstruct
public void initMinio() {
MinioUtils.init(minioProperties.getHost(), minioProperties.getEndPoint(), minioProperties.getBucket(), minioProperties.getAccessKey(), minioProperties.getSecretKey());
}
/**
* Initialize a chunked upload: returns the uploadId and prefix path; the frontend stores each chunk under that prefix
*/
@PostMapping("/init")
public R<Map<String, String>> init(@RequestParam("fileName") String fileName) {
String uploadId = UUID.randomUUID().toString().replace("-", "");
String date = DateTimeFormatter.ofPattern("yyyyMMdd").format(LocalDateTime.now());
String prefix = String.format("multipart/%s/%s/", date, uploadId);
try {
// Create an empty directory object in MinIO so later prefix queries work
MinioUtils.putDirObject(MinioUtils.getBucketName(), prefix);
} catch (Exception e) {
log.warn("init create dir failed, but continue. prefix={}", prefix, e);
}
Map<String, String> resp = new HashMap<>();
resp.put("uploadId", uploadId);
resp.put("prefix", prefix);
resp.put("bucket", MinioUtils.getBucketName());
return R.ok(resp);
}
/**
* Receive a chunk already sliced by the frontend and upload it straight to MinIO
*/
@PostMapping("/upload")
public R<Map<String, String>> uploadPart(@RequestParam("prefix") String prefix,
@RequestParam("partNumber") Integer partNumber,
@RequestParam(value = "contentType", required = false) String contentType,
@RequestParam("chunk") MultipartFile chunk,
@RequestParam String uploadId,
@RequestParam String fileName,
@RequestParam("totalPart") Integer totalPart
) {
if (StringUtils.isBlank(prefix) || partNumber == null || chunk == null || chunk.isEmpty() || StringUtils.isBlank(uploadId) || StringUtils.isBlank(fileName)) {
return R.failed("参数不完整");
}
LargeFileUpload largeFileUpload = CommonBuilder.of(LargeFileUpload.class)
.with(LargeFileUpload::setId, IdUtil.getSnowflakeNextId())
.with(LargeFileUpload::setFileName, fileName)
.with(LargeFileUpload::setUploadId, uploadId)
.with(LargeFileUpload::setPart, partNumber)
.with(LargeFileUpload::setTotalPart, totalPart)
.with(LargeFileUpload::setPrefix, prefix)
.build();
String objectName = prefix + partNumber + PART;
try {
byte[] bytes = chunk.getBytes();
String md5 = DigestUtil.md5Hex(bytes);
largeFileUpload.setMd5(md5);
LargeFileUpload one = largeFileUploadService.getOne(md5, partNumber);
if (one == null || 0 == one.getFlag()) { // first upload, or a previous attempt failed
MinioUtils.putObject(MinioUtils.getBucketName(), bytes, objectName,
StringUtils.defaultIfBlank(contentType, "application/octet-stream"));
largeFileUpload.setFlag(1);
largeFileUploadService.save(largeFileUpload);
Map<String, String> result = new HashMap<>();
result.put("uploadId", uploadId);
result.put("prefix", prefix);
return R.ok(result);
}
Map<String, String> result = new HashMap<>();
result.put("uploadId", one.getUploadId());
result.put("prefix", one.getPrefix());
return R.ok(result);
} catch (Exception e) {
log.error("uploadPart failed, objectName={}", objectName, e);
largeFileUpload.setFlag(0);
largeFileUploadService.save(largeFileUpload);
return R.failed("上传分片失败");
}
}
/**
* Complete the merge: the server composes all chunks into the final object in partNumber order. Note the compose itself runs asynchronously, so the returned URL only becomes accessible once the merge finishes.
*/
@PostMapping("/complete")
public R<Map<String, String>> complete(@RequestParam("prefix") String prefix,
@RequestParam("fileName") String fileName,
@RequestParam(value = "contentType", required = false) String contentType,
@RequestParam String uploadId) {
if (StringUtils.isBlank(prefix) || StringUtils.isBlank(fileName) || StringUtils.isBlank(uploadId)) {
return R.failed("参数不完整");
}
String bucket = MinioUtils.getBucketName();
try {
List<LargeFileUpload> byUploadId = largeFileUploadService.getByUploadId(uploadId);
if (CollectionUtils.isNotEmpty(byUploadId)) {
Integer totalPart = byUploadId.get(0).getTotalPart();
long size = byUploadId.parallelStream().filter(o -> 1 == o.getFlag()).count();
if (size != totalPart) {
List<Integer> parts = byUploadId.parallelStream().filter(o -> 0 == o.getFlag()).map(LargeFileUpload::getPart).toList();
return R.failed("有上传失败的分片:" + JSON.toJSONString(parts));
}
}
// Build the ordered list of part object names from the DB records
List<String> parts = byUploadId.stream()
.sorted(Comparator.comparing(LargeFileUpload::getPart))
.map(u -> prefix + u.getPart() + PART)
.toList();
if (parts.isEmpty()) {
return R.failed("未找到任何分片");
}
// Target object path
String targetObject = prefix + fileName;
CompletableFuture
.runAsync(() -> {
try {
MinioUtils.composeObject(bucket, targetObject, parts,
StringUtils.defaultIfBlank(contentType, "application/octet-stream"));
} catch (Exception e) {
throw new RuntimeException(e);
}
// Only reached if the compose above did not throw
MinioUtils.removeObjects(bucket, parts);
log.info("compose_complete...");
})
.thenRun(() -> {
LargeFileCompose largeFileCompose = CommonBuilder.of(LargeFileCompose.class)
.with(LargeFileCompose::setId, IdUtil.getSnowflakeNextId())
.with(LargeFileCompose::setFileName, fileName)
.with(LargeFileCompose::setPrefix, prefix)
.with(LargeFileCompose::setUploadId, uploadId)
.with(LargeFileCompose::setTotalPart, parts.size())
.build();
largeFileComposeService.save(largeFileCompose);
}).thenRun(() -> {
String unzipUrl = minioProperties.getUnzipUrl();
if (StringUtils.isNotBlank(unzipUrl) && fileName.endsWith(".zip")) {
String unzipObject = minioProperties.getShareFolderPath() + "/" + minioProperties.getBucket() + "/" + targetObject;
String s = HttpRequestUtil.doPost(unzipUrl, Collections.emptyMap(), JSON.toJSONString(Map.of("path", unzipObject)));
log.info("unzip result: {}", s);
}
})
.exceptionally(ex -> {
log.error("compose failed, keep parts for retry. prefix={}, fileName={}", prefix, fileName, ex);
return null;
});
Map<String, String> resp = new HashMap<>();
resp.put("bucket", bucket);
resp.put("object", targetObject);
resp.put("url", MinioUtils.getVisitUrl() + MinioUtils.encodeObjectPath(targetObject));
return R.ok(resp);
} catch (Exception e) {
log.error("complete compose failed, prefix={}, fileName={}", prefix, fileName, e);
return R.failed("合并失败");
}
}
/**
* Abort the upload: delete every chunk already uploaded
*/
@PostMapping("/abort")
public R<Boolean> abort(@RequestParam("prefix") String prefix) {
if (StringUtils.isBlank(prefix)) {
return R.failed("参数不完整");
}
try {
List<String> parts = MinioUtils.getAllObjectsByPrefix(MinioUtils.getBucketName(), prefix, true).stream()
.map(item -> item.objectName())
.collect(Collectors.toList());
if (!parts.isEmpty()) {
MinioUtils.removeObjects(MinioUtils.getBucketName(), parts);
}
return R.ok(true);
} catch (Exception e) {
log.error("abort failed, prefix={}", prefix, e);
return R.failed("取消失败");
}
}
/**
* Extract the part number (the numeric tail of the object name), e.g. prefix/12.part -> 12
*/
static int extractPartNumber(String objectName) {
try {
int idx = objectName.lastIndexOf('/');
String last = idx >= 0 ? objectName.substring(idx + 1) : objectName;
if (last.endsWith(PART)) {
last = last.substring(0, last.length() - PART.length());
}
return Integer.parseInt(last);
} catch (Exception e) {
return 0;
}
}
}
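Before moving on to the service layer, the init endpoint can be smoke-tested with nothing but the JDK's HttpClient. This is a throwaway sketch: the host, port, and the /api/datacenter gateway prefix are assumptions matching the URLs used by the test page above.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class InitSmokeTest {
public static void main(String[] args) throws Exception {
HttpClient client = HttpClient.newHttpClient();
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create("http://localhost:8080/api/datacenter/minio/multipart/init"))
.header("Content-Type", "application/x-www-form-urlencoded")
.POST(HttpRequest.BodyPublishers.ofString("fileName=demo.zip"))
.build();
HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
// Expect something like:
// {"code":0,"data":{"uploadId":"<32 hex chars>","prefix":"multipart/20250101/<uploadId>/","bucket":"..."}}
System.out.println(response.statusCode() + " " + response.body());
}
}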
Service layer:
package com.pig4cloud.pigx.data.center.service;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.pig4cloud.pigx.data.center.entity.LargeFileUpload;
import com.pig4cloud.pigx.data.center.mapper.LargeFileUploadMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
import java.util.List;
@Service
public class LargeFileUploadService {
@Autowired
private LargeFileUploadMapper largeFileUploadMapper;
/**
* Save an upload record: insert, or update the flag if a record for this (uploadId, part) already exists
*/
@Transactional
public void save(LargeFileUpload largeFileUpload) {
LambdaQueryWrapper<LargeFileUpload> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(LargeFileUpload::getUploadId, largeFileUpload.getUploadId());
wrapper.eq(LargeFileUpload::getPart, largeFileUpload.getPart());
LargeFileUpload largeFileUpload1 = largeFileUploadMapper.selectOne(wrapper);
if (largeFileUpload1 != null) {
largeFileUpload1.setFlag(largeFileUpload.getFlag());
largeFileUploadMapper.updateById(largeFileUpload1);
} else {
largeFileUploadMapper.insert(largeFileUpload);
}
}
/**
* Query all chunk records for an uploadId
*/
public List<LargeFileUpload> getByUploadId(String uploadId) {
LambdaQueryWrapper<LargeFileUpload> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(LargeFileUpload::getUploadId, uploadId);
return largeFileUploadMapper.selectList(wrapper);
}
/**
* Look up a chunk by md5 and part number; also catches the case where the frontend timed out but the backend actually succeeded
*/
public LargeFileUpload getOne(String md5, Integer part) {
LambdaQueryWrapper<LargeFileUpload> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(LargeFileUpload::getMd5, md5);
wrapper.eq(LargeFileUpload::getPart, part);
return largeFileUploadMapper.selectOne(wrapper);
}
}
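One caveat in save(): the select-then-insert is not atomic, so two concurrent requests for the same (uploadId, part) — say, a client retry racing a slow original — can both see null and insert duplicate rows. Below is a race-safe sketch, assuming the unique index on (upload_id, part) suggested earlier; DuplicateKeyException is Spring's translation of a unique violation. Note there is deliberately no @Transactional: in PostgreSQL a failed insert aborts the enclosing transaction, so the fallback update must run as its own statement.

import org.springframework.dao.DuplicateKeyException;

public void save(LargeFileUpload largeFileUpload) {
try {
largeFileUploadMapper.insert(largeFileUpload);
} catch (DuplicateKeyException e) {
// Another request already inserted this (uploadId, part): just refresh its flag
LambdaQueryWrapper<LargeFileUpload> wrapper = new LambdaQueryWrapper<>();
wrapper.eq(LargeFileUpload::getUploadId, largeFileUpload.getUploadId());
wrapper.eq(LargeFileUpload::getPart, largeFileUpload.getPart());
LargeFileUpload existing = largeFileUploadMapper.selectOne(wrapper);
existing.setFlag(largeFileUpload.getFlag());
largeFileUploadMapper.updateById(existing);
}
}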
package com.pig4cloud.pigx.data.center.service;
import com.pig4cloud.pigx.data.center.entity.LargeFileCompose;
import com.pig4cloud.pigx.data.center.mapper.LargeFileComposeMapper;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;
@Service
public class LargeFileComposeService {
@Autowired
private LargeFileComposeMapper largeFileComposeMapper;
/**
* Save a merge record
*/
@Transactional
public void save(LargeFileCompose largeFileCompose) {
largeFileComposeMapper.insert(largeFileCompose);
}
}
Configuration class:
package com.pig4cloud.pigx.data.center.common.config.properties;
import lombok.Data;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
/**
* MinIO configuration properties
*
* @author das
*/
@Component
@Data
@ConfigurationProperties(prefix = "oss.minio")
public class MinioProperties {
/**
* Shared folder path exposed by MinIO, used to publish shp and imagery data
*/
private String shareFolderPath;
/**
* Bucket name
*/
private String bucket;
/**
* MinIO service endpoint
*/
private String endPoint;
/**
* Access key
*/
private String accessKey;
/**
* Secret key
*/
private String secretKey;
/**
* URL prefix
*/
private String urlPrefix;
/**
* signatureExpire
*/
private Integer signatureExpire;
/**
* Custom domain
*/
private String customDomain;
/**
* External host used to build visit URLs
*/
private String host;
/**
* URL of the external unzip service
*/
private String unzipUrl;
public void setUrlPrefix(String urlPrefix) {
this.urlPrefix = urlPrefix.endsWith("/") ? StringUtils.substring(urlPrefix, 0, -1) : urlPrefix;
}
public String getCustomAccessUrl(String objectKey, String mainFile) {
String mainFileStr = StringUtils.isEmpty(mainFile) ? "" : "/" + mainFile;
// If objectKey contains no dot (no extension), append mainFileStr: objectKeys for videos and images carry a file name and extension, while model data has neither, its objectKey is just a folder path
if (!objectKey.contains(".")) {
return String.format("%s/%s%s", getUrlPrefix(), objectKey, mainFileStr);
} else { // objectKey contains a dot, i.e. the name itself includes one, e.g. 2025-12-04/3dtiles/1996395385102548992/5.天云楼; only append mainFileStr when mainFile is tileset.json or model.gltf
if ("tileset.json".equals(mainFile) || "model.gltf".equals(mainFile)) {
return String.format("%s/%s%s", getUrlPrefix(), objectKey, mainFileStr);
}
}
return String.format("%s/%s", getUrlPrefix(), objectKey);
}
}
MinIO utility class:
package com.pig4cloud.pigx.data.center.common.util;
import io.minio.*;
import io.minio.messages.DeleteError;
import io.minio.messages.DeleteObject;
import io.minio.messages.Item;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.web.multipart.MultipartFile;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@Slf4j
public class MinioUtils {
private static MinioClient minioClient;
private static String visitUrl;
private static String endpoint;
private static String bucketName;
private static String accessKey;
private static String secretKey;
public static final String SEPARATOR = "/";
private MinioUtils() {
}
public static void init(String visitUrl, String endpoint, String bucketName, String accessKey, String secretKey) {
MinioUtils.visitUrl = visitUrl;
MinioUtils.endpoint = endpoint;
MinioUtils.bucketName = bucketName;
MinioUtils.accessKey = accessKey;
MinioUtils.secretKey = secretKey;
createMinIoClient();
}
public static String getBucketName() {
return bucketName;
}
private static void createMinIoClient() {
try {
if (null == minioClient) {
log.info("minioClient create start......");
minioClient = MinioClient.builder().endpoint(endpoint).credentials(accessKey, secretKey).build();
log.info("minioClient create end........");
}
} catch (Exception e) {
log.error("minioClient init fail:", e);
}
}
public static String getBasisUrl() {
return endpoint + SEPARATOR + bucketName + SEPARATOR;
}
public static String getVisitUrl() {
return visitUrl + SEPARATOR + bucketName + SEPARATOR;
}
public static boolean doesObjectExist(String bucketName, String objectName) {
boolean exist = true;
try {
minioClient.statObject(StatObjectArgs.builder().bucket(bucketName).object(objectName).build());
} catch (Exception e) {
exist = false;
}
return exist;
}
public static boolean doesFolderExist(String bucketName, String objectName) {
boolean exist = false;
try {
Iterable<Result<Item>> results = minioClient.listObjects(
ListObjectsArgs.builder().bucket(bucketName).prefix(objectName).recursive(false).build());
for (Result<Item> result : results) {
Item item = result.get();
if (item.isDir() && objectName.equals(item.objectName())) {
exist = true;
}
}
} catch (Exception e) {
exist = false;
}
return exist;
}
public static List<Item> getAllObjectsByPrefix(String bucketName, String prefix, boolean recursive) throws Exception {
List<Item> list = new ArrayList<>();
Iterable<Result<Item>> objectsIterator = minioClient.listObjects(
ListObjectsArgs.builder().bucket(bucketName).prefix(prefix).recursive(recursive).build());
if (objectsIterator != null) {
for (Result<Item> o : objectsIterator) {
Item item = o.get();
list.add(item);
}
}
return list;
}
public static InputStream getObject(String bucketName, String objectName) throws Exception {
return minioClient.getObject(GetObjectArgs.builder().bucket(bucketName).object(objectName).build());
}
public static InputStream getObject(String bucketName, String objectName, long offset, long length) throws Exception {
return minioClient.getObject(GetObjectArgs.builder().bucket(bucketName).object(objectName).offset(offset).length(length).build());
}
public static Iterable<Result<Item>> listObjects(String bucketName, String prefix, boolean recursive) {
return minioClient.listObjects(ListObjectsArgs.builder().bucket(bucketName).prefix(prefix).recursive(recursive).build());
}
public static ObjectWriteResponse putObject(String bucketName, MultipartFile file, String objectName, String contentType) throws Exception {
InputStream inputStream = file.getInputStream();
return minioClient.putObject(
PutObjectArgs.builder()
.bucket(bucketName)
.object(objectName)
.contentType(contentType)
.stream(inputStream, file.getSize(), -1)
.build()
);
}
public static ObjectWriteResponse putObject(String bucketName, byte[] bytes, String objectName, String contentType) throws Exception {
InputStream inputStream = new ByteArrayInputStream(bytes);
return minioClient.putObject(
PutObjectArgs.builder()
.bucket(bucketName)
.object(objectName)
.contentType(contentType)
.stream(inputStream, bytes.length, -1)
.build()
);
}
public static ObjectWriteResponse putObject(String bucketName, String objectName, String fileName) throws Exception {
return minioClient.uploadObject(
UploadObjectArgs.builder().bucket(bucketName).object(objectName).filename(fileName).build()
);
}
public static ObjectWriteResponse putObject(String bucketName, String objectName, InputStream inputStream) throws Exception {
return minioClient.putObject(
PutObjectArgs.builder()
.bucket(bucketName)
.object(objectName)
.stream(inputStream, -1, 10 * 1024 * 1024)
.build()
);
}
public static ObjectWriteResponse putObject(String bucketName, String objectName, InputStream inputStream, String contentType) throws Exception {
return minioClient.putObject(
PutObjectArgs.builder()
.bucket(bucketName)
.object(objectName)
.contentType(contentType)
.stream(inputStream, -1, 10 * 1024 * 1024)
.build()
);
}
public static ObjectWriteResponse putDirObject(String bucketName, String objectName)
throws Exception {
return minioClient.putObject(
PutObjectArgs.builder().bucket(bucketName).object(objectName)
.stream(new ByteArrayInputStream(new byte[]{}), 0, -1).build()
);
}
public static StatObjectResponse statObject(String bucketName, String objectName) throws Exception {
return minioClient.statObject(StatObjectArgs.builder().bucket(bucketName).object(objectName).build());
}
public static ObjectWriteResponse copyObject(String srcBucketName, String srcObjectName,
String targetBucketName, String targetObjectName)
throws Exception {
return minioClient.copyObject(
CopyObjectArgs.builder()
.source(CopySource.builder().bucket(srcBucketName).object(srcObjectName).build())
.bucket(targetBucketName)
.object(targetObjectName)
.build()
);
}
public static void removeObject(String bucketName, String objectName) throws Exception {
minioClient.removeObject(RemoveObjectArgs.builder().bucket(bucketName).object(objectName).build());
}
public static void removeObjects(String bucketName, List<String> keys) {
List<DeleteObject> deleteObjectList = new LinkedList<>();
keys.forEach(s -> deleteObjectList.add(new DeleteObject(s)));
// removeObjects is lazy: the returned iterable must be consumed for the deletes to execute,
// and each element it yields represents a failed deletion
Iterable<Result<DeleteError>> results = minioClient.removeObjects(
RemoveObjectsArgs.builder().bucket(bucketName).objects(deleteObjectList).build());
for (Result<DeleteError> result : results) {
try {
DeleteError error = result.get();
log.error("failed to delete {}: {}", error.objectName(), error.message());
} catch (Exception e) {
log.error("batch delete failed:", e);
}
}
}
/**
* Compose (merge) existing objects into a new object (server-side merge for frontend-chunked uploads)
*/
public static ObjectWriteResponse composeObject(String bucketName, String targetObjectName, List<String> sourceObjectNames, String contentType) throws Exception {
List<ComposeSource> sources = new ArrayList<>();
for (String src : sourceObjectNames) {
sources.add(ComposeSource.builder().bucket(bucketName).object(src).build());
}
ComposeObjectArgs.Builder builder = ComposeObjectArgs.builder()
.bucket(bucketName)
.object(targetObjectName)
.sources(sources);
if (contentType != null) {
builder.headers(java.util.Map.of("Content-Type", contentType));
}
return minioClient.composeObject(builder.build());
}
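/**
* Optional guard (a sketch, not part of the original utility): composeObject inherits
* the S3 multipart constraint that every source object except the last must be at
* least 5 MiB; failing fast here gives a clearer error than letting the merge blow up.
*/
public static void checkComposeSources(String bucketName, List<String> sourceObjectNames) throws Exception {
final long minPartSize = 5L * 1024 * 1024;
for (int i = 0; i < sourceObjectNames.size() - 1; i++) {
StatObjectResponse stat = statObject(bucketName, sourceObjectNames.get(i));
if (stat.size() < minPartSize) {
throw new IllegalArgumentException("compose source " + sourceObjectNames.get(i)
+ " is " + stat.size() + " bytes; every part except the last must be >= 5 MiB");
}
}
}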
public static String getTempVisitUrl(String bucketName, String objectName) {
String url = "";
try {
url = minioClient.getPresignedObjectUrl(
GetPresignedObjectUrlArgs.builder()
.bucket(bucketName)
.object(objectName)
.expiry(15, TimeUnit.MINUTES) // 15-minute expiry
.method(io.minio.http.Method.GET)
.build());
} catch (Exception e) {
log.error("获取临时访问链接失败!error:", e);
}
return url;
}
public static String encodeObjectPath(String objectName) {
if (objectName == null || objectName.isEmpty()) return objectName;
String[] parts = objectName.split("/");
StringBuilder sb = new StringBuilder();
for (int i = 0; i < parts.length; i++) {
if (i > 0) sb.append('/');
try {
// URLEncoder does form encoding, which turns spaces into '+'; a URL path needs %20
sb.append(URLEncoder.encode(parts[i], "UTF-8").replace("+", "%20"));
} catch (Exception e) {
sb.append(parts[i]);
}
}
return sb.toString();
}
/**
* Delete a folder and every object under it
*
* @param bucketName bucket name
* @param objectName file or folder name (folders end with "/")
* @author tarzan LIU
*/
public static void deleteObject(String bucketName, String objectName) {
if (StringUtils.isNotBlank(objectName)) {
if (objectName.endsWith("/")) {
Iterable<Result<Item>> list = minioClient.listObjects(ListObjectsArgs.builder().bucket(bucketName).prefix(objectName).recursive(false).build());
list.forEach(e -> {
try {
if (e.get().isDir()) {
deleteObject(bucketName, e.get().objectName());
}
} catch (Exception ex) {
log.error("failed to delete sub-folder:", ex);
}
try {
minioClient.removeObject(RemoveObjectArgs.builder().bucket(bucketName).object(e.get().objectName()).build());
} catch (Exception ec) {
log.error("failed to remove object:", ec);
}
});
}
}
}
public static String getUtf8ByURLDecoder(String str) throws UnsupportedEncodingException {
String url = str.replaceAll("%(?![0-9a-fA-F]{2})", "%25");
return URLDecoder.decode(url, "UTF-8");
}
}
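To close, a minimal end-to-end sketch of the merge step against this utility. The endpoint, credentials, bucket, and object names below are placeholders, and the part objects must already exist in the bucket:

public class ComposeDemo {
public static void main(String[] args) throws Exception {
// Wire up the static client exactly as the controller's @PostConstruct does
MinioUtils.init("http://files.example.com", "http://127.0.0.1:9000",
"datacenter", "minioadmin", "minioadmin");
java.util.List<String> parts = java.util.List.of(
"multipart/20250101/abc/1.part",
"multipart/20250101/abc/2.part",
"multipart/20250101/abc/3.part");
// Merge in part order, then clean up the chunk objects, mirroring the /complete endpoint
MinioUtils.composeObject(MinioUtils.getBucketName(), "multipart/20250101/abc/demo.bin",
parts, "application/octet-stream");
MinioUtils.removeObjects(MinioUtils.getBucketName(), parts);
}
}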