Dependencies
Because the AWS SDK issues HTTP requests for uploads, downloads, and the other API calls, the httpclient-related dependencies must be added alongside the S3 SDK.
xml
<dependency>
    <groupId>com.amazonaws</groupId>
    <artifactId>aws-java-sdk-s3</artifactId>
    <version>1.11.628</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.13</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpcore</artifactId>
    <version>4.4.13</version>
</dependency>
Utility class
The upload and download methods are overloaded, so you can pick whichever variant fits your business scenario; a short usage sketch follows the class listing below.
java
import com.amazonaws.AmazonServiceException;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.Protocol;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.util.StringUtils;
import com.mocha.order.enums.PropertiesEnum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.InputStream;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@Component
public class CosUtil {
private static final Logger LOGGER = LoggerFactory.getLogger(CosUtil.class);
//access key
public static String accessKey;
//secret key
public static String secretKey;
//service endpoint
public static String serviceEndpoint; // e.g., "cos.gz-tst.cos.tg.unicom.local"
//region
public static String region; // e.g., "gz-tst"
static AWSCredentials credentials;
static AWSStaticCredentialsProvider awsStaticCredentialsProvider;
static ClientConfiguration config;
static AwsClientBuilder.EndpointConfiguration endpointConfiguration;
static AmazonS3 conn;
//part size of 5 MB; a multiple of 5 is usually fine, and a single part should not exceed 100 MB
public static final int FIVE_PARTSIZE=5242880;
//1 MB
public static final int ONE_PARTSIZE=1048576;
@PostConstruct
public void init() {
//these values can come from the database or from a configuration file
final String cosAccessKey = PropertiesEnum.getCosAccessKey();
final String cosSecretKey = PropertiesEnum.getCosSecretKey();
final String cosServiceEndpoint = PropertiesEnum.getCosServiceEndpoint();
final String cosRegion = PropertiesEnum.getCosRegion();
if (org.apache.commons.lang3.StringUtils.isBlank(cosAccessKey)){
LOGGER.error("The AccessKey configured for Unicom Cloud COS is empty; make sure [COS_ACCESSKEY] is set, otherwise related features will not work");
}
if (org.apache.commons.lang3.StringUtils.isBlank(cosSecretKey)){
LOGGER.error("The SecretKey configured for Unicom Cloud COS is empty; make sure [COS_SECRETKEY] is set, otherwise related features will not work");
}
if (org.apache.commons.lang3.StringUtils.isBlank(cosServiceEndpoint)){
LOGGER.error("The ServiceEndpoint configured for Unicom Cloud COS is empty; make sure [COS_SERVICEENDPOINT] is set, otherwise related features will not work");
}
if (org.apache.commons.lang3.StringUtils.isBlank(cosRegion)){
LOGGER.error("The Region configured for Unicom Cloud COS is empty; make sure [COS_REGION] is set, otherwise related features will not work");
}
LOGGER.info("Unicom Cloud COS configuration - accessKey:{}, secretKey:{}, serviceEndpoint:{}, region:{}", cosAccessKey, cosSecretKey, cosServiceEndpoint, cosRegion);
LOGGER.info("Starting to initialize the Unicom Cloud COS configuration, start time:{}", new Date());
accessKey = cosAccessKey;
secretKey = cosSecretKey;
serviceEndpoint = cosServiceEndpoint;
region = cosRegion;
LOGGER.info("Finished initializing the Unicom Cloud COS configuration, end time:{}", new Date());
credentials = new BasicAWSCredentials(accessKey, secretKey);
awsStaticCredentialsProvider = new AWSStaticCredentialsProvider(credentials);
config = new ClientConfiguration();
endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region);
conn = AmazonS3ClientBuilder.standard()
.withCredentials(awsStaticCredentialsProvider)
.withClientConfiguration(config.withProtocol(Protocol.HTTP).withSignerOverride("S3SignerType"))
.withEndpointConfiguration(endpointConfiguration).build();
}
//check whether a bucket exists
public static boolean doesBucketExist(String bucketName){
return conn.listBuckets().stream().map(Bucket::getName).collect(Collectors.toList()).contains(bucketName);
}
/**
* Create a bucket
* Note: creating a bucket that already exists does not delete the bucket's existing contents
*
*/
public static Bucket createBucket(String bucketName){
LOGGER.info("创建桶:{}",bucketName);
return conn.createBucket(bucketName);
}
// list all buckets
public static List<Bucket> listBuckets(){
return conn.listBuckets();
}
// list the objects in a bucket
public static ObjectListing listObjects(String bucketName){
ObjectListing objects = conn.listObjects(bucketName);
do {
for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
LOGGER.info(objectSummary.getKey() + "\t" + objectSummary.getSize() + "\t" + StringUtils.fromDate(objectSummary.getLastModified()));
}
objects = conn.listNextBatchOfObjects(objects);
} while (objects.isTruncated());
return objects;
}
// get an object's metadata
public static ObjectMetadata getObjectMeta(String bucketName, String fileName){
LOGGER.info("Getting object metadata, bucket:{}, file:{}", bucketName, fileName);
return conn.getObjectMetadata(bucketName, fileName);
}
// upload a file - via a PutObjectRequest
public static PutObjectResult uploadFile(PutObjectRequest putObjectRequest){
LOGGER.info("Uploading file via PutObjectRequest, request:{}", putObjectRequest);
return conn.putObject(putObjectRequest);
}
// upload a file - via a File
public static PutObjectResult uploadFile(String bucketName, String keyName, File file){
LOGGER.info("Uploading file, bucket:{}, key:{}", bucketName, keyName);
return conn.putObject(bucketName, keyName, file);
}
// upload a file - via an InputStream
public static PutObjectResult uploadFile(String bucketName, String keyName, InputStream inputStream){
LOGGER.info("Uploading file, bucket:{}, key:{}", bucketName, keyName);
// note: without a content length set on the metadata, the SDK buffers the whole stream in memory before uploading
return conn.putObject(bucketName, keyName, inputStream, new ObjectMetadata());
}
// upload a file - via a String
public static PutObjectResult uploadFile(String bucketName, String keyName, String content){
LOGGER.info("Uploading file, bucket:{}, key:{}", bucketName, keyName);
return conn.putObject(bucketName, keyName, content);
}
// multipart upload via TransferManager (no need to specify a part size; also provides progress tracking, resumable and concurrent uploads)
public static Upload uploadFileByShard(String bucketName, String keyName, String filePath) throws InterruptedException, AmazonServiceException {
TransferManager tm = TransferManagerBuilder.standard().withS3Client(conn).build();
try {
LOGGER.info("Starting multipart upload, bucket:{}, key:{}, path:{}", bucketName, keyName, filePath);
Upload upload = tm.upload(bucketName, keyName, new File(filePath));
upload.waitForCompletion();
LOGGER.info("Finished multipart upload, bucket:{}, key:{}, path:{}", bucketName, keyName, filePath);
return upload;
} catch (AmazonServiceException | InterruptedException e) {
LOGGER.error("Multipart upload failed, bucket:{}, key:{}, path:{}", bucketName, keyName, filePath, e);
throw e;
} finally {
// release the TransferManager's threads without closing the shared S3 client
tm.shutdownNow(false);
}
}
/**
* Multipart upload for large files (requires an explicit part size and uses a custom thread pool)
*
* @param bucketName bucketName
* @param objectName objectName
* @param file the local file to upload
* @param minPartSize size of each part, in bytes (e.g. 5242880 = 5 MB)
* @return true if the upload completed successfully, false otherwise
*/
public static boolean uploadMultipartFileByPart(String bucketName, String objectName, File file, int minPartSize) {
long size = file.length();
final String fileName = file.getName();
if (size == 0) {
LOGGER.error("The file for multipart upload:{} is empty", fileName);
return false;
}
// compute the part layout:
// the total number of parts and the starting byte offset of each part
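// For example, a 12 MB file with a 5 MB part size yields offsets 0, 5 MB and 10 MB, i.e. three parts (the last one is 2 MB).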
List<Long> positions = Collections.synchronizedList(new ArrayList<>());
long filePosition = 0;
while (filePosition < size) {
positions.add(filePosition);
filePosition += Math.min(minPartSize, (size - filePosition));
}
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("File:{}, total size:{} bytes, split into {} parts", fileName, size, positions.size());
}
// list to collect the PartETag of every uploaded part; needed when completing the upload
List<PartETag> partETags = Collections.synchronizedList(new ArrayList<>());
// step 1: initiate the multipart upload
// set the content type (note: this stores the bare file extension rather than a full MIME type)
ObjectMetadata metadata = new ObjectMetadata();
String fileType = fileName.substring(fileName.lastIndexOf(".") + 1);
metadata.setContentType(fileType);
InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, objectName, metadata);
InitiateMultipartUploadResult initResponse = conn.initiateMultipartUpload(initRequest);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("Multipart upload started, file:{}", fileName);
}
//fixed-size thread pool for uploading the parts concurrently
ExecutorService exec = Executors.newFixedThreadPool(5);
long begin = System.currentTimeMillis();
try {
for (int i = 0; i < positions.size(); i++) {
int finalI = i;
exec.execute(() -> {
long time1 = System.currentTimeMillis();
UploadPartRequest uploadRequest = new UploadPartRequest()
.withBucketName(bucketName)
.withKey(objectName)
.withUploadId(initResponse.getUploadId())
.withPartNumber(finalI + 1)
.withFileOffset(positions.get(finalI))
.withFile(file)
.withPartSize(Math.min(minPartSize, (size - positions.get(finalI))));
// step 2: upload this part and add its PartETag to the list
partETags.add(conn.uploadPart(uploadRequest).getPartETag());
LOGGER.info("Multipart upload of file:{}, part {} took {} ms", fileName, finalI + 1, (System.currentTimeMillis() - time1));
});
}
//stop accepting new tasks and wait for all part uploads to finish (otherwise the method would return before the parts are uploaded)
exec.shutdown();
while (!exec.awaitTermination(1, TimeUnit.SECONDS)) {
// keep waiting until every part has been uploaded
}
// parts uploaded concurrently may finish out of order, but the complete request expects them in ascending part-number order
partETags.sort(Comparator.comparingInt(PartETag::getPartNumber));
// step 3: complete the upload and merge the parts
CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId(), partETags);
conn.completeMultipartUpload(compRequest);
} catch (Exception e) {
conn.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, objectName, initResponse.getUploadId()));
LOGGER.error("Multipart upload of file:{} failed", fileName, e);
return false;
}
LOGGER.info("Multipart upload of file:{} finished, total time:{} ms", fileName, (System.currentTimeMillis() - begin));
return true;
}
/**
* Initiate a multipart upload
*
* @param initRequest initiation request
* @return initiation result
*/
private static InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initRequest) {
return conn.initiateMultipartUpload(initRequest);
}
/**
* Upload a single part
*
* @param uploadRequest part upload request
* @return part upload result
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart">AWS
* API Documentation</a>
*/
private static UploadPartResult uploadPart(UploadPartRequest uploadRequest) {
return conn.uploadPart(uploadRequest);
}
/**
* Complete the multipart upload and merge the parts
*
* @param compRequest completion request
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload">AWS
* API Documentation</a>
*/
private static CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest compRequest) {
return conn.completeMultipartUpload(compRequest);
}
/**
* Abort a multipart upload
*
* @param uploadRequest abort request
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload">AWS
* API Documentation</a>
*/
private static void abortMultipartUpload(AbortMultipartUploadRequest uploadRequest) {
conn.abortMultipartUpload(uploadRequest);
}
// change an object's access control list (ACL)
public static void modifyFileAccessAuthority(String bucketName, String keyName, CannedAccessControlList cannedAccessControlList){
LOGGER.info("Changing object ACL, bucket:{}, key:{}, ACL:{}", bucketName, keyName, cannedAccessControlList.toString());
conn.setObjectAcl(bucketName, keyName, cannedAccessControlList);
}
// download an object (to a given path)
public static ObjectMetadata downloadFileAssignPath(String bucketName, String keyName, String fileSavePath){
return conn.getObject(new GetObjectRequest(bucketName, keyName), new File(fileSavePath));
}
// generate a presigned download URL for an object
public static URL generatorFileUrl(String bucketName, String keyName){
GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucketName, keyName);
return conn.generatePresignedUrl(request);
}
// delete an object
public static void deleteFile(String bucketName, String keyName){
LOGGER.info("Deleting object, bucket:{}, key:{}, time:{}", bucketName, keyName, new Date());
conn.deleteObject(bucketName, keyName);
}
// delete a bucket
public static void deleteBucket(String bucketName){
LOGGER.info("Deleting bucket, bucket:{}, time:{}", bucketName, new Date());
conn.deleteBucket(bucketName);
}
public static void main(String[] args) {
// note: outside a Spring context, init() must be called first so that the static client conn is initialized
String filePath = "G:\\mk\\GDrepo\\repo\\org\\apache\\httpcomponents\\httpcomponents-core\\4.0.1\\httpcomponents-core-4.0.1.pom"; //local file path
// ByteArrayInputStream input1 = new ByteArrayInputStream("Hello World!".getBytes());
// CosUtil.createBucket("aaa");
// final FileInputStream inputStream;
// try {
// inputStream = new FileInputStream("G:\\mk\\GDrepo\\repo\\org\\apache\\httpcomponents\\httpcomponents-core\\4.0.1\\httpcomponents-core-4.0.1.pom");
// } catch (FileNotFoundException e) {
// throw new RuntimeException(e);
// }
// CosUtil.uploadFile("aaa","a.pom",inputStream);
CosUtil.uploadFile("test","a.txt",new File(filePath));
CosUtil.uploadFile("test","a.txt",new File(filePath));
//create an empty folder (an object whose key ends with "/")
conn.putObject("aaa", "demo" + "/","");
//upload a file into a specific folder
final PutObjectRequest putObjectRequest = new PutObjectRequest("aaa", "demo" + "/" + "a.txt", new File(filePath));
conn.putObject(putObjectRequest);
// list all buckets
List<Bucket> buckets = conn.listBuckets();
for (Bucket bucket : buckets) {
System.out.println(bucket.getName() + "\t" + StringUtils.fromDate(bucket.getCreationDate()));
}
// create a bucket (calling createBucket again for an existing bucket does not clear it)
Bucket bucket = conn.createBucket("111");
Bucket bucket1 = conn.createBucket("111");
Bucket bucket12 = conn.createBucket("111");
System.out.println(bucket.getName());
// list the objects in the bucket
ObjectListing objects = conn.listObjects(bucket.getName());
do {
for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
System.out.println(objectSummary.getKey() + "\t" +
objectSummary.getSize() + "\t" +
StringUtils.fromDate(objectSummary.getLastModified()));
}
objects = conn.listNextBatchOfObjects(objects);
} while (objects.isTruncated());
// create an object from an in-memory string
ByteArrayInputStream input = new ByteArrayInputStream("Hello World!".getBytes());
conn.putObject(bucket.getName(), "hello.txt", input, new ObjectMetadata());
//multipart upload via TransferManager
String keyName = "demoya"; //object key to store the upload under
TransferManager tm = TransferManagerBuilder.standard().withS3Client(conn).build();
try {
// TransferManager processes all transfers asynchronously,
// so this call returns immediately.
System.out.println("开始分片上传");
Upload upload = tm.upload("my-test-bucket", keyName, new File(filePath));
//Optionally, wait for the upload to finish before continuing.
upload.waitForCompletion();
System.out.println("结束分片上传");
} catch (AmazonServiceException | InterruptedException e) {
// The call was transmitted successfully, but Amazon S3 couldn't process
// it, so it returned an error response.
e.printStackTrace();
}
// change the object's ACL
conn.setObjectAcl(bucket.getName(), "hello.txt", CannedAccessControlList.PublicRead);
// download an object (to a local file)
conn.getObject(new GetObjectRequest(bucket.getName(), "hello.txt"), new File("G:\\mk\\hello.txt"));
// generate a presigned download URL
GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket.getName(), "hello.txt");
System.out.println(conn.generatePresignedUrl(request));
// delete an object
conn.deleteObject(bucket.getName(), "hello.txt");
//delete the bucket
conn.deleteBucket(bucket.getName());
}
}
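// ---------------------------------------------------------------------------
// Minimal usage sketch (not part of the utility itself). Assumptions: the COS
// configuration is resolvable through PropertiesEnum, and the bucket/key names
// and local paths below are placeholders chosen for illustration only.
// ---------------------------------------------------------------------------
class CosUtilUsageExample {
public static void main(String[] args) throws Exception {
// outside a Spring context the @PostConstruct hook does not run, so initialize the static client manually
new CosUtil().init();
if (!CosUtil.doesBucketExist("demo-bucket")) {
CosUtil.createBucket("demo-bucket");
}
// pick whichever upload overload fits the scenario
CosUtil.uploadFile("demo-bucket", "files/demo.txt", new File("/tmp/demo.txt")); // from a local file
CosUtil.uploadFile("demo-bucket", "files/inline.txt", "Hello COS"); // from a plain string
CosUtil.uploadFile("demo-bucket", "files/stream.txt", new ByteArrayInputStream("streamed".getBytes())); // from an InputStream
CosUtil.uploadFileByShard("demo-bucket", "files/big.bin", "/tmp/big.bin"); // multipart upload via TransferManager
// download a copy and generate a presigned download link
CosUtil.downloadFileAssignPath("demo-bucket", "files/demo.txt", "/tmp/demo-copy.txt");
URL url = CosUtil.generatorFileUrl("demo-bucket", "files/demo.txt");
System.out.println(url);
}
}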