Table of Contents: Preface · 1. Features (upload features, download features, demo) · 2. Flow (upload flow, download flow) · 3. Code Examples · 4. Open Question

Preface
Amazon Simple Storage Service (S3) is a public cloud object storage service. Web application developers can use it to store digital assets such as images, videos, music, and documents. S3 exposes a RESTful API for programmatic interaction, and most mainstream storage vendors now support the S3 protocol.
This article is adapted from the article and code by 风希落: https://www.cnblogs.com/jsonq/p/18186340.
The project uses a front-end/back-end separated architecture. Front end: Vue 3 + Element Plus + axios + spark-md5. Back end: Spring Boot 3.x + MinIO + aws-s3 + Redis + MySQL + MyBatis-Plus.
All of the code in this article has been pushed to Gitee: https://gitee.com/luzhiyong_erfou/learning-notes/tree/master/aws-s3-upload
1. Features

Upload features

Chunked upload of large files, instant upload (MD5 deduplication), resumable upload, upload progress.

Download features

Chunked download, pausable download, download progress.
Demo (screenshots not reproduced here)

2. Flow

Upload flow

Uploading a file involves three backend requests:

1. On upload, call the check-file-MD5 interface to determine the file's state: already uploaded, not uploaded, or partially uploaded.
2. Depending on that state, call the init-multipart-upload interface to get a presigned upload URL for each chunk; the front end pairs each chunk with its URL and uploads the chunks directly to object storage.
3. When every chunk is uploaded, call the merge-file interface to merge the chunks and write the file record to the database.
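The repository's controller is not reproduced in this article. As a minimal sketch, assuming hypothetical request mappings and the SysFileUploadService methods shown in section 3 below, the three upload requests (plus download and listing) could be exposed like this:

import cn.superlu.s3uploadservice.common.R;
import cn.superlu.s3uploadservice.model.bo.FileUploadInfo;
import cn.superlu.s3uploadservice.model.vo.BaseFileVo;
import cn.superlu.s3uploadservice.model.vo.UploadUrlsVO;
import cn.superlu.s3uploadservice.service.SysFileUploadService;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import lombok.RequiredArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;

import java.io.IOException;
import java.util.List;

// Sketch only: the request paths are assumptions, the service methods are the ones listed below.
@RestController
@RequestMapping("/file")
@RequiredArgsConstructor
public class SysFileUploadController {

    private final SysFileUploadService sysFileUploadService;

    // 1. check the file status by MD5 (already uploaded / not uploaded / partially uploaded)
    @GetMapping("/check/{md5}")
    public R<BaseFileVo<FileUploadInfo>> checkFileByMd5(@PathVariable String md5) {
        return sysFileUploadService.checkFileByMd5(md5);
    }

    // 2. initialize the (multipart) upload and return presigned URLs
    @PostMapping("/init")
    public R<BaseFileVo<UploadUrlsVO>> initMultipartUpload(@RequestBody FileUploadInfo fileUploadInfo) {
        return sysFileUploadService.initMultipartUpload(fileUploadInfo);
    }

    // 3. merge the uploaded parts and persist the file record
    @PostMapping("/merge/{md5}")
    public R<BaseFileVo<String>> mergeMultipartUpload(@PathVariable String md5) {
        return sysFileUploadService.mergeMultipartUpload(md5);
    }

    // chunked download with Range support
    @GetMapping("/download/{id}")
    public ResponseEntity<byte[]> download(@PathVariable Long id, HttpServletRequest request,
                                           HttpServletResponse response) throws IOException {
        return sysFileUploadService.downloadMultipartFile(id, request, response);
    }

    // list uploaded files
    @GetMapping("/list")
    public R<List<cn.superlu.s3uploadservice.model.entity.SysFileUpload>> getFileList() {
        return sysFileUploadService.getFileList();
    }
}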
Overall steps:

1. The front end computes the file's MD5 and queries the file's status.
2. If the file is already uploaded, the backend returns success right away together with the file's URL.
3. If the file has not been uploaded, the front end calls the init-multipart interface, the backend returns presigned upload URLs, and the front end pairs each chunk with its URL.
4. If the file was partially uploaded, the backend returns its uploadId (the object-storage identifier for the upload) and listParts (the indexes of the chunks already uploaded). The front end calls the init-multipart interface again, the backend regenerates the upload URLs, and the front end filters out the already-uploaded chunks and pairs only the remaining chunks with URLs.
5. The front end uploads each chunk through its presigned URL. When all chunks are done it calls the merge-parts interface; the backend checks whether the file is single-part or multipart. A single-part file skips merging and only its record is written to the database; a multipart file is merged first and then persisted. Finally the file info is deleted from Redis and the file URL is returned.
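Step 4 boils down to a set difference: chunk numbers 1..chunkCount minus the part numbers in listParts. The project does this filtering in the Vue front end; the helper below merely restates that logic in Java as a sketch (the class and method names are illustrative, not part of the repository):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class ResumeHelper {
    // Given the total chunk count and the part numbers already stored (from getListParts),
    // return the 1-based part numbers that still need to be uploaded.
    public static List<Integer> remainingParts(int chunkCount, List<Integer> listParts) {
        return IntStream.rangeClosed(1, chunkCount)
                .boxed()
                .filter(part -> !listParts.contains(part))
                .collect(Collectors.toList());
    }
}

For example, remainingParts(6, List.of(1, 2, 5)) yields [3, 4, 6], so only those three chunks are re-uploaded.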
Download flow

Overall steps:

The front end calculates how many chunked requests are needed and sets each request's byte offset, then calls the backend in a loop. The backend checks whether the file info is cached, fetches it, reads the object stream according to the offset and chunk size passed by the front end, and returns that slice. The front end records each slice's blob and finally assembles the blobs into the downloaded file. (An illustrative stand-alone download client is shown after the service code below.)

3. Code Examples
SysFileUploadServiceImpl (service layer)
import cn.hutool.core.bean.BeanUtil;
import cn.hutool.core.date.DateUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.StrUtil;
import cn.hutool.json.JSONUtil;
import cn.superlu.s3uploadservice.common.R;
import cn.superlu.s3uploadservice.config.FileProperties;
import cn.superlu.s3uploadservice.constant.FileHttpCodeEnum;
import cn.superlu.s3uploadservice.mapper.SysFileUploadMapper;
import cn.superlu.s3uploadservice.model.bo.FileUploadInfo;
import cn.superlu.s3uploadservice.model.entity.SysFileUpload;
import cn.superlu.s3uploadservice.model.vo.BaseFileVo;
import cn.superlu.s3uploadservice.model.vo.UploadUrlsVO;
import cn.superlu.s3uploadservice.service.SysFileUploadService;
import cn.superlu.s3uploadservice.utils.AmazonS3Util;
import cn.superlu.s3uploadservice.utils.MinioUtil;
import cn.superlu.s3uploadservice.utils.RedisUtil;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import jakarta.servlet.http.HttpServletRequest;
import jakarta.servlet.http.HttpServletResponse;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.LocalDateTime;
import java.util.List;
import java.util.concurrent.TimeUnit;

@Service
@Slf4j
@RequiredArgsConstructor
public class SysFileUploadServiceImpl extends ServiceImpl<SysFileUploadMapper, SysFileUpload> implements SysFileUploadService {

    private static final Integer BUFFER_SIZE = 1024 * 64; // 64KB

    private final RedisUtil redisUtil;
    private final MinioUtil minioUtil;
    private final AmazonS3Util amazonS3Util;
    private final FileProperties fileProperties;

    /**
     * Check whether the file already exists
     * @param md5
     * @return
     */
    @Override
    public R<BaseFileVo<FileUploadInfo>> checkFileByMd5(String md5) {
        log.info("Checking whether md5: {} exists in Redis", md5);
        FileUploadInfo fileUploadInfo = (FileUploadInfo) redisUtil.get(md5);

        if (fileUploadInfo != null) {
            log.info("md5 found in Redis: {}", JSONUtil.toJsonStr(fileUploadInfo));
            if (fileUploadInfo.getChunkCount() == 1) {
                return R.ok(BaseFileVo.builder(FileHttpCodeEnum.NOT_UPLOADED, null));
            } else {
                List<Integer> listParts = minioUtil.getListParts(fileUploadInfo.getObject(), fileUploadInfo.getUploadId());
                // List<Integer> listParts = amazonS3Util.getListParts(fileUploadInfo.getObject(), fileUploadInfo.getUploadId());
                fileUploadInfo.setListParts(listParts);
                return R.ok(BaseFileVo.builder(FileHttpCodeEnum.UPLOADING, fileUploadInfo));
            }
        }

        log.info("md5: {} not found in Redis, checking MySQL", md5);
        SysFileUpload file = baseMapper.selectOne(new LambdaQueryWrapper<SysFileUpload>().eq(SysFileUpload::getMd5, md5));
        if (file != null) {
            log.info("md5: {} found in MySQL, the file is already in MinIO, instant upload applies", md5);
            FileUploadInfo dbFileInfo = BeanUtil.toBean(file, FileUploadInfo.class);
            return R.ok(BaseFileVo.builder(FileHttpCodeEnum.UPLOAD_SUCCESS, dbFileInfo));
        }

        return R.ok(BaseFileVo.builder(FileHttpCodeEnum.NOT_UPLOADED, null));
    }

    /**
     * Initialize the presigned chunk upload URLs and related data
     * @param fileUploadInfo
     * @return
     */
    @Override
    public R<BaseFileVo<UploadUrlsVO>> initMultipartUpload(FileUploadInfo fileUploadInfo) {
        log.info("Checking whether md5: {} exists in Redis", fileUploadInfo.getMd5());
        FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(fileUploadInfo.getMd5());

        // If Redis already has a record for this md5, the Redis copy wins
        String object;
        if (redisFileUploadInfo != null) {
            fileUploadInfo = redisFileUploadInfo;
            object = redisFileUploadInfo.getObject();
        } else {
            String originFileName = fileUploadInfo.getOriginFileName();
            String suffix = FileUtil.extName(originFileName);
            String fileName = FileUtil.mainName(originFileName);
            // Rename the file and store it under a year/month/day folder
            String nestFile = DateUtil.format(LocalDateTime.now(), "yyyy/MM/dd");
            object = nestFile + "/" + fileName + "_" + fileUploadInfo.getMd5() + "." + suffix;
            fileUploadInfo.setObject(object).setType(suffix);
        }

        UploadUrlsVO urlsVO;
        if (fileUploadInfo.getChunkCount() == 1) {
            // Single-part upload
            log.info("Chunk count is {}, using single-file upload", fileUploadInfo.getChunkCount());
            // urlsVO = minioUtil.getUploadObjectUrl(fileUploadInfo.getContentType(), object);
            urlsVO = amazonS3Util.getUploadObjectUrl(fileUploadInfo.getContentType(), object);
        } else {
            // Multipart upload
            log.info("Chunk count is {}, using multipart upload", fileUploadInfo.getChunkCount());
            // urlsVO = minioUtil.initMultiPartUpload(fileUploadInfo, object);
            urlsVO = amazonS3Util.initMultiPartUpload(fileUploadInfo, object);
        }
        fileUploadInfo.setUploadId(urlsVO.getUploadId());

        // Store in Redis. For a single part the only purpose is to let it be persisted on merge,
        // since a single part is one request and rarely fails.
        redisUtil.set(fileUploadInfo.getMd5(), fileUploadInfo, fileProperties.getOss().getBreakpointTime(), TimeUnit.DAYS);
        return R.ok(BaseFileVo.builder(FileHttpCodeEnum.SUCCESS, urlsVO));
    }

    /**
     * Merge the uploaded parts
     * @param md5
     * @return
     */
    @Override
    public R<BaseFileVo<String>> mergeMultipartUpload(String md5) {
        FileUploadInfo redisFileUploadInfo = (FileUploadInfo) redisUtil.get(md5);
        String url = StrUtil.format("{}/{}/{}", fileProperties.getOss().getEndpoint(), fileProperties.getBucketName(), redisFileUploadInfo.getObject());
        SysFileUpload files = BeanUtil.toBean(redisFileUploadInfo, SysFileUpload.class);
        files.setUrl(url).setBucket(fileProperties.getBucketName()).setCreateTime(LocalDateTime.now());

        Integer chunkCount = redisFileUploadInfo.getChunkCount();
        // A single part needs no merge; otherwise merge and check whether it returns true or false
        boolean isSuccess = chunkCount == 1 || minioUtil.mergeMultipartUpload(redisFileUploadInfo.getObject(), redisFileUploadInfo.getUploadId());
        // boolean isSuccess = chunkCount == 1 || amazonS3Util.mergeMultipartUpload(redisFileUploadInfo.getObject(), redisFileUploadInfo.getUploadId());
        if (isSuccess) {
            baseMapper.insert(files);
            redisUtil.del(md5);
            return R.ok(BaseFileVo.builder(FileHttpCodeEnum.SUCCESS, url));
        }
        return R.ok(BaseFileVo.builder(FileHttpCodeEnum.UPLOAD_FILE_FAILED, null));
    }

    /**
     * Chunked download
     * @param id
     * @param request
     * @param response
     * @return
     * @throws IOException
     */
    @Override
    public ResponseEntity<byte[]> downloadMultipartFile(Long id, HttpServletRequest request, HttpServletResponse response) throws IOException {
        // Cache the file info in Redis to avoid hitting the database on every chunk request
        SysFileUpload file = null;
        SysFileUpload redisFile = (SysFileUpload) redisUtil.get(String.valueOf(id));
        if (redisFile == null) {
            SysFileUpload dbFile = baseMapper.selectById(id);
            if (dbFile == null) {
                return null;
            } else {
                file = dbFile;
                redisUtil.set(String.valueOf(id), file, 1, TimeUnit.DAYS);
            }
        } else {
            file = redisFile;
        }

        String range = request.getHeader("Range");
        String fileName = file.getOriginFileName();
        log.info("Downloading object {}", file.getObject());

        // Fetch the object metadata from the bucket; throws if the object does not exist
        // StatObjectResponse objectResponse = minioUtil.statObject(file.getObject());
        S3Object s3Object = amazonS3Util.statObject(file.getObject());

        long startByte = 0; // download start position
        // long fileSize = objectResponse.size();
        long fileSize = s3Object.getObjectMetadata().getContentLength();
        long endByte = fileSize - 1; // download end position
        log.info("Total file length: {}, current range: {}", fileSize, range);

        BufferedOutputStream os = null; // buffered output stream
        // GetObjectResponse stream = null; // minio object stream

        // If a Range header is present, download only the requested segment,
        // e.g. Range: bytes=0-52428800
        if (range != null && range.contains("bytes=") && range.contains("-")) {
            range = range.substring(range.lastIndexOf("=") + 1).trim(); // 0-52428800
            String[] ranges = range.split("-");
            // Determine the range type
            if (ranges.length == 1) {
                // Type 1: bytes=-2343 is treated as 0-2343
                if (range.startsWith("-")) endByte = Long.parseLong(ranges[0]);
                // Type 2: bytes=2343- is treated as 2343-end
                if (range.endsWith("-")) startByte = Long.parseLong(ranges[0]);
            } else if (ranges.length == 2) { // Type 3: bytes=22-2343
                startByte = Long.parseLong(ranges[0]);
                endByte = Long.parseLong(ranges[1]);
            }
        }

        // Length to download; make sure contentLength never exceeds what is actually left of the file
        long contentLength = Math.min(endByte - startByte + 1, fileSize - startByte);
        // File type
        String contentType = request.getServletContext().getMimeType(fileName);

        // Avoid a garbled file name in the download
        byte[] fileNameBytes = fileName.getBytes(StandardCharsets.UTF_8);
        fileName = new String(fileNameBytes, 0, fileNameBytes.length, StandardCharsets.ISO_8859_1);

        // Response headers -------------------------------------------------------------------------
        // Resumable download: serve partial byte content
        response.setHeader("Accept-Ranges", "bytes");
        // HTTP status 206 means partial content (SC_PARTIAL_CONTENT); switch to SC_OK if a browser does not support it
        response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
        response.setContentType(contentType);
        // response.setHeader("Last-Modified", objectResponse.lastModified().toString());
        response.setHeader("Last-Modified", s3Object.getObjectMetadata().getLastModified().toString());
        response.setHeader("Content-Disposition", "attachment;filename=" + fileName);
        response.setHeader("Content-Length", String.valueOf(contentLength));
        // Content-Range format: [start position]-[end position]/[total file size]
        response.setHeader("Content-Range", "bytes " + startByte + "-" + endByte + "/" + fileSize);
        // response.setHeader("ETag", "\"".concat(objectResponse.etag()).concat("\""));
        response.setHeader("ETag", "\"".concat(s3Object.getObjectMetadata().getETag()).concat("\""));
        response.setContentType("application/octet-stream;charset=UTF-8");

        S3ObjectInputStream objectInputStream = null;
        try {
            // Get the object stream for the requested range
            String object = s3Object.getKey();
            S3Object currentObject = amazonS3Util.getObject(object, startByte, contentLength);
            objectInputStream = currentObject.getObjectContent();
            // stream = minioUtil.getObject(objectResponse.object(), startByte, contentLength);

            os = new BufferedOutputStream(response.getOutputStream());
            // Copy the object stream into the response OutputStream
            byte[] bytes = new byte[BUFFER_SIZE];
            long bytesWritten = 0;
            int bytesRead = -1;
            while ((bytesRead = objectInputStream.read(bytes)) != -1) {
            // while ((bytesRead = stream.read(bytes)) != -1) {
                if (bytesWritten + bytesRead >= contentLength) {
                    os.write(bytes, 0, (int) (contentLength - bytesWritten));
                    break;
                } else {
                    os.write(bytes, 0, bytesRead);
                    bytesWritten += bytesRead;
                }
            }
            os.flush();
            response.flushBuffer();
            // Return the corresponding HTTP status
            return new ResponseEntity<>(bytes, HttpStatus.OK);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (os != null) os.close();
            // if (stream != null) stream.close();
            if (objectInputStream != null) objectInputStream.close();
        }
        return null;
    }

    @Override
    public R<List<SysFileUpload>> getFileList() {
        List<SysFileUpload> filesList = this.list();
        return R.ok(filesList);
    }
}
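To make the Range handling in downloadMultipartFile concrete, here is an illustrative stand-alone client that mirrors what the Vue front end does with axios and blobs: it requests fixed-size byte ranges in a loop and appends each 206 response to a local file. The endpoint path, file size, and chunk size are assumptions for the sketch, not values taken from the project.

import java.io.FileOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class ChunkedDownloadClient {
    public static void main(String[] args) throws Exception {
        long chunkSize = 5 * 1024 * 1024;    // 5 MB per request (illustrative)
        long fileSize = 52_428_800L;         // total size, normally taken from the file list API
        String endpoint = "http://localhost:8080/file/download/1"; // hypothetical URL

        try (FileOutputStream out = new FileOutputStream("download.bin")) {
            for (long start = 0; start < fileSize; start += chunkSize) {
                long end = Math.min(start + chunkSize - 1, fileSize - 1);
                HttpURLConnection conn = (HttpURLConnection) new URL(endpoint).openConnection();
                conn.setRequestProperty("Range", "bytes=" + start + "-" + end);
                try (InputStream in = conn.getInputStream()) {
                    in.transferTo(out);      // server answers 206 with just this byte range
                } finally {
                    conn.disconnect();
                }
            }
        }
    }
}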
AmazonS3Util

import cn.hutool.core.util.IdUtil;
import cn.superlu.s3uploadservice.config.FileProperties;
import cn.superlu.s3uploadservice.constant.FileHttpCodeEnum;
import cn.superlu.s3uploadservice.model.bo.FileUploadInfo;
import cn.superlu.s3uploadservice.model.vo.UploadUrlsVO;
import com.amazonaws.ClientConfiguration;
import com.amazonaws.HttpMethod;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;
import com.google.common.collect.HashMultimap;
import io.minio.GetObjectArgs;
import io.minio.GetObjectResponse;
import io.minio.StatObjectArgs;
import io.minio.StatObjectResponse;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.Resource;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.net.URL;
import java.util.*;
import java.util.stream.Collectors;

@Slf4j
@Component
public class AmazonS3Util {

    @Resource
    private FileProperties fileProperties;

    private AmazonS3 amazonS3;

    // Spring auto-injection of the client fails, so build it after construction
    @PostConstruct
    public void init() {
        ClientConfiguration clientConfiguration = new ClientConfiguration();
        clientConfiguration.setMaxConnections(100);
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
                fileProperties.getOss().getEndpoint(), fileProperties.getOss().getRegion());
        AWSCredentials awsCredentials = new BasicAWSCredentials(fileProperties.getOss().getAccessKey(),
                fileProperties.getOss().getSecretKey());
        AWSCredentialsProvider awsCredentialsProvider = new AWSStaticCredentialsProvider(awsCredentials);
        this.amazonS3 = AmazonS3ClientBuilder.standard()
                .withEndpointConfiguration(endpointConfiguration)
                .withClientConfiguration(clientConfiguration)
                .withCredentials(awsCredentialsProvider)
                .disableChunkedEncoding()
                .withPathStyleAccessEnabled(true)
                .build();
    }

    /**
     * Get the parts of this upload that have already been uploaded
     * @param object   object name
     * @param uploadId upload id generated by the storage service
     * @return List<Integer>
     */
    @SneakyThrows
    public List<Integer> getListParts(String object, String uploadId) {
        ListPartsRequest listPartsRequest = new ListPartsRequest(fileProperties.getBucketName(), object, uploadId);
        PartListing listParts = amazonS3.listParts(listPartsRequest);
        return listParts.getParts().stream()
                .map(PartSummary::getPartNumber)
                .collect(Collectors.toList());
    }

    /**
     * Presigned single-file upload
     * @param object object name (uuid format)
     * @return UploadUrlsVO
     */
    public UploadUrlsVO getUploadObjectUrl(String contentType, String object) {
        try {
            log.info("{} starting single-file upload", object);
            UploadUrlsVO urlsVO = new UploadUrlsVO();
            List<String> urlList = new ArrayList<>();
            // Mainly for images: to view them in the browser instead of downloading, set the matching content-type
            HashMultimap<String, String> headers = HashMultimap.create();
            if (contentType == null || contentType.equals("")) {
                contentType = "application/octet-stream";
            }
            headers.put("Content-Type", contentType);
            String uploadId = IdUtil.simpleUUID();
            Map<String, String> reqParams = new HashMap<>();
            reqParams.put("uploadId", uploadId);

            // Generate the presigned URL
            GeneratePresignedUrlRequest generatePresignedUrlRequest =
                    new GeneratePresignedUrlRequest(fileProperties.getBucketName(), object, HttpMethod.PUT);
            generatePresignedUrlRequest.addRequestParameter("uploadId", uploadId);
            URL url = amazonS3.generatePresignedUrl(generatePresignedUrlRequest);
            urlList.add(url.toString());
            urlsVO.setUploadId(uploadId).setUrls(urlList);
            return urlsVO;
        } catch (Exception e) {
            log.error("Single-file upload failed: {}", e.getMessage());
            throw new RuntimeException(FileHttpCodeEnum.UPLOAD_FILE_FAILED.getMsg());
        }
    }

    /**
     * Initialize multipart upload
     * @param fileUploadInfo file info sent by the front end
     * @param object         object
     * @return UploadUrlsVO
     */
    public UploadUrlsVO initMultiPartUpload(FileUploadInfo fileUploadInfo, String object) {
        Integer chunkCount = fileUploadInfo.getChunkCount();
        String contentType = fileUploadInfo.getContentType();
        String uploadId = fileUploadInfo.getUploadId();
        log.info("File {} - {} chunks, initializing multipart upload, content type {}", object, chunkCount, contentType);
        UploadUrlsVO urlsVO = new UploadUrlsVO();
        try {
            // If an uploadId already exists this is a resumed upload, so do not generate a new uploadId
            if (uploadId == null || uploadId.equals("")) {
                // Step 1: announce the Multipart Upload and set the content type
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentType(contentType);
                InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
                        fileProperties.getBucketName(), object, metadata);
                uploadId = amazonS3.initiateMultipartUpload(initRequest).getUploadId();
                log.info("No uploadId, generated a new one: {}", uploadId);
            }
            urlsVO.setUploadId(uploadId);

            List<String> partList = new ArrayList<>();
            for (int i = 1; i <= chunkCount; i++) {
                // Generate a presigned URL with an expiration, e.g. one hour from now
                Date expiration = new Date(System.currentTimeMillis() + 3600 * 1000);
                GeneratePresignedUrlRequest generatePresignedUrlRequest =
                        new GeneratePresignedUrlRequest(fileProperties.getBucketName(), object, HttpMethod.PUT)
                                .withExpiration(expiration);
                generatePresignedUrlRequest.addRequestParameter("uploadId", uploadId);
                generatePresignedUrlRequest.addRequestParameter("partNumber", String.valueOf(i));
                URL url = amazonS3.generatePresignedUrl(generatePresignedUrlRequest);
                partList.add(url.toString());
            }
            log.info("Multipart upload initialized");
            urlsVO.setUrls(partList);
            return urlsVO;
        } catch (Exception e) {
            log.error("Failed to initialize multipart upload: {}", e.getMessage());
            // Report upload failure
            throw new RuntimeException(FileHttpCodeEnum.UPLOAD_FILE_FAILED.getMsg());
        }
    }

    /**
     * Merge the parts
     * @param object   object
     * @param uploadId uploadId
     */
    @SneakyThrows
    public boolean mergeMultipartUpload(String object, String uploadId) {
        log.info("Merging parts via {}-{}-{}", object, uploadId, fileProperties.getBucketName());
        // Build the listParts query
        ListPartsRequest listPartsRequest = new ListPartsRequest(fileProperties.getBucketName(), object, uploadId);
        listPartsRequest.setMaxParts(1000);
        listPartsRequest.setPartNumberMarker(0);
        // Query the uploaded parts
        PartListing partList = amazonS3.listParts(listPartsRequest);
        List<PartSummary> parts = partList.getParts();
        if (parts == null || parts.isEmpty()) {
            // The uploaded part count does not match the recorded count, so the parts cannot be merged
            throw new RuntimeException("Parts are missing, please upload again");
        }
        // Merge the parts
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                fileProperties.getBucketName(),
                object,
                uploadId,
                parts.stream()
                        .map(partSummary -> new PartETag(partSummary.getPartNumber(), partSummary.getETag()))
                        .collect(Collectors.toList()));
        amazonS3.completeMultipartUpload(compRequest);
        return true;
    }

    /**
     * Get the object content and metadata; throws if the object does not exist
     * @param object object
     * @return S3Object
     */
    @SneakyThrows
    public S3Object statObject(String object) {
        return amazonS3.getObject(fileProperties.getBucketName(), object);
    }

    @SneakyThrows
    public S3Object getObject(String object, Long offset, Long contentLength) {
        GetObjectRequest request = new GetObjectRequest(fileProperties.getBucketName(), object);
        request.setRange(offset, offset + contentLength - 1); // set offset and length
        return amazonS3.getObject(request);
    }
}
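Both utility classes read their connection settings from FileProperties, which the article does not list. A minimal sketch that is consistent with the getters used above (getBucketName(), getOss().getEndpoint(), getRegion(), getAccessKey(), getSecretKey(), getExpiry(), getBreakpointTime()) might look like the following; the property prefix and the comments are assumptions:

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

// Sketch only: field names are inferred from the getters used in the utility classes.
@Data
@Component
@ConfigurationProperties(prefix = "file") // assumed prefix
public class FileProperties {

    private String bucketName;
    private Oss oss = new Oss();

    @Data
    public static class Oss {
        private String endpoint;        // e.g. http://127.0.0.1:9000
        private String region;
        private String accessKey;
        private String secretKey;
        private Integer expiry;         // presigned-URL validity (days), used by MinioUtil
        private Integer breakpointTime; // how long upload state is kept in Redis (days)
    }
}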
MinioUtil
import cn.hutool.core.util.IdUtil;
import cn.superlu.s3uploadservice.config.CustomMinioClient;
import cn.superlu.s3uploadservice.config.FileProperties;
import cn.superlu.s3uploadservice.constant.FileHttpCodeEnum;
import cn.superlu.s3uploadservice.model.bo.FileUploadInfo;
import cn.superlu.s3uploadservice.model.vo.UploadUrlsVO;
import com.google.common.collect.HashMultimap;
import io.minio.*;
import io.minio.http.Method;
import io.minio.messages.Part;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.Resource;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@Slf4j
@Component
public class MinioUtil {

    private CustomMinioClient customMinioClient;

    @Resource
    private FileProperties fileProperties;

    // Spring auto-injection of the client fails, so build it after construction
    @PostConstruct
    public void init() {
        MinioAsyncClient minioClient = MinioAsyncClient.builder()
                .endpoint(fileProperties.getOss().getEndpoint())
                .credentials(fileProperties.getOss().getAccessKey(), fileProperties.getOss().getSecretKey())
                .build();
        customMinioClient = new CustomMinioClient(minioClient);
    }

    /**
     * Get the parts of this upload that MinIO has already received
     * @param object   object name
     * @param uploadId upload id generated by MinIO
     * @return List<Integer>
     */
    @SneakyThrows
    public List<Integer> getListParts(String object, String uploadId) {
        ListPartsResponse partResult = customMinioClient.listMultipart(
                fileProperties.getBucketName(), null, object, 1000, 0, uploadId, null, null);
        return partResult.result().partList().stream()
                .map(Part::partNumber)
                .collect(Collectors.toList());
    }

    /**
     * Presigned single-file upload
     * @param object object name (uuid format)
     * @return UploadUrlsVO
     */
    public UploadUrlsVO getUploadObjectUrl(String contentType, String object) {
        try {
            log.info("{} starting single-file upload to MinIO", object);
            UploadUrlsVO urlsVO = new UploadUrlsVO();
            List<String> urlList = new ArrayList<>();
            // Mainly for images: to view them in the browser instead of downloading, set the matching content-type
            HashMultimap<String, String> headers = HashMultimap.create();
            if (contentType == null || contentType.equals("")) {
                contentType = "application/octet-stream";
            }
            headers.put("Content-Type", contentType);
            String uploadId = IdUtil.simpleUUID();
            Map<String, String> reqParams = new HashMap<>();
            reqParams.put("uploadId", uploadId);
            String url = customMinioClient.getPresignedObjectUrl(GetPresignedObjectUrlArgs.builder()
                    .method(Method.PUT)
                    .bucket(fileProperties.getBucketName())
                    .object(object)
                    .extraHeaders(headers)
                    .extraQueryParams(reqParams)
                    .expiry(fileProperties.getOss().getExpiry(), TimeUnit.DAYS)
                    .build());
            urlList.add(url);
            urlsVO.setUploadId(uploadId).setUrls(urlList);
            return urlsVO;
        } catch (Exception e) {
            log.error("Single-file upload failed: {}", e.getMessage());
            throw new RuntimeException(FileHttpCodeEnum.UPLOAD_FILE_FAILED.getMsg());
        }
    }

    /**
     * Initialize multipart upload
     * @param fileUploadInfo file info sent by the front end
     * @param object         object
     * @return UploadUrlsVO
     */
    public UploadUrlsVO initMultiPartUpload(FileUploadInfo fileUploadInfo, String object) {
        Integer chunkCount = fileUploadInfo.getChunkCount();
        String contentType = fileUploadInfo.getContentType();
        String uploadId = fileUploadInfo.getUploadId();
        log.info("File {} - {} chunks, initializing multipart upload, content type {}", object, chunkCount, contentType);
        UploadUrlsVO urlsVO = new UploadUrlsVO();
        try {
            HashMultimap<String, String> headers = HashMultimap.create();
            if (contentType == null || contentType.equals("")) {
                contentType = "application/octet-stream";
            }
            headers.put("Content-Type", contentType);

            // If an uploadId already exists this is a resumed upload, so do not generate a new uploadId
            if (fileUploadInfo.getUploadId() == null || fileUploadInfo.getUploadId().equals("")) {
                uploadId = customMinioClient.initMultiPartUpload(fileProperties.getBucketName(), null, object, headers, null);
            }
            urlsVO.setUploadId(uploadId);

            List<String> partList = new ArrayList<>();
            Map<String, String> reqParams = new HashMap<>();
            reqParams.put("uploadId", uploadId);
            for (int i = 1; i <= chunkCount; i++) {
                reqParams.put("partNumber", String.valueOf(i));
                String uploadUrl = customMinioClient.getPresignedObjectUrl(GetPresignedObjectUrlArgs.builder()
                        .method(Method.PUT)
                        .bucket(fileProperties.getBucketName())
                        .object(object)
                        .expiry(1, TimeUnit.DAYS)
                        .extraQueryParams(reqParams)
                        .build());
                partList.add(uploadUrl);
            }
            log.info("Multipart upload initialized");
            urlsVO.setUrls(partList);
            return urlsVO;
        } catch (Exception e) {
            log.error("Failed to initialize multipart upload: {}", e.getMessage());
            // Report upload failure
            throw new RuntimeException(FileHttpCodeEnum.UPLOAD_FILE_FAILED.getMsg());
        }
    }

    /**
     * Merge the parts
     * @param object   object
     * @param uploadId uploadId
     */
    @SneakyThrows
    public boolean mergeMultipartUpload(String object, String uploadId) {
        log.info("Merging parts via {}-{}-{}", object, uploadId, fileProperties.getBucketName());
        // Currently supports at most 1000 parts
        Part[] parts = new Part[1000];
        // Query the uploaded parts
        ListPartsResponse partResult = customMinioClient.listMultipart(
                fileProperties.getBucketName(), null, object, 1000, 0, uploadId, null, null);
        int partNumber = 1;
        for (Part part : partResult.result().partList()) {
            parts[partNumber - 1] = new Part(partNumber, part.etag());
            partNumber++;
        }
        // Merge the parts
        customMinioClient.mergeMultipartUpload(fileProperties.getBucketName(), null, object, uploadId, parts, null, null);
        return true;
    }

    /**
     * Get the object content and metadata; throws if the object does not exist
     * @param object object
     * @return StatObjectResponse
     */
    @SneakyThrows
    public StatObjectResponse statObject(String object) {
        return customMinioClient.statObject(StatObjectArgs.builder()
                .bucket(fileProperties.getBucketName())
                .object(object)
                .build()).get();
    }

    @SneakyThrows
    public GetObjectResponse getObject(String object, Long offset, Long contentLength) {
        return customMinioClient.getObject(GetObjectArgs.builder()
                .bucket(fileProperties.getBucketName())
                .object(object)
                .offset(offset)
                .length(contentLength)
                .build()).get();
    }
}
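MinioUtil also depends on a CustomMinioClient that is not listed here. In the original article it extends MinioAsyncClient to expose the SDK's protected multipart operations. The sketch below assumes minio 8.5.x, where those operations are the protected createMultipartUploadAsync, listPartsAsync and completeMultipartUploadAsync methods; exact names and signatures vary between SDK versions, so treat this as a starting point rather than a drop-in class:

import com.google.common.collect.Multimap;
import io.minio.CreateMultipartUploadResponse;
import io.minio.ListPartsResponse;
import io.minio.MinioAsyncClient;
import io.minio.ObjectWriteResponse;
import io.minio.messages.Part;

// Sketch only: exposes the protected multipart operations of MinioAsyncClient (minio 8.5.x assumed).
public class CustomMinioClient extends MinioAsyncClient {

    public CustomMinioClient(MinioAsyncClient client) {
        super(client);
    }

    // Start a multipart upload and return its uploadId.
    public String initMultiPartUpload(String bucket, String region, String object,
                                      Multimap<String, String> headers,
                                      Multimap<String, String> extraQueryParams) throws Exception {
        CreateMultipartUploadResponse response =
                super.createMultipartUploadAsync(bucket, region, object, headers, extraQueryParams).get();
        return response.result().uploadId();
    }

    // List the parts MinIO has already received for this uploadId.
    public ListPartsResponse listMultipart(String bucket, String region, String object,
                                           Integer maxParts, Integer partNumberMarker, String uploadId,
                                           Multimap<String, String> extraHeaders,
                                           Multimap<String, String> extraQueryParams) throws Exception {
        return super.listPartsAsync(bucket, region, object, maxParts, partNumberMarker,
                uploadId, extraHeaders, extraQueryParams).get();
    }

    // Complete the multipart upload by merging the given parts.
    public ObjectWriteResponse mergeMultipartUpload(String bucket, String region, String object,
                                                    String uploadId, Part[] parts,
                                                    Multimap<String, String> extraHeaders,
                                                    Multimap<String, String> extraQueryParams) throws Exception {
        return super.completeMultipartUploadAsync(bucket, region, object, uploadId,
                parts, extraHeaders, extraQueryParams).get();
    }
}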
4. Open Question

When I moved the upload path entirely onto the aws-s3 SDK I ran into a problem I have not been able to solve, so listing the already-uploaded parts is the one step I still do with the MinIO SDK.

After uploading the chunks, the call to amazonS3.listParts() always times out.

I found someone reporting the same problem at https://gitee.com/Gary2016/minio-upload/issues/I8H8GM.

If you have solved it, please share the fix in the comments.