最近在使用antdvue 和spring boot 上传大文件的时候,发现simple-vue-upload是个非常好用的前端组件,结合spring boot简单记录一下整体上传代码。
1.因为分片信息用redis存储,因此在pom中添加redis依赖
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
2.在application.properties中配置redis信息:
spring.redis.host=localhost
spring.redis.port=6379
spring.redis.password=password
spring.redis.database=4
spring.redis.timeout = 30000ms
# spring-boot-starter-data-redis 默认使用 Lettuce 客户端,
# 连接池需配置 lettuce.pool(jedis.pool 配置对 Lettuce 不生效)
spring.redis.lettuce.pool.max-active=200
spring.redis.lettuce.pool.max-idle=5
spring.redis.lettuce.pool.max-wait=20000ms
#文件大小设置
spring.servlet.multipart.max-file-size=500MB
spring.servlet.multipart.max-request-size=500MB
tempDir=e:/test/temp
uploadDir=e:/test/upload
spring.cache.type=GENERIC
3.分片信息类:Chunk.java
package cn.abdl.antdv.domain.VO;
import org.springframework.web.multipart.MultipartFile;
/**
 * Value object carrying one uploaded chunk of a file, bound from the
 * multipart form fields sent by the front-end uploader component.
 */
public class Chunk {

    /** Database id (unused by the upload flow itself). */
    private Long id;
    /** 1-based index of this chunk within the file. */
    private Integer chunkNumber;
    /** Configured size of a full chunk, in bytes. */
    private Long chunkSize;
    /** Actual size of this chunk (the last chunk may be smaller). */
    private Long currentChunkSize;
    /** Total size of the whole file, in bytes. */
    private Long totalSize;
    /** File identifier (MD5 computed by the client). */
    private String identifier;
    /** Original file name. */
    private String filename;
    /** Relative path of the file on the client. */
    private String relativePath;
    /** Total number of chunks the file is split into. */
    private Integer totalChunks;
    /** MIME type of the file. */
    private String type;
    /** The binary payload of this chunk. */
    private MultipartFile file;

    public Long getId() {
        return id;
    }

    public void setId(Long value) {
        this.id = value;
    }

    public Integer getChunkNumber() {
        return chunkNumber;
    }

    public void setChunkNumber(Integer value) {
        this.chunkNumber = value;
    }

    public Long getChunkSize() {
        return chunkSize;
    }

    public void setChunkSize(Long value) {
        this.chunkSize = value;
    }

    public Long getCurrentChunkSize() {
        return currentChunkSize;
    }

    public void setCurrentChunkSize(Long value) {
        this.currentChunkSize = value;
    }

    public Long getTotalSize() {
        return totalSize;
    }

    public void setTotalSize(Long value) {
        this.totalSize = value;
    }

    public String getIdentifier() {
        return identifier;
    }

    public void setIdentifier(String value) {
        this.identifier = value;
    }

    public String getFilename() {
        return filename;
    }

    public void setFilename(String value) {
        this.filename = value;
    }

    public String getRelativePath() {
        return relativePath;
    }

    public void setRelativePath(String value) {
        this.relativePath = value;
    }

    public Integer getTotalChunks() {
        return totalChunks;
    }

    public void setTotalChunks(Integer value) {
        this.totalChunks = value;
    }

    public String getType() {
        return type;
    }

    public void setType(String value) {
        this.type = value;
    }

    public MultipartFile getFile() {
        return file;
    }

    public void setFile(MultipartFile value) {
        this.file = value;
    }
}
4.AbstractBaseRedisDao.java
package cn.abdl.antdv.redis;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.data.redis.serializer.RedisSerializer;
import javax.annotation.Resource;
/**
 * Base class for Redis DAOs: injects the shared {@code RedisTemplate}
 * bean and exposes its String serializer to subclasses.
 *
 * @param <K> Redis key type
 * @param <V> Redis value type
 */
public abstract class AbstractBaseRedisDao<K, V> {
// Field injection by bean name; a setter is also provided for manual wiring/tests.
@Resource(name = "redisTemplate")
protected RedisTemplate<K, V> redisTemplate;
/**
 * Sets the RedisTemplate (mainly for manual wiring or tests).
 *
 * @param redisTemplate the template to use
 */
public void setRedisTemplate(RedisTemplate<K, V> redisTemplate) {
this.redisTemplate = redisTemplate;
}
/**
 * Returns the template's String serializer.
 */
protected RedisSerializer<String> getRedisSerializer() {
return redisTemplate.getStringSerializer();
}
}
5.redis工具类 RedisDao.java
package cn.abdl.antdv.redis;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Generic Redis access interface wrapping common value (String), hash,
 * list, set and sorted-set operations of {@code RedisTemplate}.
 */
public interface RedisDao {
/**
 * Checks whether the given key exists in the cache.
 *
 * @param key cache key
 * @return true if the key exists
 */
public boolean existsKey(Object key);
/**
 * Returns all keys matching the pattern (supports glob matching,
 * e.g. {@code taskInfo:taskDetail:*} where {@code *} matches anything).
 *
 * @param pattern key pattern
 * @return matching keys
 */
public Set<Object> keys(Object pattern);
/**
 * Deletes the value stored under the given key.
 *
 * @param key cache key
 * @return true if the key was deleted
 */
public boolean delete(Object key);
/**
 * Returns the number of keys matching the given key/pattern.
 *
 * @param key key or pattern
 * @return number of matching keys
 */
public int count(Object key);
/**
 * Deletes all of the given keys.
 *
 * @param keys keys to delete
 */
public void delete(String[] keys);
/**
 * Deletes all keys matching the pattern (supports glob matching,
 * e.g. {@code taskInfo:taskDetail:*}).
 *
 * @param pattern key pattern
 * @return number of keys deleted
 */
public long deletePattern(Object pattern);
/**
 * Deletes all of the given keys.
 *
 * @param keys keys to delete
 * @return number of keys deleted
 */
public long delete(Set<Object> keys);
/**
 * Writes a value to the cache (String operations).
 *
 * @param key cache key
 * @param value value to store
 * @return true on success
 */
public boolean vSet(Object key, Object value);
/**
 * Writes a value to the cache with a time-to-live (String operations).
 *
 * @param key cache key
 * @param value value to store
 * @param expireTime time-to-live in seconds
 * @return true on success
 */
public boolean vSet(Object key, Object value, Long expireTime);
/**
 * Updates the time-to-live of an existing key (String operations).
 *
 * @param key cache key
 * @param expireTime new time-to-live in seconds
 * @return true on success
 */
public boolean vSetUpdate(Object key, Long expireTime);
/**
 * Reads a value from the cache (String operations).
 *
 * @param key cache key
 * @return the stored value, or null if absent
 */
public Object vGet(Object key);
/**
 * Puts a single entry into a hash.
 *
 * @param key hash key
 * @param hashKey field within the hash
 * @param value value to store
 */
public void hmSet(Object key, Object hashKey, Object value);
/**
 * Puts all entries of the map into a hash.
 *
 * @param key hash key
 * @param map field/value pairs to store
 */
public void hmSetAll(Object key, Map<Object, Object> map);
/**
 * Reads all entries of a hash.
 *
 * @param key hash key
 * @return all field/value pairs of the hash
 */
public Map<Object, Object> hmGet(Object key);
/**
 * Reads a single entry from a hash.
 *
 * @param key hash key
 * @param hashKey field within the hash
 * @return the stored value, or null if absent
 */
public Object hmGet(Object key, Object hashKey);
/**
 * Deletes a single entry from a hash.
 *
 * @param key hash key
 * @param hashKey field within the hash
 * @return number of fields removed
 */
public Object hmDel(Object key, Object hashKey);
/**
 * Returns the number of elements in a list.
 *
 * @param k list key
 * @return list length
 */
public long lSize(Object k);
/**
 * Returns the first element of a list (list operations).
 *
 * @param key list key
 * @return element at index 0, or null
 */
public Object lindexFirst(Object key);
/**
 * Returns the element of a list at the given index (list operations).
 *
 * @param key list key
 * @param index 0-based index
 * @return element at that index, or null
 */
public Object lindex(Object key, long index);
/**
 * Pushes a value onto the head of a list (list operations).
 *
 * @param k list key
 * @param v value to push
 */
public void lLeftPush(Object k, Object v);
/**
 * Pushes a value onto the head of a list; when {@code bool} is true,
 * existing occurrences of the value are removed first (verified for
 * String lists only).
 *
 * @param k list key
 * @param v value to push
 * @param bool remove existing occurrences first
 */
public void lLeftPush(Object k, Object v, boolean bool);
/**
 * Pushes all values onto the head of a list (list operations).
 *
 * @param k list key
 * @param lst values to push
 */
public void lLeftPushAll(Object k, List<Object> lst);
/**
 * Pushes a value onto the tail of a list (list operations).
 *
 * @param k list key
 * @param v value to push
 */
public void lRightPush(Object k, Object v);
/**
 * Pushes a value onto the tail of a list; when {@code bool} is true,
 * existing occurrences of the value are removed first (verified for
 * String lists only).
 *
 * @param k list key
 * @param v value to push
 * @param bool remove existing occurrences first
 */
public void lRightPush(Object k, Object v, boolean bool);
/**
 * Pushes all values onto the tail of a list (list operations).
 *
 * @param k list key
 * @param lst values to push
 */
public void lRightPushAll(Object k, List<Object> lst);
/**
 * Removes and returns the first element of a list (list operations).
 *
 * @param k list key
 * @return the removed element, or null
 */
public Object lLeftPop(Object k);
/**
 * Removes and returns the last element of a list (list operations).
 *
 * @param k list key
 * @return the removed element, or null
 */
public Object lRightPop(Object k);
/**
 * Removes {@code count} elements from the list; returns the number
 * removed, or 0 if the list is empty (list operations).
 *
 * @param k list key
 * @param count number of elements to remove
 * @return number of elements removed
 */
public long lRemove(Object k, long count);
/**
 * Removes up to {@code count} occurrences of value {@code v} from the
 * list; returns the number removed, or 0 if not present (list operations).
 *
 * @param k list key
 * @param count maximum occurrences to remove
 * @param v value to remove
 * @return number of elements removed
 */
public long lRemove(Object k, long count, Object v);
/**
 * Removes all occurrences of value {@code v} from the list; returns the
 * number removed, or 0 if not present (list operations).
 *
 * @param k list key
 * @param v value to remove
 * @return number of elements removed
 */
public long lRemove(Object k, Object v);
/**
 * Returns the whole list stored under the key (list operations).
 *
 * @param key list key
 * @return the full list
 */
public Object lRange(Object key);
/**
 * Returns the elements of the list between indices start and end,
 * inclusive (list operations).
 *
 * @param k list key
 * @param start start index
 * @param end end index
 * @return elements in range
 */
public List<?> lRange(Object k, long start, long end);
/**
 * Adds a value to a set.
 *
 * @param key set key
 * @param value value to add
 */
public void sAdd(Object key, Object value);
/**
 * Returns all members of a set.
 *
 * @param key set key
 * @return all members
 */
public Set<Object> sMembers(Object key);
/**
 * Adds a value with a score to a sorted set.
 *
 * @param key sorted-set key
 * @param value value to add
 * @param scoure score of the value
 */
public void zAdd(Object key, Object value, double scoure);
/**
 * Returns sorted-set members whose score lies between the two bounds.
 *
 * @param key sorted-set key
 * @param scoure lower score bound
 * @param scoure1 upper score bound
 * @return members in score range
 */
public Set<Object> rangeByScore(Object key, double scoure, double scoure1);
/**
 * Increments the number stored at a hash field by the given delta.
 *
 * @param key hash key
 * @param hashKey field within the hash
 * @param value increment delta
 */
public void hmSetIncrement(Object key, Object hashKey, Long value);
}
6.RedisDaoImpl.java 实现
package cn.abdl.antdv.redis;
import org.springframework.data.redis.core.*;
import org.springframework.stereotype.Repository;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
@Repository("redisDao")
public class RedisDaoImpl extends AbstractBaseRedisDao<Object, Object> implements RedisDao {

    @Override
    public boolean existsKey(final Object key) {
        // hasKey may return null (e.g. inside a pipeline/transaction);
        // guard against auto-unboxing NPE.
        return Boolean.TRUE.equals(redisTemplate.hasKey(key));
    }

    @Override
    public Set<Object> keys(final Object pattern) {
        return redisTemplate.keys(pattern);
    }

    @Override
    public boolean delete(final Object key) {
        return Boolean.TRUE.equals(redisTemplate.delete(key));
    }

    @Override
    public int count(final Object key) {
        // keys() may return null; the original called .size() on it directly.
        Set<Object> keys = redisTemplate.keys(key);
        return keys == null ? 0 : keys.size();
    }

    @Override
    public long deletePattern(final Object pattern) {
        Set<Object> keys = redisTemplate.keys(pattern);
        if (keys != null && !keys.isEmpty()) {
            Long deleted = redisTemplate.delete(keys);
            return deleted == null ? 0 : deleted;
        }
        return 0;
    }

    @Override
    public void delete(final String[] keys) {
        for (String key : keys) {
            delete(key);
        }
    }

    @Override
    public long delete(final Set<Object> keys) {
        Long deleted = redisTemplate.delete(keys);
        return deleted == null ? 0 : deleted;
    }

    @Override
    public boolean vSet(final Object key, Object value) {
        boolean result = false;
        try {
            redisTemplate.opsForValue().set(key, value);
            result = true;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    @Override
    public boolean vSet(final Object key, Object value, Long expireTime) {
        boolean result = false;
        try {
            // Atomic SET with TTL; the original set() + expire() pair could
            // leave the key without a TTL if expire() failed in between.
            redisTemplate.opsForValue().set(key, value, expireTime, TimeUnit.SECONDS);
            result = true;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    @Override
    public boolean vSetUpdate(final Object key, Long expireTime) {
        boolean result = false;
        try {
            // NOTE: returns true even when the key does not exist, matching
            // the original behavior (success == "no exception thrown").
            redisTemplate.expire(key, expireTime, TimeUnit.SECONDS);
            result = true;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return result;
    }

    @Override
    public Object vGet(final Object key) {
        return redisTemplate.opsForValue().get(key);
    }

    @Override
    public void hmSet(Object key, Object hashKey, Object value) {
        redisTemplate.opsForHash().put(key, hashKey, value);
    }

    @Override
    public void hmSetAll(Object key, Map<Object, Object> map) {
        redisTemplate.opsForHash().putAll(key, map);
    }

    @Override
    public Map<Object, Object> hmGet(Object key) {
        return redisTemplate.opsForHash().entries(key);
    }

    @Override
    public Object hmGet(Object key, Object hashKey) {
        return redisTemplate.opsForHash().get(key, hashKey);
    }

    @Override
    public Object hmDel(Object key, Object hashKey) {
        // HDEL: returns the number of fields removed.
        return redisTemplate.opsForHash().delete(key, hashKey);
    }

    @Override
    public long lSize(Object k) {
        Long size = redisTemplate.opsForList().size(k);
        return size == null ? 0 : size;
    }

    @Override
    public Object lRange(Object k) {
        // 0..-1 is the canonical "whole list" range and avoids an extra LLEN.
        return redisTemplate.opsForList().range(k, 0, -1);
    }

    @Override
    public List<?> lRange(Object k, long start, long end) {
        return redisTemplate.opsForList().range(k, start, end);
    }

    @Override
    public Object lindexFirst(Object k) {
        return redisTemplate.opsForList().index(k, 0);
    }

    @Override
    public Object lindex(Object k, long index) {
        return redisTemplate.opsForList().index(k, index);
    }

    @Override
    public void lLeftPush(Object k, Object v) {
        redisTemplate.opsForList().leftPush(k, v);
    }

    @Override
    public void lLeftPush(Object k, Object v, boolean bool) {
        ListOperations<Object, Object> list = redisTemplate.opsForList();
        if (bool) {
            // Remove every existing occurrence before pushing (dedupe).
            list.remove(k, lSize(k), v);
        }
        list.leftPush(k, v);
    }

    @Override
    public void lLeftPushAll(Object k, List<Object> lst) {
        redisTemplate.opsForList().leftPushAll(k, lst);
    }

    @Override
    public void lRightPush(Object k, Object v, boolean bool) {
        ListOperations<Object, Object> list = redisTemplate.opsForList();
        if (bool) {
            // Remove every existing occurrence before pushing (dedupe).
            list.remove(k, lSize(k), v);
        }
        list.rightPush(k, v);
    }

    @Override
    public void lRightPush(Object k, Object v) {
        redisTemplate.opsForList().rightPush(k, v);
    }

    @Override
    public void lRightPushAll(Object k, List<Object> lst) {
        redisTemplate.opsForList().rightPushAll(k, lst);
    }

    @Override
    public Object lLeftPop(Object k) {
        return redisTemplate.opsForList().leftPop(k);
    }

    @Override
    public Object lRightPop(Object k) {
        return redisTemplate.opsForList().rightPop(k);
    }

    @Override
    public long lRemove(Object k, long count) {
        // FIX: the original ignored `count` and called remove(k, 0, null),
        // which removes nothing useful. LREM needs a value to match, so
        // "remove the first `count` elements" is implemented by popping
        // from the head until `count` elements are gone or the list is empty.
        ListOperations<Object, Object> list = redisTemplate.opsForList();
        long removed = 0;
        for (long i = 0; i < count; i++) {
            if (list.leftPop(k) == null) {
                break;
            }
            removed++;
        }
        return removed;
    }

    @Override
    public long lRemove(Object k, long count, Object v) {
        Long removed = redisTemplate.opsForList().remove(k, count, v);
        return removed == null ? 0 : removed;
    }

    @Override
    public long lRemove(Object k, Object v) {
        // count == 0 would already mean "remove all" for LREM, but passing
        // the list size keeps the original call shape while staying correct.
        Long removed = redisTemplate.opsForList().remove(k, lSize(k), v);
        return removed == null ? 0 : removed;
    }

    @Override
    public void sAdd(Object key, Object value) {
        redisTemplate.opsForSet().add(key, value);
    }

    @Override
    public Set<Object> sMembers(Object key) {
        return redisTemplate.opsForSet().members(key);
    }

    @Override
    public void zAdd(Object key, Object value, double scoure) {
        redisTemplate.opsForZSet().add(key, value, scoure);
    }

    @Override
    public Set<Object> rangeByScore(Object key, double scoure, double scoure1) {
        return redisTemplate.opsForZSet().rangeByScore(key, scoure, scoure1);
    }

    @Override
    public void hmSetIncrement(Object key, Object hashKey, Long value) {
        redisTemplate.opsForHash().increment(key, hashKey, value);
    }
}
7.IUploadService.java
package cn.abdl.antdv.service;
import cn.abdl.antdv.domain.VO.Chunk;
import java.util.Map;
/**
 * Chunked-upload service: tracks which chunks of a file have been
 * received (in Redis) and merges the chunk files into the final file.
 */
public interface IUploadService {
/**
 * Returns the set of chunk numbers already uploaded for the file
 * identified by {@code chunk.getIdentifier()}, under key "chunkNumbers";
 * the map is empty if nothing was uploaded yet.
 */
Map<String, Object> checkChunkExits(Chunk chunk);
/**
 * Records a received chunk number for the given file identifier and
 * returns how many distinct chunks have been received so far.
 */
Integer saveChunk(Integer chunkNumber, String identifier);
/**
 * Concatenates all chunk files found in {@code chunkFolder} (in chunk
 * order) into {@code fileName} inside the merge folder, deleting each
 * chunk file after it is appended.
 */
void mergeFile(String fileName, String chunkFolder);
}
8.UploadServiceImpl.java
package cn.abdl.antdv.service.impl;
import cn.abdl.antdv.domain.VO.Chunk;
import cn.abdl.antdv.redis.RedisDao;
import cn.abdl.antdv.service.IUploadService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.*;
@Service
public class UploadServiceImpl implements IUploadService {

    @Autowired
    private RedisDao redisDao;

    /** Destination folder for merged files. */
    private static final String mergeFolder = "D:\\data\\upload\\merge";

    /**
     * Looks up the chunk numbers already stored in Redis for this file.
     * Returns an empty map when the identifier is unknown.
     */
    @Override
    public Map<String, Object> checkChunkExits(Chunk chunk) {
        Map<String, Object> res = new HashMap<>();
        String identifier = chunk.getIdentifier();
        if (redisDao.existsKey(identifier)) {
            // Stored by saveChunk() below as a HashSet<Integer>.
            @SuppressWarnings("unchecked")
            Set<Integer> chunkNumber = (Set<Integer>) redisDao.hmGet(identifier, "chunkNumberList");
            res.put("chunkNumbers", chunkNumber);
        }
        return res;
    }

    /**
     * Adds the chunk number to the identifier's set in Redis and returns
     * the number of distinct chunks received so far.
     *
     * NOTE(review): this read-modify-write is not atomic; concurrent chunk
     * uploads for the same file could lose updates — confirm whether the
     * front-end uploads chunks sequentially.
     */
    @Override
    public Integer saveChunk(Integer chunkNumber, String identifier) {
        @SuppressWarnings("unchecked")
        Set<Integer> oldChunkNumber = (Set<Integer>) redisDao.hmGet(identifier, "chunkNumberList");
        Set<Integer> chunkNumbers = (oldChunkNumber == null) ? new HashSet<>() : oldChunkNumber;
        chunkNumbers.add(chunkNumber);
        redisDao.hmSet(identifier, "chunkNumberList", chunkNumbers);
        return chunkNumbers.size();
    }

    /**
     * Merges every chunk file in {@code chunkFolder} into
     * {@code mergeFolder/fileName}, appending in ascending chunk order and
     * deleting each chunk after it has been written.
     */
    @Override
    public void mergeFile(String fileName, String chunkFolder) {
        try {
            // createDirectories is a no-op if the folder already exists;
            // the original used Files.isWritable() as an existence check.
            Files.createDirectories(Paths.get(mergeFolder));
            java.nio.file.Path target = Paths.get(mergeFolder, fileName);
            // Re-merging must not fail: createFile() throws if the file exists.
            Files.deleteIfExists(target);
            Files.createFile(target);
            // Files.list must be closed, otherwise the directory handle leaks.
            try (java.util.stream.Stream<java.nio.file.Path> chunks = Files.list(Paths.get(chunkFolder))) {
                chunks.filter(path -> path.getFileName().toString().contains("-"))
                        // Chunk files are named "<filename>-<chunkNumber>" by the
                        // controller; sort ascending on the numeric suffix. The
                        // original parsed the suffix including the '-' (negative
                        // numbers) and compared reversed — correct only by accident.
                        .sorted(Comparator.comparingInt(UploadServiceImpl::chunkIndexOf))
                        .forEach(path -> {
                            try {
                                // Append this chunk to the merged file.
                                Files.write(target, Files.readAllBytes(path), StandardOpenOption.APPEND);
                                // Delete the chunk once it has been merged.
                                Files.delete(path);
                            } catch (IOException e) {
                                e.printStackTrace();
                            }
                        });
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /** Extracts the numeric chunk index from a "<filename>-<n>" chunk file name. */
    private static int chunkIndexOf(java.nio.file.Path path) {
        String name = path.getFileName().toString();
        return Integer.parseInt(name.substring(name.lastIndexOf('-') + 1));
    }
}
9.ChunkUploadController.java 接口
package cn.abdl.antdv.web;
import cn.abdl.antdv.domain.VO.Chunk;
import cn.abdl.antdv.service.IUploadService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.Map;
@RestController
@RequestMapping("/api/upload")
public class ChunkUploadController {

    private static Logger log = LoggerFactory.getLogger(ChunkUploadController.class);

    private final static String CHUNK_FOLDER = "D:\\data\\upload\\chunck";
    private final static String SINGLE_FOLDER = "D:\\data\\upload\\single";

    @Autowired
    private IUploadService uploadService;

    /**
     * Uploads a single (non-chunked) file into SINGLE_FOLDER.
     *
     * NOTE(review): the client-supplied filename is used directly in the
     * path — validate it against path traversal before production use.
     *
     * @param chunk form-bound upload data (file + filename)
     */
    @PostMapping("/single")
    public void singleUpload(Chunk chunk) {
        MultipartFile file = chunk.getFile();
        String filename = chunk.getFilename();
        try {
            // createDirectories is a no-op when the folder already exists.
            Files.createDirectories(Paths.get(SINGLE_FOLDER));
            Path path = Paths.get(SINGLE_FOLDER, filename);
            Files.write(path, file.getBytes());
        } catch (IOException e) {
            log.error("single upload failed for {}", filename, e);
        }
    }

    /**
     * Returns the chunk numbers already uploaded for this file so the
     * client can skip them (resume support).
     */
    @GetMapping("/chunk")
    public Map<String, Object> checkChunks(Chunk chunk) {
        return uploadService.checkChunkExits(chunk);
    }

    /**
     * Stores one uploaded chunk on disk, then records it in Redis. When the
     * recorded chunk count reaches totalChunks, code 205 signals the client
     * that all chunks have arrived.
     */
    @PostMapping("/chunk")
    public Map<String, Object> saveChunk(Chunk chunk) {
        Map<String, Object> result = new HashMap<>();
        try {
            byte[] bytes = chunk.getFile().getBytes();
            Path path = Paths.get(generatePath(CHUNK_FOLDER, chunk));
            Files.write(path, bytes);
        } catch (IOException e) {
            // FIX: the original swallowed the exception and still recorded the
            // chunk in Redis, marking a failed chunk as uploaded. Report the
            // failure instead so the client can retry this chunk.
            log.error("failed to write chunk {} of {}", chunk.getChunkNumber(), chunk.getIdentifier(), e);
            result.put("message", "上传失败!");
            result.put("code", 500);
            return result;
        }
        Integer chunks = uploadService.saveChunk(chunk.getChunkNumber(), chunk.getIdentifier());
        if (chunks.equals(chunk.getTotalChunks())) {
            result.put("message", "上传成功!");
            result.put("code", 205);
        }
        return result;
    }

    /**
     * Merges all stored chunks of the identified file into the final file.
     */
    @PostMapping("/merge")
    public void mergeChunks(Chunk chunk) {
        String fileName = chunk.getFilename();
        uploadService.mergeFile(fileName, CHUNK_FOLDER + File.separator + chunk.getIdentifier());
    }

    /**
     * Builds the on-disk path "uploadFolder/identifier/filename-chunkNumber"
     * for a chunk, creating the per-file folder on first use.
     */
    private static String generatePath(String uploadFolder, Chunk chunk) {
        Path folder = Paths.get(uploadFolder, chunk.getIdentifier());
        try {
            // Idempotent; replaces the original Files.isWritable() existence probe.
            Files.createDirectories(folder);
        } catch (IOException e) {
            log.error(e.getMessage(), e);
        }
        return folder + File.separator + chunk.getFilename() + "-" + chunk.getChunkNumber();
    }
}
10.接下来是vue
<template>
<div>
<a-button type="primary" @click="showModal">Open Modal</a-button>
<a-modal width="1000px" v-model="visible" title="Basic Modal" @ok="handleOk">
<uploader
ref="uploader"
:key="uploader_key"
:options="options"
:auto-start="false"
class="uploader-example"
@file-success="onFileSuccess"
@file-added="filesAdded"
@files-added="onFilesAdded"
@file-complete="onFileComplete"
>
<uploader-unsupport></uploader-unsupport>
<uploader-drop>
<uploader-btn :single="true" >选择文件</uploader-btn>
</uploader-drop>
<uploader-list></uploader-list>
</uploader>
</a-modal>
</div>
</template>
<script>
import SparkMD5 from 'spark-md5'
import axios from 'axios'
export default {
  name: 'ModelList',
  data () {
    return {
      visible: false,
      // Changing this key re-creates the uploader component, dropping stale
      // upload state between consecutive uploads without a page refresh
      // (reassign it before each new upload as needed).
      uploader_key: new Date().getTime(),
      options: {
        target: '/api/upload/chunk', // Spring Boot chunk-upload endpoint
        testChunks: false, // do not probe the server for existing chunks
        headers: { Authorization: '' }
      },
      uploaded: 0,
      uuid: ''
    }
  },
  props: {
    msg: String
  },
  mounted () {
    this.uuid = this.getUuid()
  },
  methods: {
    onFileSuccess: function (rootFile, file, response, chunk) {
      console.log(rootFile)
      console.log(file)
      console.log(response)
      console.log(chunk)
    },
    /**
     * Computes the file's MD5 (chunk by chunk) so it can be used as the
     * upload identifier, enabling resume / instant-upload on the server.
     * @param file uploader file object
     */
    computeMD5 (file) {
      // MD5 of a large file takes a while; log progress as we go.
      this.$message.info('正在计算MD5')
      const fileReader = new FileReader()
      const time = new Date().getTime()
      const blobSlice = File.prototype.slice || File.prototype.mozSlice || File.prototype.webkitSlice
      let currentChunk = 0
      const chunkSize = 10 * 1024 * 1000
      const chunks = Math.ceil(file.size / chunkSize)
      const spark = new SparkMD5.ArrayBuffer()
      // Pause the upload until the MD5 identifier is known.
      file.pause()
      loadNext()
      fileReader.onload = e => {
        spark.append(e.target.result)
        if (currentChunk < chunks) {
          currentChunk++
          loadNext()
          // Show MD5 progress.
          this.$nextTick(() => {
            console.log('校验MD5 ' + ((currentChunk / chunks) * 100).toFixed(0) + '%')
          })
        } else {
          const md5 = spark.end()
          this.computeMD5Success(md5, file)
          console.log(`MD5计算完毕:${file.name} \nMD5:${md5} \n分片:${chunks} 大小:${file.size} 用时:${new Date().getTime() - time} ms`)
        }
      }
      // FIX: must be an arrow function — the original used `function () {}`,
      // so `this` was not the component and `this.$message` threw.
      fileReader.onerror = () => {
        this.$message.error(`文件${file.name}读取出错,请检查该文件`)
        file.cancel()
      }
      function loadNext () {
        const start = currentChunk * chunkSize
        const end = ((start + chunkSize) >= file.size) ? file.size : start + chunkSize
        fileReader.readAsArrayBuffer(blobSlice.call(file.file, start, end))
      }
    },
    computeMD5Success (md5, file) {
      // Use the MD5 as the file's identifier on the server.
      file.uniqueIdentifier = md5
      // file.resume()// start uploading
    },
    /**
     * Fired when a file is added: kick off the MD5 computation.
     */
    filesAdded (file, event) {
      this.computeMD5(file)
    },
    onFilesAdded (file, event) {
      console.log('files added')
    },
    onFileComplete (rootFile, file, response, chunk) {
      console.log('onFileComplete')
      const uploaderInstance = this.$refs.uploader.uploader
      this.uploaded++
      // Once every file in the list finished uploading, ask the server
      // to merge the chunks into the final file.
      if (uploaderInstance.fileList.length === this.uploaded) {
        console.log('all finished')
        const formData = new FormData()
        formData.append('identifier', file.uniqueIdentifier)
        formData.append('filename', file.name)
        this.merge(formData)
      }
    },
    showModal () {
      this.visible = true
    },
    handleOk () {
      const uploaderInstance = this.$refs.uploader.uploader
      uploaderInstance.resume()
    },
    merge (formdata) {
      axios.post('/api/upload/merge', formdata).then((res) => {
        console.log('res:' + JSON.stringify(res))
      }).catch(e => {
        console.log('res:' + JSON.stringify(e))
      })
    },
    /**
     * Generates a random version-4 UUID string.
     */
    getUuid () {
      var s = []
      var hexDigits = '0123456789abcdef'
      for (var i = 0; i < 36; i++) {
        s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1)
      }
      s[14] = '4' // bits 12-15 of the time_hi_and_version field to 0010
      // FIX: s[19] is a hex *character*; the original applied bitwise ops to
      // the string (letters coerce to NaN -> 0). Parse it first.
      s[19] = hexDigits.substr((parseInt(s[19], 16) & 0x3) | 0x8, 1) // bits 6-7 of clock_seq_hi_and_reserved to 10
      s[8] = s[13] = s[18] = s[23] = '-'
      var uuid = s.join('')
      return uuid
    }
  }
}
</script>
<style>
.uploader-example {
width: 90%;
padding: 15px;
margin: 40px auto 0;
font-size: 12px;
box-shadow: 0 0 10px rgba(0, 0, 0, .4);
}
.uploader-example .uploader-btn {
margin-right: 4px;
}
.uploader-example .uploader-list {
max-height: 440px;
overflow: auto;
overflow-x: hidden;
overflow-y: auto;
}
</style>
评论 (0)