
feat: chunked file upload

2026-01-14 22:59:37 +08:00
parent 11657ab9a3
commit bf3ce388d1
7 changed files with 614 additions and 13 deletions

View File

@@ -60,3 +60,19 @@ type FileUnCompress struct {
File string `form:"file" json:"file" validate:"required|isUnixPath"`
Path string `form:"path" json:"path" validate:"required|isUnixPath"`
}
// ChunkUploadStart is the request to start a chunked upload
type ChunkUploadStart struct {
Path string `json:"path" validate:"required|isUnixPath"` // target directory
FileName string `json:"file_name" validate:"required"` // file name
FileHash string `json:"file_hash" validate:"required|len:64"` // SHA256 of the file
ChunkCount int `json:"chunk_count" validate:"required|min:1"` // total number of chunks
}
// ChunkUploadFinish is the request to finish a chunked upload
type ChunkUploadFinish struct {
Path string `json:"path" validate:"required|isUnixPath"` // target directory
FileName string `json:"file_name" validate:"required"` // file name
FileHash string `json:"file_hash" validate:"required|len:64"` // SHA256 of the file
ChunkCount int `json:"chunk_count" validate:"required|min:1"` // total number of chunks
}

View File

@@ -432,6 +432,10 @@ func (route *Http) Register(r *chi.Mux) {
r.Post("/compress", route.file.Compress)
r.Post("/un_compress", route.file.UnCompress)
r.Get("/list", route.file.List)
// chunked upload
r.Post("/chunk/start", route.file.ChunkUploadStart)
r.Post("/chunk/upload", route.file.ChunkUploadChunk)
r.Post("/chunk/finish", route.file.ChunkUploadFinish)
})
r.Route("/log", func(r chi.Router) {

View File

@@ -3,7 +3,9 @@
package service
import (
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
stdio "io"
"net/http"
@@ -553,6 +555,179 @@ func (s *FileService) List(w http.ResponseWriter, r *http.Request) {
})
}
// ChunkUploadStart starts a chunked upload and reports which chunks already exist
func (s *FileService) ChunkUploadStart(w http.ResponseWriter, r *http.Request) {
req, err := Bind[request.ChunkUploadStart](r)
if err != nil {
Error(w, http.StatusInternalServerError, "%v", err)
return
}
targetPath := filepath.Join(req.Path, req.FileName)
if io.Exists(targetPath) {
Error(w, http.StatusForbidden, s.t.Get("target path %s already exists", targetPath))
return
}
// Ensure the target directory exists
if !io.Exists(req.Path) {
if err = stdos.MkdirAll(req.Path, 0755); err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("create directory error: %v", err))
return
}
}
// Scan the directory for chunk files that already exist
prefix := s.getChunkTempFilePrefix(req.FileName, req.FileHash)
entries, err := stdos.ReadDir(req.Path)
if err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("read directory error: %v", err))
return
}
uploadedChunks := make([]int, 0)
for _, entry := range entries {
if entry.IsDir() {
continue
}
name := entry.Name()
if strings.HasPrefix(name, prefix) {
// Extract the chunk index
indexStr := strings.TrimPrefix(name, prefix)
if index, err := strconv.Atoi(indexStr); err == nil && index >= 0 && index < req.ChunkCount {
uploadedChunks = append(uploadedChunks, index)
}
}
}
Success(w, chix.M{
"uploaded_chunks": uploadedChunks,
})
}
// ChunkUploadChunk uploads a single chunk
func (s *FileService) ChunkUploadChunk(w http.ResponseWriter, r *http.Request) {
if err := r.ParseMultipartForm(100 << 20); err != nil { // 100MB
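// Note: 100MB here is ParseMultipartForm's in-memory budget; file parts beyond it are spilled to temporary files on disk rather than rejected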
Error(w, http.StatusUnprocessableEntity, "%v", err)
return
}
path := r.FormValue("path")
fileName := r.FormValue("file_name")
fileHash := r.FormValue("file_hash")
chunkIndex, _ := strconv.Atoi(r.FormValue("chunk_index"))
chunkHash := r.FormValue("chunk_hash")
if path == "" || fileName == "" || fileHash == "" {
Error(w, http.StatusBadRequest, s.t.Get("path, file_name and file_hash are required"))
return
}
// Get the uploaded file
_, handler, err := r.FormFile("file")
if err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("get upload file error: %v", err))
return
}
src, err := handler.Open()
if err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("open upload file error: %v", err))
return
}
defer src.Close()
// Read the chunk contents
chunkData, err := stdio.ReadAll(src)
if err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("read chunk data error: %v", err))
return
}
// Verify the chunk hash
if chunkHash != "" {
hash := sha256.Sum256(chunkData)
actualHash := hex.EncodeToString(hash[:])
if actualHash != chunkHash {
Error(w, http.StatusBadRequest, s.t.Get("chunk hash mismatch"))
return
}
}
// Save the chunk to the target directory
// Format: .{filename}.{first 16 chars of hash}.chunk.{index}
prefix := s.getChunkTempFilePrefix(fileName, fileHash)
chunkPath := filepath.Join(path, fmt.Sprintf("%s%d", prefix, chunkIndex))
if err = stdos.WriteFile(chunkPath, chunkData, 0644); err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("save chunk error: %v", err))
return
}
Success(w, chix.M{
"chunk_index": chunkIndex,
})
}
// ChunkUploadFinish merges the uploaded chunks into the target file
func (s *FileService) ChunkUploadFinish(w http.ResponseWriter, r *http.Request) {
req, err := Bind[request.ChunkUploadFinish](r)
if err != nil {
Error(w, http.StatusInternalServerError, "%v", err)
return
}
targetPath := filepath.Join(req.Path, req.FileName)
// Check whether the target file already exists
if io.Exists(targetPath) {
Error(w, http.StatusForbidden, s.t.Get("target path %s already exists", targetPath))
return
}
// Create the target file
outFile, err := stdos.OpenFile(targetPath, stdos.O_CREATE|stdos.O_WRONLY|stdos.O_TRUNC, 0644)
if err != nil {
Error(w, http.StatusInternalServerError, s.t.Get("create target file error: %v", err))
return
}
defer outFile.Close()
// Merge the chunks in order
prefix := s.getChunkTempFilePrefix(req.FileName, req.FileHash)
var chunkPaths []string
for i := 0; i < req.ChunkCount; i++ {
chunkPath := filepath.Join(req.Path, fmt.Sprintf("%s%d", prefix, i))
chunkPaths = append(chunkPaths, chunkPath)
chunkData, err := stdos.ReadFile(chunkPath)
if err != nil {
// Remove the partially written target file
_ = outFile.Close()
_ = stdos.Remove(targetPath)
Error(w, http.StatusInternalServerError, s.t.Get("read chunk %d error: %v", i, err))
return
}
if _, err = outFile.Write(chunkData); err != nil {
_ = outFile.Close()
_ = stdos.Remove(targetPath)
Error(w, http.StatusInternalServerError, s.t.Get("write chunk %d error: %v", i, err))
return
}
}
// Set permissions
s.setPermission(targetPath, 0755, "www", "www")
// Clean up the temporary chunk files
for _, chunkPath := range chunkPaths {
_ = stdos.Remove(chunkPath)
}
Success(w, chix.M{
"path": targetPath,
})
}
// formatDir formats directory entries
func (s *FileService) formatDir(base string, entries []stdos.DirEntry) []any {
var paths []any
@@ -607,3 +782,13 @@ func (s *FileService) setPermission(path string, mode stdos.FileMode, owner, gro
_ = io.Chmod(path, mode)
_ = io.Chown(path, owner, group)
}
// getChunkTempFilePrefix returns the temp-file prefix used for chunks
// Format: .{filename}.{first 16 chars of hash}.chunk.
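// e.g. "backup.tar.gz" with hash "e3b0c44298fc1c149afb..." yields the prefix ".backup.tar.gz.e3b0c44298fc1c14.chunk."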
func (s *FileService) getChunkTempFilePrefix(fileName, fileHash string) string {
hashPrefix := fileHash
if len(hashPrefix) > 16 {
hashPrefix = hashPrefix[:16]
}
return fmt.Sprintf(".%s.%s.chunk.", fileName, hashPrefix)
}
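A minimal client-side sketch of the protocol the handlers above implement (start → upload → finish). The base URL, route prefix, target path and file name are placeholder assumptions; only the endpoint names, JSON bodies and multipart form fields mirror this file. A production client would also use uploaded_chunks from the start response to skip chunks that already exist, as the Vue uploader later in this commit does.

package main

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "mime/multipart"
    "net/http"
    "os"
)

const chunkSize = 5 << 20 // 5MB, matching CHUNK_SIZE in the frontend uploader

func postJSON(url string, body map[string]any) error {
    data, err := json.Marshal(body)
    if err != nil {
        return err
    }
    resp, err := http.Post(url, "application/json", bytes.NewReader(data))
    if err != nil {
        return err
    }
    return resp.Body.Close()
}

func main() {
    base := "http://127.0.0.1:8888/api/file" // assumed panel address and route prefix
    targetDir, fileName := "/www/backups", "backup.tar.gz"

    data, err := os.ReadFile(fileName) // read whole file into memory for brevity
    if err != nil {
        panic(err)
    }
    sum := sha256.Sum256(data)
    fileHash := hex.EncodeToString(sum[:]) // 64 hex chars, as the len:64 validator requires
    chunkCount := (len(data) + chunkSize - 1) / chunkSize
    meta := map[string]any{"path": targetDir, "file_name": fileName, "file_hash": fileHash, "chunk_count": chunkCount}

    // 1. /chunk/start declares the upload; its response lists already-uploaded chunk indexes
    if err := postJSON(base+"/chunk/start", meta); err != nil {
        panic(err)
    }

    // 2. /chunk/upload receives each chunk as multipart/form-data
    for i := 0; i < chunkCount; i++ {
        lo, hi := i*chunkSize, (i+1)*chunkSize
        if hi > len(data) {
            hi = len(data)
        }
        chunk := data[lo:hi]
        chunkSum := sha256.Sum256(chunk)

        var buf bytes.Buffer
        w := multipart.NewWriter(&buf)
        _ = w.WriteField("path", targetDir)
        _ = w.WriteField("file_name", fileName)
        _ = w.WriteField("file_hash", fileHash)
        _ = w.WriteField("chunk_index", fmt.Sprint(i))
        _ = w.WriteField("chunk_hash", hex.EncodeToString(chunkSum[:]))
        part, _ := w.CreateFormFile("file", fmt.Sprintf("%s.part%d", fileName, i))
        _, _ = part.Write(chunk)
        _ = w.Close()

        resp, err := http.Post(base+"/chunk/upload", w.FormDataContentType(), &buf)
        if err != nil {
            panic(err)
        }
        resp.Body.Close()
    }

    // 3. /chunk/finish merges the chunks into {path}/{file_name} and removes the temp files
    if err := postJSON(base+"/chunk/finish", meta); err != nil {
        panic(err)
    }
}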

View File

@@ -41,6 +41,7 @@
"echarts": "^6.0.0",
"highlight.js": "^11.11.1",
"install": "^0.13.0",
"js-sha256": "^0.11.1",
"lodash-es": "^4.17.21",
"luxon": "^3.7.2",
"marked": "^17.0.0",
@@ -48,6 +49,7 @@
"monaco-editor": "^0.55.1",
"monaco-editor-nginx": "^2.0.2",
"node-forge": "^1.3.1",
"p-limit": "^7.2.0",
"pinia": "^3.0.3",
"pinia-plugin-persistedstate": "^4.5.0",
"remove": "^0.1.5",

web/pnpm-lock.yaml generated
View File

@@ -56,6 +56,9 @@ importers:
install:
specifier: ^0.13.0
version: 0.13.0
js-sha256:
specifier: ^0.11.1
version: 0.11.1
lodash-es:
specifier: ^4.17.21
version: 4.17.22
@@ -77,6 +80,9 @@ importers:
node-forge:
specifier: ^1.3.1
version: 1.3.3
p-limit:
specifier: ^7.2.0
version: 7.2.0
pinia:
specifier: ^3.0.3
version: 3.0.4(typescript@5.9.3)(vue@3.5.26(typescript@5.9.3))
@@ -2366,6 +2372,9 @@ packages:
js-base64@3.7.8:
resolution: {integrity: sha512-hNngCeKxIUQiEUN3GPJOkz4wF/YvdUdbNL9hsBcMQTkKzboD7T/q3OYOuuPZLUE6dBxSGpwhk5mwuDud7JVAow==}
js-sha256@0.11.1:
resolution: {integrity: sha512-o6WSo/LUvY2uC4j7mO50a2ms7E/EAdbP0swigLV+nzHKTTaYnaLIWJ02VdXrsJX0vGedDESQnLsOekr94ryfjg==}
js-tokens@4.0.0:
resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==}
@@ -2672,6 +2681,10 @@ packages:
resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==}
engines: {node: '>=10'}
p-limit@7.2.0:
resolution: {integrity: sha512-ATHLtwoTNDloHRFFxFJdHnG6n2WUeFjaR8XQMFdKIv0xkXjrER8/iG9iu265jOM95zXHAfv9oTkqhrfbIzosrQ==}
engines: {node: '>=20'}
p-locate@5.0.0:
resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
engines: {node: '>=10'}
@@ -3489,6 +3502,10 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
yocto-queue@1.2.2:
resolution: {integrity: sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==}
engines: {node: '>=12.20'}
zrender@6.0.0:
resolution: {integrity: sha512-41dFXEEXuJpNecuUQq6JlbybmnHaqqpGlbH1yxnA5V9MMP4SbohSVZsJIwz+zdjQXSSlR1Vc34EgH1zxyTDvhg==}
@@ -6015,6 +6032,8 @@ snapshots:
js-base64@3.7.8: {}
js-sha256@0.11.1: {}
js-tokens@4.0.0: {}
js-tokens@9.0.1: {}
@@ -6335,6 +6354,10 @@ snapshots:
dependencies:
yocto-queue: 0.1.0
p-limit@7.2.0:
dependencies:
yocto-queue: 1.2.2
p-locate@5.0.0:
dependencies:
p-limit: 3.1.0
@@ -7263,6 +7286,8 @@ snapshots:
yocto-queue@0.1.0: {}
yocto-queue@1.2.2: {}
zrender@6.0.0:
dependencies:
tslib: 2.3.0

View File

@@ -10,7 +10,8 @@ export default {
// Delete a file
delete: (path: string): any => http.Post('/file/delete', { path }),
// Upload a file
upload: (formData: FormData): any => http.Post('/file/upload', formData),
upload: (formData: FormData): any =>
http.Post('/file/upload', formData, { meta: { noAlert: true } }),
// Check whether files exist
exist: (paths: string[]): any => http.Post('/file/exist', paths),
// Move a file
@@ -40,5 +41,22 @@ export default {
sort: string,
page: number,
limit: number
): any => http.Get('/file/list', { params: { path, keyword, sub, sort, page, limit } })
): any => http.Get('/file/list', { params: { path, keyword, sub, sort, page, limit } }),
// Start a chunked upload
chunkStart: (data: {
path: string
file_name: string
file_hash: string
chunk_count: number
}): any => http.Post('/file/chunk/start', data),
// Upload a single chunk
chunkUpload: (formData: FormData): any =>
http.Post('/file/chunk/upload', formData, { meta: { noAlert: true } }),
// Finish a chunked upload
chunkFinish: (data: {
path: string
file_name: string
file_hash: string
chunk_count: number
}): any => http.Post('/file/chunk/finish', data)
}

View File

@@ -1,5 +1,7 @@
<script setup lang="ts">
import { sha256 } from 'js-sha256'
import type { UploadCustomRequestOptions, UploadFileInfo, UploadInst } from 'naive-ui'
import pLimit from 'p-limit'
import { useGettext } from 'vue3-gettext'
import api from '@/api/panel/file'
@@ -17,6 +19,304 @@ const fileList = ref<UploadFileInfo[]>([])
// File count threshold; exceeding it requires a second confirmation
const FILE_COUNT_THRESHOLD = 100
// Large-file threshold; files above this size use chunked upload (100MB)
const LARGE_FILE_THRESHOLD = 100 * 1024 * 1024
// Chunk size (5MB)
const CHUNK_SIZE = 5 * 1024 * 1024
// Retry count per chunk upload
const CHUNK_RETRY_COUNT = 10
// Number of concurrent chunk uploads
const CONCURRENT_UPLOADS = 3
// Upload speed state (tracked per file)
interface UploadProgress {
fileName: string
speed: string
}
const uploadProgressMap = ref<Map<string, UploadProgress>>(new Map())
// State of a single upload task
interface UploadTask {
isCancelled: boolean
activeRequests: { abort: () => void }[]
}
// Upload task state, keyed by a unique per-file identifier
const uploadTasks = new Map<string, UploadTask>()
// Build a unique key for a file
const getFileKey = (file: File) => `${file.name}-${file.size}-${file.lastModified}`
// Cancel the upload of a single file
const cancelUpload = (file: File) => {
const fileKey = getFileKey(file)
const task = uploadTasks.get(fileKey)
if (task) {
task.isCancelled = true
task.activeRequests.forEach((req) => req.abort())
uploadTasks.delete(fileKey)
}
}
// Cancel all uploads
const cancelAllUploads = () => {
uploadTasks.forEach((task) => {
task.isCancelled = true
task.activeRequests.forEach((req) => req.abort())
})
uploadTasks.clear()
}
// Compute a fast file identifier (used to recognize resumable uploads)
// Hashes file metadata plus head/tail samples to avoid reading the whole large file
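// Note: this is a sampled identifier, not the file's full SHA-256; it still yields 64 hex chars, which is all the backend's len:64 validator checks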
const calculateFileIdentifier = async (file: File): Promise<string> => {
const sampleSize = 1024 * 1024 // 1MB samples
// Read the head sample
const headChunk = file.slice(0, Math.min(sampleSize, file.size))
const headBuffer = await headChunk.arrayBuffer()
// Read the tail sample (if the file is large enough)
let tailBuffer: ArrayBuffer
if (file.size > sampleSize * 2) {
const tailChunk = file.slice(file.size - sampleSize, file.size)
tailBuffer = await tailChunk.arrayBuffer()
} else {
tailBuffer = headBuffer
}
// Combine the file metadata
const metadata = `${file.name}|${file.size}|${file.lastModified}`
const metaBuffer = new TextEncoder().encode(metadata)
// Concatenate everything and hash it
const combined = new Uint8Array(
metaBuffer.byteLength + headBuffer.byteLength + tailBuffer.byteLength
)
combined.set(new Uint8Array(metaBuffer), 0)
combined.set(new Uint8Array(headBuffer), metaBuffer.byteLength)
combined.set(new Uint8Array(tailBuffer), metaBuffer.byteLength + headBuffer.byteLength)
return sha256(combined)
}
// Compute the SHA256 of a single chunk
const calculateChunkHash = async (chunk: Blob): Promise<string> => {
const buffer = await chunk.arrayBuffer()
return sha256(new Uint8Array(buffer))
}
// Format the transfer speed for display
const formatSpeed = (bytesPerSecond: number): string => {
if (bytesPerSecond < 1024) {
return `${bytesPerSecond.toFixed(0)} B/s`
} else if (bytesPerSecond < 1024 * 1024) {
return `${(bytesPerSecond / 1024).toFixed(1)} KB/s`
} else if (bytesPerSecond < 1024 * 1024 * 1024) {
return `${(bytesPerSecond / 1024 / 1024).toFixed(1)} MB/s`
} else {
return `${(bytesPerSecond / 1024 / 1024 / 1024).toFixed(2)} GB/s`
}
}
// Upload a single chunk, retrying on failure
const uploadChunkWithRetry = async (
formData: FormData,
chunkIndex: number,
chunkSize: number,
onChunkComplete: (size: number) => void,
task: UploadTask
): Promise<void> => {
let lastError: Error | null = null
for (let attempt = 1; attempt <= CHUNK_RETRY_COUNT; attempt++) {
// Bail out if the task has been cancelled
if (task.isCancelled) {
throw new DOMException('Upload cancelled', 'AbortError')
}
try {
const method = api.chunkUpload(formData)
task.activeRequests.push(method)
try {
await method
onChunkComplete(chunkSize)
return
} finally {
// Remove the request from the active list
const index = task.activeRequests.indexOf(method)
if (index > -1) {
task.activeRequests.splice(index, 1)
}
}
} catch (error) {
// Rethrow immediately if the error is a cancellation
if (task.isCancelled || (error as Error).message?.includes('abort')) {
throw new DOMException('Upload cancelled', 'AbortError')
}
lastError = error as Error
console.warn(
`Chunk ${chunkIndex} upload failed (attempt ${attempt}/${CHUNK_RETRY_COUNT}):`,
error
)
if (attempt < CHUNK_RETRY_COUNT) {
// Wait before retrying, with exponential backoff
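// delays: 1s, 2s, 4s, 8s, then capped at 10s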
await new Promise((resolve) =>
setTimeout(resolve, Math.min(1000 * Math.pow(2, attempt - 1), 10000))
)
}
}
}
throw new Error(
`Chunk ${chunkIndex} upload failed after ${CHUNK_RETRY_COUNT} attempts: ${lastError?.message}`
)
}
// Chunked upload
const chunkedUpload = async (
file: File,
onProgress: (e: { percent: number }) => void,
onFinish: () => void,
onError: () => void
) => {
// Create the upload task for this file
const fileKey = getFileKey(file)
const task: UploadTask = { isCancelled: false, activeRequests: [] }
uploadTasks.set(fileKey, task)
// Initialize the progress display
uploadProgressMap.value.set(fileKey, { fileName: file.name, speed: '' })
try {
// Compute the file identifier (fast)
onProgress({ percent: 0 })
const fileHash = await calculateFileIdentifier(file)
// Bail out if cancelled
if (task.isCancelled) {
throw new DOMException('Upload cancelled', 'AbortError')
}
// Compute the number of chunks
const chunkCount = Math.ceil(file.size / CHUNK_SIZE)
// Start the chunked upload (query already-uploaded chunks)
const startMethod = api.chunkStart({
path: path.value,
file_name: file.name,
file_hash: fileHash,
chunk_count: chunkCount
})
task.activeRequests.push(startMethod)
const startRes = await startMethod
task.activeRequests = task.activeRequests.filter((r) => r !== startMethod)
const uploadedChunks: Set<number> = new Set(startRes.uploaded_chunks)
// Speed calculation state
let uploadedBytes = uploadedChunks.size * CHUNK_SIZE
let lastTime = Date.now()
let lastBytes = uploadedBytes
// Update progress and speed
const updateProgress = () => {
const now = Date.now()
const timeDiff = (now - lastTime) / 1000 // seconds
if (timeDiff >= 0.5) {
// Update the speed at most every 0.5s
const bytesDiff = uploadedBytes - lastBytes
const speed = bytesDiff / timeDiff
uploadProgressMap.value.set(fileKey, { fileName: file.name, speed: formatSpeed(speed) })
lastTime = now
lastBytes = uploadedBytes
}
const percent = Math.ceil((uploadedBytes / file.size) * 100)
onProgress({ percent: Math.min(percent, 99) }) // cap at 99%; the last 1% is reserved for the finish step
}
// Per-chunk completion callback
const onChunkComplete = (size: number) => {
uploadedBytes += size
updateProgress()
}
// Build the list of chunks still to upload
const pendingChunks: number[] = []
for (let i = 0; i < chunkCount; i++) {
if (!uploadedChunks.has(i)) {
pendingChunks.push(i)
}
}
// Upload a single chunk (run under the concurrency limit)
const uploadChunk = async (chunkIndex: number) => {
// Bail out if cancelled
if (task.isCancelled) {
throw new DOMException('Upload cancelled', 'AbortError')
}
const start = chunkIndex * CHUNK_SIZE
const end = Math.min(start + CHUNK_SIZE, file.size)
const chunk = file.slice(start, end)
const chunkSize = end - start
// Compute the chunk hash
const chunkHash = await calculateChunkHash(chunk)
// Upload the chunk
const formData = new FormData()
formData.append('path', path.value)
formData.append('file_name', file.name)
formData.append('file_hash', fileHash)
formData.append('chunk_index', chunkIndex.toString())
formData.append('chunk_hash', chunkHash)
formData.append('file', chunk)
await uploadChunkWithRetry(formData, chunkIndex, chunkSize, onChunkComplete, task)
}
// Limit concurrency
const limit = pLimit(CONCURRENT_UPLOADS)
await Promise.all(pendingChunks.map((chunkIndex) => limit(() => uploadChunk(chunkIndex))))
// Bail out if cancelled
if (task.isCancelled) {
throw new DOMException('Upload cancelled', 'AbortError')
}
// Finish the chunked upload (merge the chunks)
const finishMethod = api.chunkFinish({
path: path.value,
file_name: file.name,
file_hash: fileHash,
chunk_count: chunkCount
})
task.activeRequests.push(finishMethod)
await finishMethod
task.activeRequests = task.activeRequests.filter((r) => r !== finishMethod)
onProgress({ percent: 100 })
uploadProgressMap.value.delete(fileKey)
uploadTasks.delete(fileKey)
if (!task.isCancelled) {
onFinish()
window.$message.success($gettext('Upload %{ fileName } successful', { fileName: file.name }))
}
window.$bus.emit('file:refresh')
} catch (error) {
uploadProgressMap.value.delete(fileKey)
uploadTasks.delete(fileKey)
// Handle cancellation silently
if ((error as Error).name === 'AbortError' || task.isCancelled) {
console.log('Upload cancelled by user')
return
}
console.error('Chunked upload error:', error)
if (!task.isCancelled) {
onError()
}
}
}
// Watch for files pre-dropped onto the page
watch(
@@ -50,13 +350,12 @@ watch(
// Add files to the upload list
const addFilesToList = (files: File[], autoUpload: boolean = false) => {
const newFiles: UploadFileInfo[] = files.map((file, index) => ({
fileList.value = files.map((file, index) => ({
id: `dropped-${Date.now()}-${index}`,
name: file.name,
status: 'pending' as const,
file: file
}))
fileList.value = newFiles
// Start the upload automatically
if (autoUpload) {
@@ -66,34 +365,67 @@ const addFilesToList = (files: File[], autoUpload: boolean = false) => {
}
}
// When the modal closes, clear the file list
// When the modal closes, clear the file list and cancel any in-flight uploads
watch(show, (val) => {
if (!val) {
cancelAllUploads()
fileList.value = []
}
})
const uploadRequest = ({ file, onFinish, onError, onProgress }: UploadCustomRequestOptions) => {
const fileObj = file.file as File
// Large files use chunked upload
if (fileObj.size > LARGE_FILE_THRESHOLD) {
chunkedUpload(fileObj, onProgress, onFinish, onError)
return
}
// Small files use the regular upload endpoint
const fileKey = getFileKey(fileObj)
const task: UploadTask = { isCancelled: false, activeRequests: [] }
uploadTasks.set(fileKey, task)
const formData = new FormData()
formData.append('path', `${path.value}/${file.name}`)
formData.append('file', file.file as File)
const { uploading } = useRequest(api.upload(formData))
formData.append('file', fileObj)
const method = api.upload(formData)
task.activeRequests.push(method)
const { uploading } = useRequest(method)
.onSuccess(() => {
onFinish()
window.$bus.emit('file:refresh')
window.$message.success($gettext('Upload %{ fileName } successful', { fileName: file.name }))
uploadTasks.delete(fileKey)
if (!task.isCancelled) {
onFinish()
window.$bus.emit('file:refresh')
window.$message.success($gettext('Upload %{ fileName } successful', { fileName: file.name }))
}
})
.onError(() => {
onError()
uploadTasks.delete(fileKey)
if (!task.isCancelled) {
onError()
}
})
.onComplete(() => {
stopWatch()
})
const stopWatch = watch(uploading, (progress) => {
onProgress({ percent: Math.ceil((progress.loaded / progress.total) * 100) })
if (!task.isCancelled) {
onProgress({ percent: Math.ceil((progress.loaded / progress.total) * 100) })
}
})
}
// Handle file removal (cancel its upload)
const handleRemove = ({ file }: { file: UploadFileInfo }) => {
if (file.file) {
cancelUpload(file.file)
}
}
// Handle file selection changes (used for the file-count confirmation)
const handleChange = (data: { fileList: UploadFileInfo[] }) => {
const newFiles = data.fileList.filter(
@@ -131,6 +463,17 @@ const handleChange = (data: { fileList: UploadFileInfo[] }) => {
:segmented="false"
>
<n-flex vertical>
<!-- Upload speed display -->
<n-flex
v-for="[key, progress] in uploadProgressMap"
:key="key"
justify="space-between"
align="center"
class="upload-speed-bar"
>
<NText>{{ progress.fileName }}</NText>
<NText type="success">{{ progress.speed || $gettext('Preparing...') }}</NText>
</n-flex>
<n-upload
ref="upload"
v-model:file-list="fileList"
@@ -138,6 +481,7 @@ const handleChange = (data: { fileList: UploadFileInfo[] }) => {
directory-dnd
:custom-request="uploadRequest"
@change="handleChange"
@remove="handleRemove"
>
<n-upload-dragger>
<div style="margin-bottom: 12px">
@@ -155,4 +499,11 @@ const handleChange = (data: { fileList: UploadFileInfo[] }) => {
</n-modal>
</template>
<style scoped lang="scss"></style>
<style scoped lang="scss">
.upload-speed-bar {
padding: 8px 12px;
background: var(--n-color-embedded);
border-radius: 4px;
margin-bottom: 12px;
}
</style>