VUE3大文件分片+worker优化分片速度+node.js示例

首先是文件上传以及分片,话不多说,直接贴代码






worker.js中的代码

import { createChunk } from './c'; // 保留原来的 createChunk(每个 chunk 的 MD5 或数据)

// Worker entry point: hash the chunk range [start, end) assigned to this
// worker and post the results back to the main thread in one batch.
onmessage = async function (e) {
    const { file, start, end, CHUNK_SIZE, fileId } = e.data;
    const result = [];

    for (let i = start; i < end; i++) {
        const chunk = await createChunk(file, i, CHUNK_SIZE);
        // Bug fix: fileId was destructured but never attached, even though
        // the original comments claimed it was. Tag each chunk so the main
        // thread can associate it with the right upload session.
        result.push({ ...chunk, fileId });
    }

    postMessage(result);
};

createChunk函数

import SparkMD5 from 'spark-md5'

// Slice one chunk out of `file` and compute its MD5.
// Resolves with { start, end, index, hash, blob }.
// Note: Blob.slice() clamps `end` past EOF, so the last chunk needs no
// special bounds handling.
export function createChunk (file, index, chunkSize) {

    return new Promise((resolve, reject) => {
        const start = index * chunkSize; // byte offset where this chunk begins
        const end = start + chunkSize; // byte offset where this chunk ends (exclusive)
        const spark = new SparkMD5.ArrayBuffer(); // incremental MD5 hasher
        const fileReader = new FileReader(); // async reader for the sliced blob
        const blob = file.slice(start, end); // the chunk's raw data
        fileReader.onload = (e) => { // read finished successfully
            spark.append(e.target.result);
            const checkObj = {
                start,
                end,
                index,
                hash: spark.end(), // finalize and return the MD5 hex digest
                blob
            }

            resolve(checkObj)
        }
        // Bug fix: without an onerror handler the promise hangs forever if the
        // read fails (e.g. the file changed on disk mid-upload).
        fileReader.onerror = () => reject(fileReader.error)
        fileReader.readAsArrayBuffer(blob);
    })
}

解释一下:前端的 checkUploadedChunks 方法主要是给整个文件生成一个 MD5 编号(即传给后端的 fileId);cutFile 方法则给每个分片生成一个 MD5 编号,主要供分片上传接口使用。

接下来上node(文件名:server.js)代码,重要:启动命令:node server.js

const express = require('express');
const multer = require('multer');
const fs = require('fs');
const fsp = require('fs/promises');
const path = require('path');
const cors = require('cors');

const app = express();
const PORT = 3000;

// Allow cross-origin requests from the Vue dev server and parse JSON bodies
// (needed by the /api/check and /api/merge endpoints).
app.use(cors());
app.use(express.json());

// Configure storage directories: chunks land in uploads/<fileId>/,
// merged files are written to merged/.
const UPLOAD_DIR = path.resolve(__dirname, 'uploads');
const MERGE_DIR = path.resolve(__dirname, 'merged');

// 动态创建上传目录
// Resolve the temp directory holding the chunks of one upload session.
// `fileId` comes straight from the client, so strip any path components —
// otherwise a fileId like "../../etc" resolves outside UPLOAD_DIR
// (path traversal).
function getChunkDir (fileId) {
  return path.resolve(UPLOAD_DIR, path.basename(String(fileId)));
}

// Multer storage that writes each chunk to <UPLOAD_DIR>/<fileId>/chunk-<index>.
// NOTE(review): multer only populates req.body with fields that appear
// *before* the file field in the FormData — the client must append
// fileId/index first, or both will be undefined here.
const storage = multer.diskStorage({
  destination: function (req, file, cb) {
    const { fileId } = req.body;
    if (!fileId) {
      return cb(new Error('fileId is required'));
    }
    // basename() blocks path traversal via a crafted fileId.
    const chunkDir = getChunkDir(path.basename(String(fileId)));
    fs.mkdirSync(chunkDir, { recursive: true });
    cb(null, chunkDir);
  },
  filename: function (req, file, cb) {
    const { index } = req.body;
    // Only accept a plain non-negative integer; anything else (undefined,
    // "../x", …) would produce a broken or dangerous on-disk name.
    if (!/^\d+$/.test(String(index))) {
      return cb(new Error('index must be a non-negative integer'));
    }
    cb(null, `chunk-${index}`);
  }
});

const upload = multer({ storage });

// 上传接口
// Acknowledge a single chunk; multer's diskStorage has already persisted it
// by the time this handler runs.
const handleChunkUpload = (_req, res) => {
  res.send({ message: '分片上传成功' });
};

app.post('/api/upload', upload.single('chunk'), handleChunkUpload);

// 检查已上传分片
// Report which chunk indexes are already on disk for this fileId so the
// client can skip re-uploading them (resumable upload).
app.post('/api/check', async (req, res) => {
  try {
    const { fileId } = req.body;
    const chunkDir = getChunkDir(fileId);
    if (!fs.existsSync(chunkDir)) {
      return res.send({ uploaded: [] });
    }
    const files = await fsp.readdir(chunkDir);
    // Only count well-formed chunk files; a stray entry without "chunk-N"
    // shape would otherwise yield an `undefined` index and corrupt the
    // client's diff of missing chunks.
    const uploaded = files
      .filter(name => /^chunk-\d+$/.test(name))
      .map(name => name.split('-')[1]);
    res.send({ uploaded });
  } catch (err) {
    // In Express 4 a rejected async handler is otherwise an unhandled
    // rejection — surface it as a 500 instead.
    res.status(500).send({ message: '检查分片失败' });
  }
});

// 合并分片
// Concatenate all uploaded chunks of `fileId` (in index order) into
// MERGE_DIR/<fileName>, then delete the temp chunk directory.
app.post('/api/merge', async (req, res) => {
  try {
    const { fileId, fileName } = req.body;
    if (!fileId || !fileName) {
      return res.status(400).send({ message: 'fileId 和 fileName 不能为空' });
    }
    const chunkDir = getChunkDir(fileId);
    if (!fs.existsSync(chunkDir)) {
      return res.status(400).send({ message: '分片目录不存在' });
    }
    // basename() stops a malicious fileName (e.g. "../../x") from writing
    // outside MERGE_DIR (path traversal).
    const filePath = path.resolve(MERGE_DIR, path.basename(String(fileName)));
    await fsp.mkdir(MERGE_DIR, { recursive: true });

    const chunkFiles = await fsp.readdir(chunkDir);
    const sortedChunks = chunkFiles
      .filter(name => /^chunk-\d+$/.test(name)) // ignore stray files
      .sort((a, b) => Number(a.split('-')[1]) - Number(b.split('-')[1]));

    const writeStream = fs.createWriteStream(filePath);
    writeStream.on('error', () => {
      // Guard against a double response if the stream fails after 'finish'.
      if (!res.headersSent) {
        res.status(500).send({ message: '文件合并失败' });
      }
    });

    for (const chunkFile of sortedChunks) {
      // Non-blocking read: the original readFileSync stalled the event loop
      // for the entire merge of a large file.
      const data = await fsp.readFile(path.resolve(chunkDir, chunkFile));
      writeStream.write(data);
    }

    writeStream.end();
    writeStream.on('finish', () => {
      res.send({ message: '文件合并成功' });
      fs.rmSync(chunkDir, { recursive: true, force: true }); // drop temp chunks
    });
  } catch (err) {
    if (!res.headersSent) {
      res.status(500).send({ message: '文件合并失败' });
    }
  }
});

// Start the HTTP server.
const onServerReady = () => {
  console.log(` Server running at http://localhost:${PORT}`);
};

app.listen(PORT, onServerReady);

完整代码示例






重点:这个思路是先把整个文件的 ID(fileId)传给后端做检查。如果该文件从未上传过,后端返回空数组;如果已经部分上传,则返回已上传分片的 id 列表。前端据此筛选出尚未上传的分片,只对这些分片调用上传接口。代码中还控制了并发数与 worker 线程数量,整体速度优化了约 50%。

接下来把断点续传,以及样式和分片进度、上传进度、速度优化交给你们来写

你可能感兴趣的:(node.js)