From 440dd0bcfe1d0bc8138cc4ef001e02accaec1c3d Mon Sep 17 00:00:00 2001
From: twoonefour
Date: Mon, 25 Aug 2025 23:41:39 +0800
Subject: [PATCH 1/9] fix(stream): http chunked upload issue

---
 internal/stream/stream.go | 75 +++++++++++++++++++++------------------
 1 file changed, 40 insertions(+), 35 deletions(-)

diff --git a/internal/stream/stream.go b/internal/stream/stream.go
index 947727612..c1145549f 100644
--- a/internal/stream/stream.go
+++ b/internal/stream/stream.go
@@ -4,18 +4,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"io"
 	"math"
 	"os"
 	"sync"
 
-	"github.com/OpenListTeam/OpenList/v4/internal/conf"
 	"github.com/OpenListTeam/OpenList/v4/internal/errs"
 	"github.com/OpenListTeam/OpenList/v4/internal/model"
 	"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
 	"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
 	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
-	"github.com/rclone/rclone/lib/mmap"
 
 	"go4.org/readerutil"
 )
@@ -179,47 +178,53 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
 // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
 func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
-	if maxCacheSize > int64(conf.MaxBufferLimit) {
-		tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize())
-		if err != nil {
-			return nil, err
-		}
-		f.Add(tmpF)
-		f.tmpFile = tmpF
-		f.Reader = tmpF
-		return tmpF, nil
-	}
+	limit := int64(conf.MaxBufferLimit)
+	// TODO: 这里不会改,我写成了buf := make([]byte, 64<<10)的形式
+	//var buf []byte
+	//if conf.MmapThreshold > 0 && limit >= int64(conf.MmapThreshold) {
+	//	m, err := mmap.Alloc(int(limit))
+	//	if err == nil {
+	//		f.Add(utils.CloseFunc(func() error {
+	//			return mmap.Free(m)
+	//		}))
+	//		buf = m
+	//	}
+	//}
 	if f.peekBuff == nil {
 		f.peekBuff = &buffer.Reader{}
 		f.oriReader = f.Reader
 	}
-	bufSize := maxCacheSize - int64(f.peekBuff.Len())
-	var buf []byte
-	if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) {
-		m, err := mmap.Alloc(int(bufSize))
-		if err == nil {
-			f.Add(utils.CloseFunc(func() error {
-				return mmap.Free(m)
-			}))
-			buf = m
+	var readBytes int
+	// precache first `limit` byte
+	for int64(readBytes) < limit {
+		buf := make([]byte, 64<<10)
+		want := limit - int64(readBytes)
+		if want > int64(len(buf)) {
+			want = int64(len(buf))
+		}
+		n, err := f.oriReader.Read(buf[:want])
+		if n > 0 {
+			f.peekBuff.Append(buf[:n])
+			readBytes += n
+		}
+		if err == io.EOF {
+			f.Reader = f.peekBuff
+			f.oriReader = nil
+			// should update real file size here to solve `GetSize == 0` issue
+			f.size = int64(readBytes)
+			return f.peekBuff, nil
 		}
 	}
-	if buf == nil {
-		buf = make([]byte, bufSize)
-	}
-	n, err := io.ReadFull(f.oriReader, buf)
-	if bufSize != int64(n) {
-		return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err)
-	}
-	f.peekBuff.Append(buf)
-	if int64(f.peekBuff.Len()) >= f.GetSize() {
-		f.Reader = f.peekBuff
-		f.oriReader = nil
-	} else {
-		f.Reader = io.MultiReader(f.peekBuff, f.oriReader)
+	// if file is larger than MaxBufferLimit, fallback to disk
+	tmpF, err := utils.CreateTempFile(io.MultiReader(f.peekBuff, f.oriReader), f.GetSize())
+	if err != nil {
+		return nil, err
 	}
-	return f.peekBuff, nil
+	f.Add(tmpF)
+	f.tmpFile = tmpF
+	f.Reader = tmpF
+	return tmpF, nil
 }
 
 func (f *FileStream) SetTmpFile(file model.File) {

From d6dd66c53abc18095d8df025421b70d4c6cd8eeb Mon Sep 17 00:00:00 2001
From: twoonefour
Date: Tue, 26 Aug 2025 00:09:52 +0800
Subject: [PATCH 2/9] fix(stream): use MmapThreshold

---
 internal/stream/stream.go | 29
+++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/internal/stream/stream.go b/internal/stream/stream.go index c1145549f..70d0f359c 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/rclone/rclone/lib/mmap" "io" "math" "os" @@ -179,26 +180,30 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { func (f *FileStream) cache(maxCacheSize int64) (model.File, error) { limit := int64(conf.MaxBufferLimit) - // TODO: 这里不会改,我写成了buf := make([]byte, 64<<10)的形式 - //var buf []byte - //if conf.MmapThreshold > 0 && limit >= int64(conf.MmapThreshold) { - // m, err := mmap.Alloc(int(limit)) - // if err == nil { - // f.Add(utils.CloseFunc(func() error { - // return mmap.Free(m) - // })) - // buf = m - // } - //} if f.peekBuff == nil { f.peekBuff = &buffer.Reader{} f.oriReader = f.Reader } + var buf []byte + bufSize := 64 << 10 // 64KB as default + if conf.MmapThreshold > 0 && bufSize >= conf.MmapThreshold { + m, err := mmap.Alloc(bufSize) + if err == nil { + f.Add(utils.CloseFunc(func() error { + return mmap.Free(m) + })) + buf = m + } + } + var readBytes int // precache first `limit` byte for int64(readBytes) < limit { - buf := make([]byte, 64<<10) + if buf == nil { + buf = make([]byte, bufSize) + } + want := limit - int64(readBytes) if want > int64(len(buf)) { want = int64(len(buf)) From 70b44faf9045f2697de84d5db86a8bd73d8a4889 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Wed, 27 Aug 2025 20:16:10 +0800 Subject: [PATCH 3/9] fix(stream): improve caching mechanism and handle size=0 case --- internal/stream/stream.go | 123 ++++++++++++++++++++++++++------------ pkg/buffer/bytes.go | 34 +++++------ pkg/buffer/bytes_test.go | 2 +- pkg/buffer/file.go | 88 +++++++++++++++++++++++++++ 4 files changed, 190 insertions(+), 57 deletions(-) create mode 100644 pkg/buffer/file.go diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 70d0f359c..cbd999ba1 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -4,18 +4,18 @@ import ( "context" "errors" "fmt" - "github.com/OpenListTeam/OpenList/v4/internal/conf" - "github.com/rclone/rclone/lib/mmap" "io" "math" "os" "sync" + "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/buffer" "github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/rclone/rclone/lib/mmap" "go4.org/readerutil" ) @@ -137,6 +137,61 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ if writer != nil { reader = io.TeeReader(reader, writer) } + + if f.GetSize() == 0 { + if f.peekBuff == nil { + f.peekBuff = &buffer.Reader{} + } + // 检查是否有数据 + buf := make([]byte, 64*utils.KB) + n, err := io.ReadFull(reader, buf) + if err == io.ErrUnexpectedEOF { + if n > 0 { + f.peekBuff.Append(buf[:n]) + } + f.size = f.peekBuff.Size() + f.Reader = f.peekBuff + return f.peekBuff, nil + } else if err != nil { + return nil, err + } + f.peekBuff.Append(buf[:n]) + if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 { + m, err := mmap.Alloc(conf.MaxBufferLimit - n) + if err == nil { + f.Add(utils.CloseFunc(func() error { + return mmap.Free(m) + })) + n, err = io.ReadFull(reader, m) + if err == io.ErrUnexpectedEOF { + if n > 0 { + 
f.peekBuff.Append(m[:n]) + } + f.size = f.peekBuff.Size() + f.Reader = f.peekBuff + return f.peekBuff, nil + } else if err != nil { + return nil, err + } + } + } + + tmpF, err := utils.CreateTempFile(reader, 0) + if err != nil { + return nil, err + } + f.Add(utils.CloseFunc(func() error { + return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name())) + })) + peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF) + if err != nil { + return nil, err + } + f.size = peekF.Size() + f.Reader = peekF + return peekF, nil + } + f.Reader = reader return f.cache(f.GetSize()) } @@ -162,7 +217,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { } size := httpRange.Start + httpRange.Length - if f.peekBuff != nil && size <= int64(f.peekBuff.Len()) { + if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } @@ -179,16 +234,25 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { // 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大 func (f *FileStream) cache(maxCacheSize int64) (model.File, error) { - limit := int64(conf.MaxBufferLimit) + if maxCacheSize > int64(conf.MaxBufferLimit) { + tmpF, err := utils.CreateTempFile(f.Reader, f.GetSize()) + if err != nil { + return nil, err + } + f.Add(tmpF) + f.tmpFile = tmpF + f.Reader = tmpF + return tmpF, nil + } if f.peekBuff == nil { f.peekBuff = &buffer.Reader{} f.oriReader = f.Reader } + bufSize := maxCacheSize - int64(f.peekBuff.Size()) var buf []byte - bufSize := 64 << 10 // 64KB as default - if conf.MmapThreshold > 0 && bufSize >= conf.MmapThreshold { - m, err := mmap.Alloc(bufSize) + if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) { + m, err := mmap.Alloc(int(bufSize)) if err == nil { f.Add(utils.CloseFunc(func() error { return mmap.Free(m) @@ -196,40 +260,21 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) { buf = m } } - - var readBytes int - // precache first `limit` byte - for int64(readBytes) < limit { - if buf == nil { - buf = make([]byte, bufSize) - } - - want := limit - int64(readBytes) - if want > int64(len(buf)) { - want = int64(len(buf)) - } - n, err := f.oriReader.Read(buf[:want]) - if n > 0 { - f.peekBuff.Append(buf[:n]) - readBytes += n - } - if err == io.EOF { - f.Reader = f.peekBuff - f.oriReader = nil - // should update real file size here to solve `GetSize == 0` issue - f.size = int64(readBytes) - return f.peekBuff, nil - } + if buf == nil { + buf = make([]byte, bufSize) } - // if file is larger than MaxBufferLimit, fallback to disk - tmpF, err := utils.CreateTempFile(io.MultiReader(f.peekBuff, f.oriReader), f.GetSize()) - if err != nil { - return nil, err + n, err := io.ReadFull(f.oriReader, buf) + if bufSize != int64(n) { + return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err) + } + f.peekBuff.Append(buf) + if int64(f.peekBuff.Size()) >= f.GetSize() { + f.Reader = f.peekBuff + f.oriReader = nil + } else { + f.Reader = io.MultiReader(f.peekBuff, f.oriReader) } - f.Add(tmpF) - f.tmpFile = tmpF - f.Reader = tmpF - return tmpF, nil + return f.peekBuff, nil } func (f *FileStream) SetTmpFile(file model.File) { diff --git a/pkg/buffer/bytes.go b/pkg/buffer/bytes.go index 3ee107478..6a7afc50d 100644 --- a/pkg/buffer/bytes.go +++ b/pkg/buffer/bytes.go @@ -8,29 +8,29 @@ import ( // 用于存储不复用的[]byte type Reader struct { bufs [][]byte - length int - offset int + size int64 + offset int64 } -func (r *Reader) Len() int { - return r.length +func (r 
*Reader) Size() int64 { + return r.size } func (r *Reader) Append(buf []byte) { - r.length += len(buf) + r.size += int64(len(buf)) r.bufs = append(r.bufs, buf) } func (r *Reader) Read(p []byte) (int, error) { - n, err := r.ReadAt(p, int64(r.offset)) + n, err := r.ReadAt(p, r.offset) if n > 0 { - r.offset += n + r.offset += int64(n) } return n, err } func (r *Reader) ReadAt(p []byte, off int64) (int, error) { - if off < 0 || off >= int64(r.length) { + if off < 0 || off >= r.size { return 0, io.EOF } @@ -56,35 +56,35 @@ func (r *Reader) ReadAt(p []byte, off int64) (int, error) { } func (r *Reader) Seek(offset int64, whence int) (int64, error) { - var abs int switch whence { case io.SeekStart: - abs = int(offset) case io.SeekCurrent: - abs = r.offset + int(offset) + offset = r.offset + offset case io.SeekEnd: - abs = r.length + int(offset) + offset = r.size + offset default: return 0, errors.New("Seek: invalid whence") } - if abs < 0 || abs > r.length { + if offset < 0 || offset > r.size { return 0, errors.New("Seek: invalid offset") } - r.offset = abs - return int64(abs), nil + r.offset = offset + return offset, nil } func (r *Reader) Reset() { clear(r.bufs) r.bufs = nil - r.length = 0 + r.size = 0 r.offset = 0 } func NewReader(buf ...[]byte) *Reader { - b := &Reader{} + b := &Reader{ + bufs: make([][]byte, 0, len(buf)), + } for _, b1 := range buf { b.Append(b1) } diff --git a/pkg/buffer/bytes_test.go b/pkg/buffer/bytes_test.go index b66af229c..309906994 100644 --- a/pkg/buffer/bytes_test.go +++ b/pkg/buffer/bytes_test.go @@ -71,7 +71,7 @@ func TestReader_ReadAt(t *testing.T) { off: 24, }, want: func(a args, n int, err error) error { - if n != bs.Len()-int(a.off) { + if n != int(bs.Size()-a.off) { return errors.New("read length not match") } if string(a.p[:n]) != "OpenList" { diff --git a/pkg/buffer/file.go b/pkg/buffer/file.go new file mode 100644 index 000000000..48edf5a4c --- /dev/null +++ b/pkg/buffer/file.go @@ -0,0 +1,88 @@ +package buffer + +import ( + "errors" + "io" + "os" +) + +type PeekFile struct { + peek *Reader + file *os.File + offset int64 + size int64 +} + +func (p *PeekFile) Read(b []byte) (n int, err error) { + n, err = p.ReadAt(b, p.offset) + if n > 0 { + p.offset += int64(n) + } + return n, err +} + +func (p *PeekFile) ReadAt(b []byte, off int64) (n int, err error) { + if off < p.peek.Size() { + n, err = p.peek.ReadAt(b, off) + if err == nil || n == len(b) { + return n, nil + } + // EOF + } + var nn int + nn, err = p.file.ReadAt(b[n:], off+int64(n)-p.peek.Size()) + return n + nn, err +} + +func (p *PeekFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + case io.SeekCurrent: + if offset == 0 { + return p.offset, nil + } + offset = p.offset + offset + case io.SeekEnd: + offset = p.size + offset + default: + return 0, errors.New("Seek: invalid whence") + } + + if offset < 0 || offset > p.size { + return 0, errors.New("Seek: invalid offset") + } + if offset <= p.peek.Size() { + _, err := p.peek.Seek(offset, io.SeekStart) + if err != nil { + return 0, err + } + _, err = p.file.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + } else { + _, err := p.peek.Seek(p.peek.Size(), io.SeekStart) + if err != nil { + return 0, err + } + _, err = p.file.Seek(offset-p.peek.Size(), io.SeekStart) + if err != nil { + return 0, err + } + } + + p.offset = offset + return offset, nil +} + +func (p *PeekFile) Size() int64 { + return p.size +} + +func NewPeekFile(peek *Reader, file *os.File) (*PeekFile, error) { + stat, err := file.Stat() + 
if err == nil { + return &PeekFile{peek: peek, file: file, size: stat.Size() + peek.Size()}, nil + } + return nil, err +} From 1eec47f1b30e7f28fbe896a369b705105f16f672 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Wed, 27 Aug 2025 20:35:59 +0800 Subject: [PATCH 4/9] fix bug --- internal/stream/stream.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/internal/stream/stream.go b/internal/stream/stream.go index cbd999ba1..e1fb934bd 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -145,17 +145,16 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ // 检查是否有数据 buf := make([]byte, 64*utils.KB) n, err := io.ReadFull(reader, buf) + if n > 0 { + f.peekBuff.Append(buf[:n]) + } if err == io.ErrUnexpectedEOF { - if n > 0 { - f.peekBuff.Append(buf[:n]) - } f.size = f.peekBuff.Size() f.Reader = f.peekBuff return f.peekBuff, nil } else if err != nil { return nil, err } - f.peekBuff.Append(buf[:n]) if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 { m, err := mmap.Alloc(conf.MaxBufferLimit - n) if err == nil { @@ -163,10 +162,10 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ return mmap.Free(m) })) n, err = io.ReadFull(reader, m) + if n > 0 { + f.peekBuff.Append(m[:n]) + } if err == io.ErrUnexpectedEOF { - if n > 0 { - f.peekBuff.Append(m[:n]) - } f.size = f.peekBuff.Size() f.Reader = f.peekBuff return f.peekBuff, nil From 4ae1464c9af54fedd8756063d1d1309d80419a9e Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Thu, 28 Aug 2025 11:49:35 +0800 Subject: [PATCH 5/9] fix(buffer): optimize ReadAt method for improved performance --- pkg/buffer/bytes.go | 25 ++++++++++++++----------- pkg/buffer/bytes_test.go | 3 +-- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/pkg/buffer/bytes.go b/pkg/buffer/bytes.go index 6a7afc50d..3e6cb5405 100644 --- a/pkg/buffer/bytes.go +++ b/pkg/buffer/bytes.go @@ -34,22 +34,25 @@ func (r *Reader) ReadAt(p []byte, off int64) (int, error) { return 0, io.EOF } - n, length := 0, int64(0) + n := 0 readFrom := false for _, buf := range r.bufs { - newLength := length + int64(len(buf)) if readFrom { - w := copy(p[n:], buf) - n += w - } else if off < newLength { + nn := copy(p[n:], buf) + n += nn + if n == len(p) { + return n, nil + } + } else if newOff := off - int64(len(buf)); newOff >= 0 { + off = newOff + } else { + nn := copy(p, buf[off:]) + if nn == len(p) { + return nn, nil + } + n += nn readFrom = true - w := copy(p[n:], buf[int(off-length):]) - n += w } - if n == len(p) { - return n, nil - } - length = newLength } return n, io.EOF diff --git a/pkg/buffer/bytes_test.go b/pkg/buffer/bytes_test.go index 309906994..3f4d85563 100644 --- a/pkg/buffer/bytes_test.go +++ b/pkg/buffer/bytes_test.go @@ -13,8 +13,7 @@ func TestReader_ReadAt(t *testing.T) { } bs := &Reader{} bs.Append([]byte("github.com")) - bs.Append([]byte("/")) - bs.Append([]byte("OpenList")) + bs.Append([]byte("/OpenList")) bs.Append([]byte("Team/")) bs.Append([]byte("OpenList")) tests := []struct { From 4ddb579b2ffc1542a3754bfb49a981d1b8a38f59 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 30 Aug 2025 12:14:04 +0800 Subject: [PATCH 6/9] fix(upload): handle Content-Length and File-Size headers for better size management --- internal/stream/stream.go | 4 ++-- server/handles/fsup.go | 18 ++++++++++-------- server/webdav/webdav.go | 13 ++++++++++++- 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/internal/stream/stream.go 
b/internal/stream/stream.go index e1fb934bd..8d2f504fd 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -138,12 +138,12 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ reader = io.TeeReader(reader, writer) } - if f.GetSize() == 0 { + if f.GetSize() < 0 { if f.peekBuff == nil { f.peekBuff = &buffer.Reader{} } // 检查是否有数据 - buf := make([]byte, 64*utils.KB) + buf := []byte{0} n, err := io.ReadFull(reader, buf) if n > 0 { f.peekBuff.Append(buf[:n]) diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 087a58a9a..c9e1f3bcc 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -56,14 +56,16 @@ func FsStream(c *gin.Context) { } } dir, name := stdpath.Split(path) - sizeStr := c.GetHeader("Content-Length") - if sizeStr == "" { - sizeStr = "0" - } - size, err := strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - common.ErrorResp(c, err, 400) - return + size := c.Request.ContentLength + if size < 0 { + sizeStr := c.GetHeader("File-Size") + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + } } h := make(map[*utils.HashType]string) if md5 := c.GetHeader("X-File-Md5"); md5 != "" { diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index b6f7cdac8..983f08eae 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -14,6 +14,7 @@ import ( "net/url" "os" "path" + "strconv" "strings" "time" @@ -341,9 +342,19 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, if err != nil { return http.StatusForbidden, err } + size := r.ContentLength + if size < 0 { + sizeStr := r.Header.Get("File-Size") + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return http.StatusBadRequest, err + } + } + } obj := model.Object{ Name: path.Base(reqPath), - Size: r.ContentLength, + Size: size, Modified: h.getModTime(r), Ctime: h.getCreateTime(r), } From 3a82d012e58e3f3fe85e7a1c0e122a1da3808b67 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Mon, 1 Sep 2025 12:49:39 +0800 Subject: [PATCH 7/9] =?UTF-8?q?fix(189pc):=20=E7=A7=BB=E9=99=A4=E9=87=8D?= =?UTF-8?q?=E5=A4=8D=E9=99=90=E9=80=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- drivers/189pc/utils.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index c791e7553..eb2b040b2 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -573,8 +573,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // step.4 上传切片 uploadUrl := uploadUrls[0] - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, - driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily) if err != nil { return err } From 0fea41bf00091be0c27ba917e7584aafe4cbf916 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Mon, 15 Sep 2025 17:37:56 +0800 Subject: [PATCH 8/9] fix(upload): handle negative file size during streaming uploads --- internal/op/fs.go | 5 +++++ server/handles/fsup.go | 1 + 2 files changed, 6 insertions(+) diff --git a/internal/op/fs.go b/internal/op/fs.go index 114c26fcc..c5a5b52d3 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -630,6 +630,11 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod up = func(p float64) {} } + // 
如果小于0,则通过缓存获取完整大小,可能发生于流式上传 + if file.GetSize() < 0 { + log.Warnf("file size < 0, try to get full size from cache") + file.CacheFullAndWriter(nil, nil) + } switch s := storage.(type) { case driver.PutResult: var newObj model.Obj diff --git a/server/handles/fsup.go b/server/handles/fsup.go index c9e1f3bcc..87a07f2cd 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -56,6 +56,7 @@ func FsStream(c *gin.Context) { } } dir, name := stdpath.Split(path) + // 如果请求头 Content-Length 和 File-Size 都没有,则 size=-1,表示未知大小的流式上传 size := c.Request.ContentLength if size < 0 { sizeStr := c.GetHeader("File-Size") From cf4aec2ac11530c18716dd1121778a6241f21be3 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Mon, 15 Sep 2025 17:58:48 +0800 Subject: [PATCH 9/9] fix(upload): update header key from File-Size to X-File-Size for size retrieval --- server/handles/fsup.go | 4 ++-- server/webdav/webdav.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 87a07f2cd..71d9dbae7 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -56,10 +56,10 @@ func FsStream(c *gin.Context) { } } dir, name := stdpath.Split(path) - // 如果请求头 Content-Length 和 File-Size 都没有,则 size=-1,表示未知大小的流式上传 + // 如果请求头 Content-Length 和 X-File-Size 都没有,则 size=-1,表示未知大小的流式上传 size := c.Request.ContentLength if size < 0 { - sizeStr := c.GetHeader("File-Size") + sizeStr := c.GetHeader("X-File-Size") if sizeStr != "" { size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil { diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index 983f08eae..802947eb4 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -344,7 +344,7 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, } size := r.ContentLength if size < 0 { - sizeStr := r.Header.Get("File-Size") + sizeStr := r.Header.Get("X-File-Size") if sizeStr != "" { size, err = strconv.ParseInt(sizeStr, 10, 64) if err != nil {
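
For reference, a minimal usage sketch of the chunked in-memory buffer.Reader that PATCH 3 reworks (Len to Size, int64 offsets) and PATCH 5 optimizes: appended slices are kept as separate chunks, and Read/ReadAt/Seek serve requests across chunk boundaries without copying everything into one backing slice. The sketch assumes the pkg/buffer API exactly as shown in the diffs above and is not part of the patch series itself.

```go
package main

import (
	"fmt"
	"io"

	"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
	// Each Append keeps its []byte as a separate chunk; Size is the running total.
	r := &buffer.Reader{}
	r.Append([]byte("github.com"))
	r.Append([]byte("/OpenList"))
	r.Append([]byte("Team/"))
	r.Append([]byte("OpenList"))

	// ReadAt can start inside one chunk and continue into the following ones.
	p := make([]byte, 8)
	n, err := r.ReadAt(p, r.Size()-8)
	fmt.Println(string(p[:n]), err) // OpenList <nil>

	// Reader also implements io.Seeker and io.ReaderAt; RangeRead relies on the
	// ReaderAt part when it wraps f.peekBuff in an io.NewSectionReader.
	if _, err := r.Seek(0, io.SeekStart); err != nil {
		panic(err)
	}
	all, _ := io.ReadAll(r)
	fmt.Println(string(all), r.Size())
}
```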
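PATCH 3 also adds pkg/buffer.PeekFile, which CacheFullAndWriter uses for uploads of unknown size: the bytes already peeked from the stream stay in a buffer.Reader, the remainder is spilled to a temporary file, and the two are stitched into one seekable, ReadAt-capable view whose Size is peeked plus spilled. Below is a rough sketch of that idea with the temp-file plumbing simplified; the real code goes through utils.CreateTempFile and registers cleanup via f.Add.

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/OpenListTeam/OpenList/v4/pkg/buffer"
)

func main() {
	src := strings.NewReader("peeked-bytes|rest-of-the-stream-spilled-to-disk")

	// Keep the first 13 bytes in memory, the way CacheFullAndWriter keeps
	// what it has already read in f.peekBuff.
	peek := &buffer.Reader{}
	head := make([]byte, 13)
	if _, err := io.ReadFull(src, head); err != nil {
		panic(err)
	}
	peek.Append(head)

	// Spill the remainder to a temp file (simplified; the patch uses utils.CreateTempFile).
	tmp, err := os.CreateTemp("", "openlist-peek-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()
	if _, err := io.Copy(tmp, src); err != nil {
		panic(err)
	}

	// PeekFile serves ReadAt/Seek across the memory/disk boundary and reports
	// the combined size, which CacheFullAndWriter assigns back to f.size for
	// unknown-size uploads.
	pf, err := buffer.NewPeekFile(peek, tmp)
	if err != nil {
		panic(err)
	}
	fmt.Println(pf.Size())

	p := make([]byte, 10)
	n, _ := pf.ReadAt(p, 8) // starts in the peek buffer, continues in the file
	fmt.Println(string(p[:n]))
}
```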
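PATCH 6 through 9 change how the upload size is determined: the handlers take r.ContentLength (which net/http reports as -1 for a chunked request), fall back to an X-File-Size header, and otherwise pass size = -1 down so op.Put caches the stream to learn the real size. A condensed sketch of that header logic follows; the uploadSize helper is only for illustration and does not exist in the patches.

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// uploadSize condenses the logic added to server/handles/fsup.go and
// server/webdav/webdav.go: prefer Content-Length, fall back to X-File-Size,
// and keep -1 to mean "unknown size, stream and cache to measure".
func uploadSize(r *http.Request) (int64, error) {
	size := r.ContentLength // -1 when the request is chunked / length unknown
	if size < 0 {
		if s := r.Header.Get("X-File-Size"); s != "" {
			parsed, err := strconv.ParseInt(s, 10, 64)
			if err != nil {
				return 0, err
			}
			size = parsed
		}
	}
	return size, nil
}

func main() {
	req, _ := http.NewRequest(http.MethodPut, "http://localhost/api/fs/put", nil)
	req.ContentLength = -1 // simulate Transfer-Encoding: chunked
	req.Header.Set("X-File-Size", "1048576")

	size, err := uploadSize(req)
	fmt.Println(size, err) // 1048576 <nil>
}
```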