Reuse small buffers (#12948)

When reading metadata, allow reuse of buffers in certain cases.
Take the low-hanging fruit.

Reduce GC overhead when listing.
Klaus Post 2021-08-12 23:27:22 +02:00 committed by GitHub
parent 3eac02f676
commit 89febdb3d6
5 changed files with 34 additions and 14 deletions
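
The mechanism behind all five files is a sync.Pool of byte slices: metadata reads take a buffer from the pool instead of allocating, and code paths that provably hold the last reference hand it back. A minimal sketch of that pattern, with illustrative names (bufDefault, bufPool, and readSome are not the commit's identifiers):

package example

import (
	"io"
	"sync"
)

// Default capacity for pooled buffers, mirroring the commit's
// 4 KiB initial-read size.
const bufDefault = 4 << 10

// bufPool hands out reusable byte slices so short-lived metadata
// reads do not allocate (and later GC) a fresh buffer per call.
var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, bufDefault) },
}

// readSome fills a pooled buffer with exactly n bytes. The caller
// owns the result and should hand it back via bufPool.Put once no
// slice of it is referenced anymore.
func readSome(r io.Reader, n int) ([]byte, error) {
	buf := bufPool.Get().([]byte)
	if n > cap(buf) {
		buf = make([]byte, 0, n) // pooled buffer too small: allocate
	}
	buf = buf[:n]
	_, err := io.ReadFull(r, buf)
	return buf, err
}

The "low-hanging fruit" framing matters: a buffer may only be recycled when no returned object still references a slice of it, so every Put below is gated on an ownership argument.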

@@ -36,11 +36,11 @@ import (
 const (
 	// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
-	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
-	maxObjectList     = metacacheBlockSize - (metacacheBlockSize / 10) // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
-	maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
-	maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
-	maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
+	iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
+	maxObjectList     = 1000  // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
+	maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
+	maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
+	maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
 )
 
 // LocationResponse - format for location response.

@@ -56,10 +56,11 @@ const metacacheStreamVersion = 2
 // metacacheWriter provides a serializer of metacache objects.
 type metacacheWriter struct {
-	mw        *msgp.Writer
-	creator   func() error
-	closer    func() error
-	blockSize int
+	mw          *msgp.Writer
+	creator     func() error
+	closer      func() error
+	blockSize   int
+	reuseBlocks bool
 
 	streamErr error
 	streamWg  sync.WaitGroup
@@ -141,6 +142,9 @@ func (w *metacacheWriter) write(objs ...metaCacheEntry) error {
 		if err != nil {
 			return err
 		}
+		if w.reuseBlocks && cap(o.metadata) >= metaDataReadDefault {
+			metaDataPool.Put(o.metadata)
+		}
 	}
 
 	return nil
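
The two hunks above add the ownership gate on the writer side: entry buffers are recycled only when the caller has declared, via reuseBlocks, that nobody else sees the entries. A compilable sketch of that shape, under hypothetical names:

package example

import "sync"

const bufDefault = 4 << 10

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, bufDefault) },
}

type entry struct{ metadata []byte }

// writer recycles entry buffers only when the caller has promised,
// via reuseBuffers, that the entries are not shared.
type writer struct {
	reuseBuffers bool
}

func (w *writer) write(entries ...entry) {
	for _, e := range entries {
		// ... entry serialized to the output stream here ...

		// Only recycle buffers at least as large as the pool's
		// default, so Get never hands out an undersized slice.
		if w.reuseBuffers && cap(e.metadata) >= bufDefault {
			bufPool.Put(e.metadata)
		}
	}
}

The next hunk shows the one call site that can safely set the flag: WalkDir streams entries straight to the wire and shares them with no one.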

@@ -76,6 +76,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error {
 	// Use a small block size to start sending quickly
 	w := newMetacacheWriter(wr, 16<<10)
+	w.reuseBlocks = true // We are not sharing results, so reuse buffers.
 	defer w.Close()
 	out, err := w.stream()
 	if err != nil {

@@ -25,6 +25,7 @@ import (
 	"io"
 	"sort"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/cespare/xxhash/v2"
@@ -1430,20 +1431,26 @@ func (z xlMetaV2) ToFileInfo(volume, path, versionID string) (fi FileInfo, err error) {
 	return FileInfo{}, errFileVersionNotFound
 }
 
+// Read at most this much on initial read.
+const metaDataReadDefault = 4 << 10
+
+// Return used metadata byte slices here.
+var metaDataPool = sync.Pool{New: func() interface{} { return make([]byte, 0, metaDataReadDefault) }}
+
 // readXLMetaNoData will load the metadata, but skip data segments.
 // This should only be used when data is never interesting.
 // If data is not xlv2, it is returned in full.
 func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) {
-	// Read at most this much on initial read.
-	const readDefault = 4 << 10
 	initial := size
 	hasFull := true
-	if initial > readDefault {
-		initial = readDefault
+	if initial > metaDataReadDefault {
+		initial = metaDataReadDefault
 		hasFull = false
 	}
 
-	buf := make([]byte, initial)
+	buf := metaDataPool.Get().([]byte)
+	buf = buf[:initial]
 	_, err := io.ReadFull(r, buf)
 	if err != nil {
 		return nil, fmt.Errorf("readXLMetaNoData.ReadFull: %w", err)
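
The hunk above also changes the read side: readXLMetaNoData now reads at most metaDataReadDefault bytes up front, into a pooled buffer, and tracks whether that single read already covered the file. A sketch of that capped first read, again with hypothetical names (readDefault, readPool, readHeader):

package example

import (
	"io"
	"sync"
)

const readDefault = 4 << 10 // cap on the speculative first read

var readPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, readDefault) },
}

// readHeader reads min(size, readDefault) bytes into a pooled
// buffer and reports whether that single read covered the whole
// object, so the caller knows if a follow-up read is needed.
func readHeader(r io.Reader, size int64) ([]byte, bool, error) {
	initial, hasFull := size, true
	if initial > readDefault {
		initial, hasFull = readDefault, false
	}
	buf := readPool.Get().([]byte)
	buf = buf[:initial] // safe: initial <= readDefault <= cap(buf)
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, false, err
	}
	return buf, hasFull, nil
}

The 4 KiB default presumably reflects the expectation that typical metadata fits in the first read; anything larger falls back to a follow-up read (hasFull == false).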

@@ -1082,6 +1082,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
 			err = osErrToFileErr(err)
 		}
 	}
+
 	if err != nil {
 		if err == errFileNotFound {
 			if err = s.renameLegacyMetadata(volumeDir, path); err != nil {
@@ -1120,6 +1121,13 @@ func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
 		return fi, err
 	}
 
+	if len(fi.Data) == 0 && cap(buf) >= metaDataReadDefault && cap(buf) < metaDataReadDefault*4 {
+		// We did not read inline data, so we have no references.
+		defer func(b []byte) {
+			metaDataPool.Put(buf)
+		}(buf)
+	}
+
 	if readData {
 		if len(fi.Data) > 0 || fi.Size == 0 {
 			if len(fi.Data) > 0 {
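
ReadVersion's release is guarded twice: len(fi.Data) == 0 ensures no inline data still references the buffer, and the capacity check keeps the pool useful, accepting nothing smaller than the default read size and nothing four times larger that would pin big allocations. The same guard, condensed into an illustrative helper:

package example

import "sync"

const bufDefault = 4 << 10

var bufPool = sync.Pool{
	New: func() interface{} { return make([]byte, 0, bufDefault) },
}

// recycle returns buf to the pool only when nothing visible to the
// caller references it and its capacity sits in the useful band
// [bufDefault, 4*bufDefault): smaller slices are useless to future
// Gets, larger ones would keep big allocations alive.
func recycle(buf []byte, stillReferenced bool) {
	if stillReferenced {
		return // a caller-visible slice still points into buf
	}
	if c := cap(buf); c >= bufDefault && c < bufDefault*4 {
		bufPool.Put(buf[:0]) // length reset; capacity is what matters
	}
}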