Reduce allocations (#17584)

* Reduce allocations

* Add stringsHasPrefixFold which can compare string prefixes, while ignoring case and not allocating.
* Reuse all msgp.Readers
* Reuse metadata buffers when not reading data.

* Make type safe. Make buffer 4K instead of 8K.

* Unslice
This commit is contained in:
Klaus Post 2023-07-06 16:02:08 -07:00 committed by GitHub
parent 1bf23374a3
commit ff5988f4e0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 67 additions and 36 deletions

View File

@ -153,7 +153,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
continue continue
} }
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata // Do not need to send any internal metadata
// values to client. // values to client.
continue continue
@ -166,7 +166,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
var isSet bool var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes { for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) { if !stringsHasPrefixFold(k, userMetadataPrefix) {
continue continue
} }
w.Header()[strings.ToLower(k)] = []string{v} w.Header()[strings.ToLower(k)] = []string{v}

View File

@ -550,7 +550,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES) content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
} }
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) { for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata // Do not need to send any internal metadata
// values to client. // values to client.
continue continue
@ -693,7 +693,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES) content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
} }
for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) { for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata // Do not need to send any internal metadata
// values to client. // values to client.
continue continue

View File

@ -520,7 +520,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
if len(r.Flags.Filter.Metadata) > 0 { if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata { for _, kv := range r.Flags.Filter.Metadata {
for k, v := range oi.UserDefined { for k, v := range oi.UserDefined {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) { if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue continue
} }
// We only need to match x-amz-meta or standardHeaders // We only need to match x-amz-meta or standardHeaders
@ -1075,7 +1075,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if len(r.Flags.Filter.Metadata) > 0 { if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata { for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata { for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) { if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue continue
} }
// We only need to match x-amz-meta or standardHeaders // We only need to match x-amz-meta or standardHeaders

View File

@ -289,7 +289,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
) )
encMetadata := make(map[string]string) encMetadata := make(map[string]string)
for k, v := range oi.UserDefined { for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
encMetadata[k] = v encMetadata[k] = v
} }
} }
@ -401,7 +401,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if len(r.Flags.Filter.Metadata) > 0 { if len(r.Flags.Filter.Metadata) > 0 {
for _, kv := range r.Flags.Filter.Metadata { for _, kv := range r.Flags.Filter.Metadata {
for k, v := range info.Metadata { for k, v := range info.Metadata {
if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) { if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
continue continue
} }
// We only need to match x-amz-meta or standardHeaders // We only need to match x-amz-meta or standardHeaders

View File

@ -691,7 +691,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
if rreq.Type == SelectRestoreRequest { if rreq.Type == SelectRestoreRequest {
for _, v := range rreq.OutputLocation.S3.UserMetadata { for _, v := range rreq.OutputLocation.S3.UserMetadata {
if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") { if !stringsHasPrefixFold(v.Name, "x-amz-meta") {
meta["x-amz-meta-"+v.Name] = v.Value meta["x-amz-meta-"+v.Name] = v.Value
continue continue
} }

View File

@ -679,7 +679,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string { func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
meta := make(map[string]string, len(oi.UserDefined)) meta := make(map[string]string, len(oi.UserDefined))
for k, v := range oi.UserDefined { for k, v := range oi.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
continue continue
} }
@ -744,7 +744,7 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) { func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
meta := make(map[string]string) meta := make(map[string]string)
for k, v := range objInfo.UserDefined { for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
continue continue
} }
if isStandardHeader(k) { if isStandardHeader(k) {
@ -909,7 +909,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
for k, v := range oi1.UserDefined { for k, v := range oi1.UserDefined {
var found bool var found bool
for _, prefix := range compareKeys { for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { if !stringsHasPrefixFold(k, prefix) {
continue continue
} }
found = true found = true
@ -924,7 +924,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
for k, v := range oi2.Metadata { for k, v := range oi2.Metadata {
var found bool var found bool
for _, prefix := range compareKeys { for _, prefix := range compareKeys {
if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { if !stringsHasPrefixFold(k, prefix) {
continue continue
} }
found = true found = true

View File

@ -139,14 +139,14 @@ func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *disk
bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined)) bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined)) cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
for k, v := range bkObjectInfo.UserDefined { for k, v := range bkObjectInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata // Do not need to send any internal metadata
continue continue
} }
bkMeta[http.CanonicalHeaderKey(k)] = v bkMeta[http.CanonicalHeaderKey(k)] = v
} }
for k, v := range cacheObjInfo.UserDefined { for k, v := range cacheObjInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata // Do not need to send any internal metadata
continue continue
} }

View File

@ -527,6 +527,10 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
metadataArray := make([]*xlMetaV2, len(disks)) metadataArray := make([]*xlMetaV2, len(disks))
metaFileInfos := make([]FileInfo, len(metadataArray)) metaFileInfos := make([]FileInfo, len(metadataArray))
metadataShallowVersions := make([][]xlMetaV2ShallowVersion, len(disks)) metadataShallowVersions := make([][]xlMetaV2ShallowVersion, len(disks))
var v2bufs [][]byte
if !readData {
v2bufs = make([][]byte, len(disks))
}
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
// Read `xl.meta` in parallel across disks. // Read `xl.meta` in parallel across disks.
@ -540,6 +544,10 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
if err != nil { if err != nil {
return err return err
} }
if !readData {
// Save the buffer so we can reuse it.
v2bufs[index] = rf.Buf
}
var xl xlMetaV2 var xl xlMetaV2
if err = xl.LoadOrConvert(rf.Buf); err != nil { if err = xl.LoadOrConvert(rf.Buf); err != nil {
@ -623,6 +631,11 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
} }
metaFileInfos[index].DiskMTime = diskMTime metaFileInfos[index].DiskMTime = diskMTime
} }
if !readData {
for i := range v2bufs {
metaDataPoolPut(v2bufs[i])
}
}
// Return all the metadata. // Return all the metadata.
return metaFileInfos, errs return metaFileInfos, errs

View File

@ -268,7 +268,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
newEvent.S3.Object.ContentType = args.Object.ContentType newEvent.S3.Object.ContentType = args.Object.ContentType
newEvent.S3.Object.UserMetadata = make(map[string]string, len(args.Object.UserDefined)) newEvent.S3.Object.UserMetadata = make(map[string]string, len(args.Object.UserDefined))
for k, v := range args.Object.UserDefined { for k, v := range args.Object.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(strings.ToLower(k), ReservedMetadataPrefixLower) {
continue continue
} }
newEvent.S3.Object.UserMetadata[k] = v newEvent.S3.Object.UserMetadata[k] = v

View File

@ -71,7 +71,7 @@ const (
// and must not set by clients // and must not set by clients
func containsReservedMetadata(header http.Header) bool { func containsReservedMetadata(header http.Header) bool {
for key := range header { for key := range header {
if strings.HasPrefix(strings.ToLower(key), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(key, ReservedMetadataPrefix) {
return true return true
} }
} }
@ -87,7 +87,7 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool {
length := len(key) + len(header.Get(key)) length := len(key) + len(header.Get(key))
size += length size += length
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if strings.HasPrefix(strings.ToLower(key), prefix) { if stringsHasPrefixFold(key, prefix) {
usersize += length usersize += length
break break
} }

View File

@ -184,7 +184,7 @@ func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[
for key := range v { for key := range v {
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) { if !stringsHasPrefixFold(key, prefix) {
continue continue
} }
value, ok := nv[http.CanonicalHeaderKey(key)] value, ok := nv[http.CanonicalHeaderKey(key)]

View File

@ -73,7 +73,7 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
func getLockArgs(r *http.Request) (args dsync.LockArgs, err error) { func getLockArgs(r *http.Request) (args dsync.LockArgs, err error) {
dec := msgpNewReader(io.LimitReader(r.Body, 1000*humanize.KiByte)) dec := msgpNewReader(io.LimitReader(r.Body, 1000*humanize.KiByte))
defer readMsgpReaderPool.Put(dec) defer readMsgpReaderPoolPut(dec)
err = args.DecodeMsg(dec) err = args.DecodeMsg(dec)
return args, err return args, err
} }

View File

@ -255,12 +255,13 @@ type metacacheReader struct {
func newMetacacheReader(r io.Reader) *metacacheReader { func newMetacacheReader(r io.Reader) *metacacheReader {
dec := s2DecPool.Get().(*s2.Reader) dec := s2DecPool.Get().(*s2.Reader)
dec.Reset(r) dec.Reset(r)
mr := msgp.NewReader(dec) mr := msgpNewReader(dec)
return &metacacheReader{ return &metacacheReader{
mr: mr, mr: mr,
closer: func() { closer: func() {
dec.Reset(nil) dec.Reset(nil)
s2DecPool.Put(dec) s2DecPool.Put(dec)
readMsgpReaderPoolPut(mr)
}, },
creator: func() error { creator: func() error {
v, err := mr.ReadByte() v, err := mr.ReadByte()

View File

@ -402,7 +402,7 @@ func extractETag(metadata map[string]string) string {
// to do case insensitive checks. // to do case insensitive checks.
func HasPrefix(s string, prefix string) bool { func HasPrefix(s string, prefix string) bool {
if runtime.GOOS == globalWindowsOSName { if runtime.GOOS == globalWindowsOSName {
return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)) return stringsHasPrefixFold(s, prefix)
} }
return strings.HasPrefix(s, prefix) return strings.HasPrefix(s, prefix)
} }

View File

@ -1261,7 +1261,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
} }
for k, v := range srcInfo.UserDefined { for k, v := range srcInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
encMetadata[k] = v encMetadata[k] = v
} }
} }

View File

@ -22,7 +22,6 @@ import (
"io" "io"
"net/http" "net/http"
"net/url" "net/url"
"strings"
"time" "time"
"github.com/klauspost/compress/gzhttp" "github.com/klauspost/compress/gzhttp"
@ -182,7 +181,7 @@ func StatusCode(text string) int {
func fwdHeadersToS3(h http.Header, w http.ResponseWriter) { func fwdHeadersToS3(h http.Header, w http.ResponseWriter) {
const trim = "x-amz-fwd-header-" const trim = "x-amz-fwd-header-"
for k, v := range h { for k, v := range h {
if strings.HasPrefix(strings.ToLower(k), trim) { if stringsHasPrefixFold(k, trim) {
w.Header()[k[len(trim):]] = v w.Header()[k[len(trim):]] = v
} }
} }

View File

@ -62,8 +62,9 @@ func getCanonicalHeaders(signedHeaders http.Header) string {
var headers []string var headers []string
vals := make(http.Header) vals := make(http.Header)
for k, vv := range signedHeaders { for k, vv := range signedHeaders {
headers = append(headers, strings.ToLower(k)) k = strings.ToLower(k)
vals[strings.ToLower(k)] = vv headers = append(headers, k)
vals[k] = vv
} }
sort.Strings(headers) sort.Strings(headers)

View File

@ -227,7 +227,8 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
rw.CloseWithError(waitForHTTPStream(respBody, rw)) rw.CloseWithError(waitForHTTPStream(respBody, rw))
}() }()
ms := msgp.NewReader(rr) ms := msgpNewReader(rr)
defer readMsgpReaderPoolPut(ms)
for { for {
// Read whether it is an update. // Read whether it is an update.
upd, err := ms.ReadBool() upd, err := ms.ReadBool()
@ -498,16 +499,24 @@ var readMsgpReaderPool = sync.Pool{New: func() interface{} { return &msgp.Reader
// msgpNewReader returns a *Reader that reads from the provided reader. // The reader will be buffered.
// The reader will be buffered. // The reader will be buffered.
// Return with readMsgpReaderPoolPut when done.
func msgpNewReader(r io.Reader) *msgp.Reader { func msgpNewReader(r io.Reader) *msgp.Reader {
p := readMsgpReaderPool.Get().(*msgp.Reader) p := readMsgpReaderPool.Get().(*msgp.Reader)
if p.R == nil { if p.R == nil {
p.R = xbufio.NewReaderSize(r, 8<<10) p.R = xbufio.NewReaderSize(r, 4<<10)
} else { } else {
p.R.Reset(r) p.R.Reset(r)
} }
return p return p
} }
// readMsgpReaderPoolPut returns a *msgp.Reader obtained from msgpNewReader
// back to readMsgpReaderPool so it can be reused.
// A nil reader is silently ignored, so callers may defer this unconditionally.
func readMsgpReaderPoolPut(r *msgp.Reader) {
	if r == nil {
		return
	}
	readMsgpReaderPool.Put(r)
}
func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) { func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
values := make(url.Values) values := make(url.Values)
values.Set(storageRESTVolume, volume) values.Set(storageRESTVolume, volume)
@ -522,7 +531,7 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path,
defer xhttp.DrainBody(respBody) defer xhttp.DrainBody(respBody)
dec := msgpNewReader(respBody) dec := msgpNewReader(respBody)
defer readMsgpReaderPool.Put(dec) defer readMsgpReaderPoolPut(dec)
err = fi.DecodeMsg(dec) err = fi.DecodeMsg(dec)
return fi, err return fi, err
@ -541,7 +550,7 @@ func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path
defer xhttp.DrainBody(respBody) defer xhttp.DrainBody(respBody)
dec := msgpNewReader(respBody) dec := msgpNewReader(respBody)
defer readMsgpReaderPool.Put(dec) defer readMsgpReaderPoolPut(dec)
err = rf.DecodeMsg(dec) err = rf.DecodeMsg(dec)
return rf, err return rf, err
@ -735,7 +744,7 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path
return stat, err return stat, err
} }
rd := msgpNewReader(respReader) rd := msgpNewReader(respReader)
defer readMsgpReaderPool.Put(rd) defer readMsgpReaderPoolPut(rd)
for { for {
var st StatInfo var st StatInfo
err = st.DecodeMsg(rd) err = st.DecodeMsg(rd)
@ -774,6 +783,7 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti
pw.CloseWithError(waitForHTTPStream(respBody, pw)) pw.CloseWithError(waitForHTTPStream(respBody, pw))
}() }()
mr := msgp.NewReader(pr) mr := msgp.NewReader(pr)
defer readMsgpReaderPoolPut(mr)
for { for {
var file ReadMultipleResp var file ReadMultipleResp
if err := file.DecodeMsg(mr); err != nil { if err := file.DecodeMsg(mr); err != nil {

View File

@ -685,7 +685,8 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
} }
versions := make([]FileInfoVersions, totalVersions) versions := make([]FileInfoVersions, totalVersions)
decoder := msgp.NewReader(r.Body) decoder := msgpNewReader(r.Body)
defer readMsgpReaderPoolPut(decoder)
for i := 0; i < totalVersions; i++ { for i := 0; i < totalVersions; i++ {
dst := &versions[i] dst := &versions[i]
if err := dst.DecodeMsg(decoder); err != nil { if err := dst.DecodeMsg(decoder); err != nil {
@ -1294,7 +1295,7 @@ func (s *storageRESTServer) ReadMultiple(w http.ResponseWriter, r *http.Request)
var req ReadMultipleReq var req ReadMultipleReq
mr := msgpNewReader(r.Body) mr := msgpNewReader(r.Body)
defer readMsgpReaderPool.Put(mr) defer readMsgpReaderPoolPut(mr)
err := req.DecodeMsg(mr) err := req.DecodeMsg(mr)
if err != nil { if err != nil {
rw.CloseWithError(err) rw.CloseWithError(err)

View File

@ -29,7 +29,6 @@ import (
"time" "time"
"github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
) )
//go:generate msgp -file $GOFILE -unexported //go:generate msgp -file $GOFILE -unexported
@ -120,7 +119,8 @@ func (jd *tierDiskJournal) WalkEntries(ctx context.Context, fn walkFn) {
return return
} }
defer ro.Close() defer ro.Close()
mr := msgp.NewReader(ro) mr := msgpNewReader(ro)
defer readMsgpReaderPoolPut(mr)
done := false done := false
for { for {

View File

@ -1286,3 +1286,9 @@ func unwrapAll(err error) error {
err = werr err = werr
} }
} }
// stringsHasPrefixFold reports whether the string s begins with prefix,
// ignoring case, without allocating.
//
// The exact (case-sensitive) comparison is tried first since it is the
// common and cheapest case; strings.EqualFold handles the fallback.
// Note that the prefix is compared against the first len(prefix) bytes of s,
// so folds that change byte length are not matched — TODO confirm callers
// only pass ASCII prefixes, where byte lengths are stable under folding.
func stringsHasPrefixFold(s, prefix string) bool {
	// Test match with case first.
	return len(s) >= len(prefix) && (s[:len(prefix)] == prefix || strings.EqualFold(s[:len(prefix)], prefix))
}