Reduce allocations (#17584)
* Reduce allocations
* Add stringsHasPrefixFold, which compares string prefixes while ignoring case and without allocating.
* Reuse all msgp.Readers.
* Reuse metadata buffers when not reading data.
* Make type safe. Make the buffer 4K instead of 8K.
* Unslice.
This commit is contained in:
parent 1bf23374a3
commit ff5988f4e0
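
The commit message's first two bullets are about removing the strings.ToLower allocation from hot metadata loops. Before the diff, a hedged micro-benchmark sketch of that difference; it is illustrative only and not part of the commit, the prefix constant value and benchmark names are assumptions, and stringsHasPrefixFold is copied from the final hunk of this diff.

package cmd_test // hypothetical _test file, for illustration only

import (
	"strings"
	"testing"
)

const reservedPrefixLower = "x-minio-internal-" // assumed value for illustration

func stringsHasPrefixFold(s, prefix string) bool {
	// Try the exact match first; fall back to a case-insensitive compare.
	return len(s) >= len(prefix) && (s[0:len(prefix)] == prefix || strings.EqualFold(s[0:len(prefix)], prefix))
}

func BenchmarkHasPrefixToLower(b *testing.B) {
	k := "X-Minio-Internal-Replication-Status"
	for i := 0; i < b.N; i++ {
		// strings.ToLower copies k on every iteration because it contains upper-case bytes.
		_ = strings.HasPrefix(strings.ToLower(k), reservedPrefixLower)
	}
}

func BenchmarkHasPrefixFold(b *testing.B) {
	k := "X-Minio-Internal-Replication-Status"
	for i := 0; i < b.N; i++ {
		// strings.EqualFold compares in place, so no per-call allocation.
		_ = stringsHasPrefixFold(k, reservedPrefixLower)
	}
}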
@@ -153,7 +153,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 continue
 }
 
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 // Do not need to send any internal metadata
 // values to client.
 continue

@@ -166,7 +166,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
 
 var isSet bool
 for _, userMetadataPrefix := range userMetadataKeyPrefixes {
-if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(userMetadataPrefix)) {
+if !stringsHasPrefixFold(k, userMetadataPrefix) {
 continue
 }
 w.Header()[strings.ToLower(k)] = []string{v}

@@ -550,7 +550,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
 }
 for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 // Do not need to send any internal metadata
 // values to client.
 continue

@@ -693,7 +693,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES)
 }
 for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 // Do not need to send any internal metadata
 // values to client.
 continue

@@ -520,7 +520,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
 if len(r.Flags.Filter.Metadata) > 0 {
 for _, kv := range r.Flags.Filter.Metadata {
 for k, v := range oi.UserDefined {
-if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
+if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
 continue
 }
 // We only need to match x-amz-meta or standardHeaders

@@ -1075,7 +1075,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 if len(r.Flags.Filter.Metadata) > 0 {
 for _, kv := range r.Flags.Filter.Metadata {
 for k, v := range info.Metadata {
-if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
+if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
 continue
 }
 // We only need to match x-amz-meta or standardHeaders

@@ -289,7 +289,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob
 )
 encMetadata := make(map[string]string)
 for k, v := range oi.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 encMetadata[k] = v
 }
 }

@@ -401,7 +401,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 if len(r.Flags.Filter.Metadata) > 0 {
 for _, kv := range r.Flags.Filter.Metadata {
 for k, v := range info.Metadata {
-if !strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") && !isStandardHeader(k) {
+if !stringsHasPrefixFold(k, "x-amz-meta-") && !isStandardHeader(k) {
 continue
 }
 // We only need to match x-amz-meta or standardHeaders

@@ -691,7 +691,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O
 
 if rreq.Type == SelectRestoreRequest {
 for _, v := range rreq.OutputLocation.S3.UserMetadata {
-if !strings.HasPrefix(strings.ToLower(v.Name), "x-amz-meta") {
+if !stringsHasPrefixFold(v.Name, "x-amz-meta") {
 meta["x-amz-meta-"+v.Name] = v.Value
 continue
 }

@@ -679,7 +679,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
 func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string {
 meta := make(map[string]string, len(oi.UserDefined))
 for k, v := range oi.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 continue
 }
 

@@ -744,7 +744,7 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
 func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
 meta := make(map[string]string)
 for k, v := range objInfo.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 continue
 }
 if isStandardHeader(k) {

@@ -909,7 +909,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 for k, v := range oi1.UserDefined {
 var found bool
 for _, prefix := range compareKeys {
-if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+if !stringsHasPrefixFold(k, prefix) {
 continue
 }
 found = true

@@ -924,7 +924,7 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati
 for k, v := range oi2.Metadata {
 var found bool
 for _, prefix := range compareKeys {
-if !strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+if !stringsHasPrefixFold(k, prefix) {
 continue
 }
 found = true

@@ -139,14 +139,14 @@ func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *disk
 bkMeta := make(map[string]string, len(bkObjectInfo.UserDefined))
 cacheMeta := make(map[string]string, len(cacheObjInfo.UserDefined))
 for k, v := range bkObjectInfo.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 // Do not need to send any internal metadata
 continue
 }
 bkMeta[http.CanonicalHeaderKey(k)] = v
 }
 for k, v := range cacheObjInfo.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 // Do not need to send any internal metadata
 continue
 }

@@ -527,6 +527,10 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
 metadataArray := make([]*xlMetaV2, len(disks))
 metaFileInfos := make([]FileInfo, len(metadataArray))
 metadataShallowVersions := make([][]xlMetaV2ShallowVersion, len(disks))
+var v2bufs [][]byte
+if !readData {
+v2bufs = make([][]byte, len(disks))
+}
 
 g := errgroup.WithNErrs(len(disks))
 // Read `xl.meta` in parallel across disks.

@@ -540,6 +544,10 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
 if err != nil {
 return err
 }
+if !readData {
+// Save the buffer so we can reuse it.
+v2bufs[index] = rf.Buf
+}
 
 var xl xlMetaV2
 if err = xl.LoadOrConvert(rf.Buf); err != nil {

@@ -623,6 +631,11 @@ func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, r
 }
 metaFileInfos[index].DiskMTime = diskMTime
 }
+if !readData {
+for i := range v2bufs {
+metaDataPoolPut(v2bufs[i])
+}
+}
 
 // Return all the metadata.
 return metaFileInfos, errs
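
The three readAllXL hunks above capture each xl.meta buffer in v2bufs while the metadata is merged, then return the buffers through metaDataPoolPut once readData is false and they are no longer referenced ("Reuse metadata buffers when not reading data" in the commit message). metaDataPoolPut itself is not shown in this diff; the sketch below is only an assumed shape of such a sync.Pool-backed byte-buffer pool, with hypothetical names, not MinIO's actual implementation.

package main

import "sync"

// Pooled byte buffers: Get hands out a zero-length slice with spare capacity,
// Put recycles it so the next caller does not have to allocate.
var metaBufPool = sync.Pool{New: func() interface{} { return make([]byte, 0, 16<<10) }}

func metaBufGet() []byte {
	return metaBufPool.Get().([]byte)[:0]
}

func metaBufPut(buf []byte) {
	// Keep only reasonably sized buffers; let tiny or huge ones be collected.
	if cap(buf) >= 1<<10 && cap(buf) <= 4<<20 {
		metaBufPool.Put(buf[:0])
	}
}

func main() {
	b := metaBufGet()
	b = append(b, "xl.meta payload goes here"...)
	metaBufPut(b)
}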
@@ -268,7 +268,7 @@ func (args eventArgs) ToEvent(escape bool) event.Event {
 newEvent.S3.Object.ContentType = args.Object.ContentType
 newEvent.S3.Object.UserMetadata = make(map[string]string, len(args.Object.UserDefined))
 for k, v := range args.Object.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(strings.ToLower(k), ReservedMetadataPrefixLower) {
 continue
 }
 newEvent.S3.Object.UserMetadata[k] = v

@@ -71,7 +71,7 @@ const (
 // and must not set by clients
 func containsReservedMetadata(header http.Header) bool {
 for key := range header {
-if strings.HasPrefix(strings.ToLower(key), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(key, ReservedMetadataPrefix) {
 return true
 }
 }

@@ -87,7 +87,7 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool {
 length := len(key) + len(header.Get(key))
 size += length
 for _, prefix := range userMetadataKeyPrefixes {
-if strings.HasPrefix(strings.ToLower(key), prefix) {
+if stringsHasPrefixFold(key, prefix) {
 usersize += length
 break
 }

@@ -184,7 +184,7 @@ func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[
 
 for key := range v {
 for _, prefix := range userMetadataKeyPrefixes {
-if !strings.HasPrefix(strings.ToLower(key), strings.ToLower(prefix)) {
+if !stringsHasPrefixFold(key, prefix) {
 continue
 }
 value, ok := nv[http.CanonicalHeaderKey(key)]

@@ -73,7 +73,7 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
 
 func getLockArgs(r *http.Request) (args dsync.LockArgs, err error) {
 dec := msgpNewReader(io.LimitReader(r.Body, 1000*humanize.KiByte))
-defer readMsgpReaderPool.Put(dec)
+defer readMsgpReaderPoolPut(dec)
 err = args.DecodeMsg(dec)
 return args, err
 }

@@ -255,12 +255,13 @@ type metacacheReader struct {
 func newMetacacheReader(r io.Reader) *metacacheReader {
 dec := s2DecPool.Get().(*s2.Reader)
 dec.Reset(r)
-mr := msgp.NewReader(dec)
+mr := msgpNewReader(dec)
 return &metacacheReader{
 mr: mr,
 closer: func() {
 dec.Reset(nil)
 s2DecPool.Put(dec)
+readMsgpReaderPoolPut(mr)
 },
 creator: func() error {
 v, err := mr.ReadByte()

@@ -402,7 +402,7 @@ func extractETag(metadata map[string]string) string {
 // to do case insensitive checks.
 func HasPrefix(s string, prefix string) bool {
 if runtime.GOOS == globalWindowsOSName {
-return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
+return stringsHasPrefixFold(s, prefix)
 }
 return strings.HasPrefix(s, prefix)
 }

@@ -1261,7 +1261,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
 }
 
 for k, v := range srcInfo.UserDefined {
-if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
+if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
 encMetadata[k] = v
 }
 }

@@ -22,7 +22,6 @@ import (
 "io"
 "net/http"
 "net/url"
-"strings"
 "time"
 
 "github.com/klauspost/compress/gzhttp"

@@ -182,7 +181,7 @@ func StatusCode(text string) int {
 func fwdHeadersToS3(h http.Header, w http.ResponseWriter) {
 const trim = "x-amz-fwd-header-"
 for k, v := range h {
-if strings.HasPrefix(strings.ToLower(k), trim) {
+if stringsHasPrefixFold(k, trim) {
 w.Header()[k[len(trim):]] = v
 }
 }

@@ -62,8 +62,9 @@ func getCanonicalHeaders(signedHeaders http.Header) string {
 var headers []string
 vals := make(http.Header)
 for k, vv := range signedHeaders {
-headers = append(headers, strings.ToLower(k))
-vals[strings.ToLower(k)] = vv
+k = strings.ToLower(k)
+headers = append(headers, k)
+vals[k] = vv
 }
 sort.Strings(headers)
 

@@ -227,7 +227,8 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
 rw.CloseWithError(waitForHTTPStream(respBody, rw))
 }()
 
-ms := msgp.NewReader(rr)
+ms := msgpNewReader(rr)
+defer readMsgpReaderPoolPut(ms)
 for {
 // Read whether it is an update.
 upd, err := ms.ReadBool()

@@ -498,16 +499,24 @@ var readMsgpReaderPool = sync.Pool{New: func() interface{} { return &msgp.Reader
 
 // mspNewReader returns a *Reader that reads from the provided reader.
 // The reader will be buffered.
+// Return with readMsgpReaderPoolPut when done.
 func msgpNewReader(r io.Reader) *msgp.Reader {
 p := readMsgpReaderPool.Get().(*msgp.Reader)
 if p.R == nil {
-p.R = xbufio.NewReaderSize(r, 8<<10)
+p.R = xbufio.NewReaderSize(r, 4<<10)
 } else {
 p.R.Reset(r)
 }
 return p
 }
 
+// readMsgpReaderPoolPut can be used to reuse a *msgp.Reader.
+func readMsgpReaderPoolPut(r *msgp.Reader) {
+if r != nil {
+readMsgpReaderPool.Put(r)
+}
+}
+
 func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path, versionID string, readData bool) (fi FileInfo, err error) {
 values := make(url.Values)
 values.Set(storageRESTVolume, volume)
@@ -522,7 +531,7 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path,
 defer xhttp.DrainBody(respBody)
 
 dec := msgpNewReader(respBody)
-defer readMsgpReaderPool.Put(dec)
+defer readMsgpReaderPoolPut(dec)
 
 err = fi.DecodeMsg(dec)
 return fi, err

@@ -541,7 +550,7 @@ func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path
 defer xhttp.DrainBody(respBody)
 
 dec := msgpNewReader(respBody)
-defer readMsgpReaderPool.Put(dec)
+defer readMsgpReaderPoolPut(dec)
 
 err = rf.DecodeMsg(dec)
 return rf, err

@@ -735,7 +744,7 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path
 return stat, err
 }
 rd := msgpNewReader(respReader)
-defer readMsgpReaderPool.Put(rd)
+defer readMsgpReaderPoolPut(rd)
 for {
 var st StatInfo
 err = st.DecodeMsg(rd)

@@ -774,6 +783,7 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti
 pw.CloseWithError(waitForHTTPStream(respBody, pw))
 }()
 mr := msgp.NewReader(pr)
+defer readMsgpReaderPoolPut(mr)
 for {
 var file ReadMultipleResp
 if err := file.DecodeMsg(mr); err != nil {

@@ -685,7 +685,8 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
 }
 
 versions := make([]FileInfoVersions, totalVersions)
-decoder := msgp.NewReader(r.Body)
+decoder := msgpNewReader(r.Body)
+defer readMsgpReaderPoolPut(decoder)
 for i := 0; i < totalVersions; i++ {
 dst := &versions[i]
 if err := dst.DecodeMsg(decoder); err != nil {

@@ -1294,7 +1295,7 @@ func (s *storageRESTServer) ReadMultiple(w http.ResponseWriter, r *http.Request)
 
 var req ReadMultipleReq
 mr := msgpNewReader(r.Body)
-defer readMsgpReaderPool.Put(mr)
+defer readMsgpReaderPoolPut(mr)
 err := req.DecodeMsg(mr)
 if err != nil {
 rw.CloseWithError(err)

@@ -29,7 +29,6 @@ import (
 "time"
 
 "github.com/minio/minio/internal/logger"
-"github.com/tinylib/msgp/msgp"
 )
 
 //go:generate msgp -file $GOFILE -unexported

@@ -120,7 +119,8 @@ func (jd *tierDiskJournal) WalkEntries(ctx context.Context, fn walkFn) {
 return
 }
 defer ro.Close()
-mr := msgp.NewReader(ro)
+mr := msgpNewReader(ro)
+defer readMsgpReaderPoolPut(mr)
 
 done := false
 for {

@@ -1286,3 +1286,9 @@ func unwrapAll(err error) error {
 err = werr
 }
 }
+
+// stringsHasPrefixFold tests whether the string s begins with prefix ignoring case.
+func stringsHasPrefixFold(s, prefix string) bool {
+// Test match with case first.
+return len(s) >= len(prefix) && (s[0:len(prefix)] == prefix || strings.EqualFold(s[0:len(prefix)], prefix))
+}
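
Finally, a brief usage sketch of the new helper's semantics (illustrative cases, not taken from the commit's tests): the exact-prefix comparison is tried first, which is effectively free for keys that are already lower-case, and strings.EqualFold covers mixed-case keys such as canonical HTTP headers.

package main

import (
	"fmt"
	"strings"
)

// Copied from the hunk above so the example is self-contained.
func stringsHasPrefixFold(s, prefix string) bool {
	return len(s) >= len(prefix) && (s[0:len(prefix)] == prefix || strings.EqualFold(s[0:len(prefix)], prefix))
}

func main() {
	fmt.Println(stringsHasPrefixFold("x-amz-meta-user", "x-amz-meta-")) // true: exact prefix, no folding needed
	fmt.Println(stringsHasPrefixFold("X-Amz-Meta-User", "x-amz-meta-")) // true: EqualFold absorbs the case difference
	fmt.Println(stringsHasPrefixFold("x-amz", "x-amz-meta-"))           // false: shorter than the prefix
}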