add optimizations to improve performance of unversioned READs (#20128)

Allow non-inlined objects on disk to be read inline via
an unversioned ReadVersion() call; we only
need ReadXL() to resolve objects with multiple
versions.

The inline block size threshold is dynamic
and can be chosen by the user via `mc admin config set`

Other bonus things

- Start measuring internode TTFB performance.
- Set TCP_NODELAY, TCP_CORK for low latency
This commit is contained in:
Harshavardhana
2024-07-23 03:53:03 -07:00
committed by GitHub
parent c0e2886e37
commit 91805bcab6
8 changed files with 69 additions and 22 deletions

View File

@@ -283,7 +283,7 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
}
if unlockOnDefer {
unlockOnDefer = fi.InlineData()
unlockOnDefer = fi.InlineData() || len(fi.Data) > 0
}
pr, pw := xioutil.WaitPipe()
@@ -908,6 +908,8 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
}
rw.Lock()
// when its a versioned bucket and empty versionID - at totalResp == setDriveCount
// we must use rawFileInfo to resolve versions to figure out the latest version.
if opts.VersionID == "" && totalResp == er.setDriveCount {
fi, onlineMeta, onlineDisks, modTime, etag, err = calcQuorum(pickLatestQuorumFilesInfo(ctx,
rawArr, errs, bucket, object, readData, opts.InclFreeVersions, true))
@@ -915,7 +917,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
fi, onlineMeta, onlineDisks, modTime, etag, err = calcQuorum(metaArr, errs)
}
rw.Unlock()
if err == nil && fi.InlineData() {
if err == nil && (fi.InlineData() || len(fi.Data) > 0) {
break
}
}
@@ -1399,7 +1401,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
writers := make([]io.Writer, len(onlineDisks))
var inlineBuffers []*bytes.Buffer
if shardFileSize >= 0 {
if !opts.Versioned && shardFileSize < inlineBlock {
if !opts.Versioned && shardFileSize <= inlineBlock {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
} else if shardFileSize < inlineBlock/8 {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
@@ -1407,7 +1409,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
} else {
// If compressed, use actual size to determine.
if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 {
if !opts.Versioned && sz < inlineBlock {
if !opts.Versioned && sz <= inlineBlock {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))
} else if sz < inlineBlock/8 {
inlineBuffers = make([]*bytes.Buffer, len(onlineDisks))