mirror of https://github.com/minio/minio.git
fix: pre-allocate certain slices with expected capacity (#12044)
Avoids repeated tiny append()-driven allocations by pre-allocating slices whose final size is known up front.
parent f1bc857f66
commit e85b28398b
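The pattern throughout the hunks below: when a slice's final length (or a tight upper bound on it) is known before the loop, make([]T, 0, n) reserves the backing array once, whereas appending to a nil slice reallocates and copies whenever capacity runs out. A minimal, self-contained sketch of the two idioms; the names are illustrative, not from the MinIO codebase:

package main

import "fmt"

// collectGrow starts from a nil slice; append must repeatedly allocate a
// larger backing array and copy the old contents as the slice grows.
func collectGrow(n int) []int {
	var out []int
	for i := 0; i < n; i++ {
		out = append(out, i)
	}
	return out
}

// collectPrealloc reserves capacity for all n elements in one allocation;
// every append then writes into the existing backing array.
func collectPrealloc(n int) []int {
	out := make([]int, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, i)
	}
	return out
}

func main() {
	fmt.Println(len(collectGrow(1000)), len(collectPrealloc(1000)))
}

With n known in advance, the preallocated version performs one allocation instead of a logarithmic number of grow-and-copy steps.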
@@ -612,7 +612,7 @@ func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
 		storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
 	}
 
-	var errs []error
+	errs := make([]error, 0, len(s.sets)*s.setDriveCount)
 	for i := range s.sets {
 		errs = append(errs, storageInfoErrs[i]...)
 	}
@@ -663,7 +663,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
 func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
 
 	// Compute the local disks eligible for meta volumes (re)initialization
-	var disksToInit []StorageAPI
+	disksToInit := make([]StorageAPI, 0, len(storageDisks))
 	for index := range storageDisks {
 		if formats[index] == nil || storageDisks[index] == nil || !storageDisks[index].IsLocal() {
 			// Ignore create meta volume on disks which are not found or not local.
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2016, 2017, 2018, 2019 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2016-2021 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -211,16 +211,18 @@ func (li *localLockInstance) GetLock(ctx context.Context, timeout *dynamicTimeou
 	lockSource := getSource(2)
 	start := UTCNow()
 	const readLock = false
-	var success []int
+	success := make([]int, len(li.paths))
 	for i, path := range li.paths {
 		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
 			timeout.LogFailure()
-			for _, sint := range success {
-				li.ns.unlock(li.volume, li.paths[sint], readLock)
+			for si, sint := range success {
+				if sint == 1 {
+					li.ns.unlock(li.volume, li.paths[si], readLock)
+				}
 			}
 			return nil, OperationTimedOut{}
 		}
-		success = append(success, i)
+		success[i] = 1
 	}
 	timeout.LogSuccess(UTCNow().Sub(start))
 	return ctx, nil
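In GetLock (and GetRLock below) the success slice is now allocated at full length rather than grown, so it doubles as a per-path flag array: slot i is set to 1 only after path i's lock is taken, and the failure path releases only the flagged slots. A hedged, runnable sketch of that unwind pattern, with toy tryLock/unlock stand-ins rather than MinIO's lock API:

package main

import "fmt"

// acquireAll takes every resource's lock or none: success has one slot per
// resource, a slot becomes 1 only once that lock is held, and on failure the
// loop releases exactly the flagged (held) locks before giving up.
func acquireAll(resources []string, tryLock func(string) bool, unlock func(string)) bool {
	success := make([]int, len(resources)) // all zero: nothing held yet
	for i, r := range resources {
		if !tryLock(r) {
			for si, sint := range success {
				if sint == 1 { // release only what was actually acquired
					unlock(resources[si])
				}
			}
			return false
		}
		success[i] = 1
	}
	return true
}

func main() {
	held := map[string]bool{}
	try := func(r string) bool {
		if r == "c" { // simulate a timeout on the third lock
			return false
		}
		held[r] = true
		return true
	}
	rel := func(r string) { delete(held, r) }
	fmt.Println(acquireAll([]string{"a", "b", "c"}, try, rel), held) // false map[]
}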
@@ -239,16 +241,18 @@ func (li *localLockInstance) GetRLock(ctx context.Context, timeout *dynamicTimeo
 	lockSource := getSource(2)
 	start := UTCNow()
 	const readLock = true
-	var success []int
+	success := make([]int, len(li.paths))
 	for i, path := range li.paths {
 		if !li.ns.lock(ctx, li.volume, path, lockSource, li.opsID, readLock, timeout.Timeout()) {
 			timeout.LogFailure()
-			for _, sint := range success {
-				li.ns.unlock(li.volume, li.paths[sint], readLock)
+			for si, sint := range success {
+				if sint == 1 {
+					li.ns.unlock(li.volume, li.paths[si], readLock)
+				}
 			}
 			return nil, OperationTimedOut{}
 		}
-		success = append(success, i)
+		success[i] = 1
 	}
 	timeout.LogSuccess(UTCNow().Sub(start))
 	return ctx, nil
@@ -1147,7 +1147,7 @@ func (z xlMetaV2) TotalSize() int64 {
 // showPendingDeletes is set to true if ListVersions needs to list objects marked deleted
 // but waiting to be replicated
 func (z xlMetaV2) ListVersions(volume, path string) ([]FileInfo, time.Time, error) {
-	var versions []FileInfo
+	versions := make([]FileInfo, 0, len(z.Versions))
 	var err error
 
 	for _, version := range z.Versions {
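Here the new capacity, len(z.Versions), is only an upper bound: if some versions are filtered out during iteration, the slice simply ends up shorter and the extra capacity goes unused. A small sketch (toy data, not xlMetaV2) showing that append within a reserved capacity never reallocates:

package main

import "fmt"

func main() {
	candidates := []int{1, 2, 3, 4, 5}
	// Reserve capacity for the maximum possible output, as the diff above
	// does with len(z.Versions); keeping fewer elements is harmless.
	out := make([]int, 0, len(candidates))
	for _, v := range candidates {
		if v%2 == 1 { // keep only a subset
			out = append(out, v)
		}
	}
	fmt.Println(out, len(out), cap(out)) // [1 3 5] 3 5
}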