Mirror of https://github.com/minio/minio.git (synced 2025-01-11 15:03:22 -05:00)

Preallocate (safe) slices when we know the size (#10459)

This commit is contained in:
parent b1c99e88ac
commit 34859c6d4b
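
The pattern applied throughout this diff: wherever the final element count is known up front, a nil slice declaration (var xs []T) followed by append is replaced with xs := make([]T, 0, len(src)), which reserves capacity once instead of letting append reallocate and copy as the slice grows. A minimal standalone sketch of the idea (illustrative only, not code from this commit):

	package main

	import "fmt"

	func main() {
		src := []int{1, 2, 3, 4}

		// Before: nil slice; append may reallocate several times as it grows.
		var grown []int
		for _, v := range src {
			grown = append(grown, v)
		}

		// After: length 0, capacity len(src); appends fill in place with no
		// reallocation. This is the "safe" preallocation of the commit title:
		// because the length is still 0, appending fewer elements than the
		// capacity never exposes zero-valued entries, unlike
		// make([]int, len(src)) with index assignment.
		pre := make([]int, 0, len(src))
		for _, v := range src {
			pre = append(pre, v)
		}

		fmt.Println(grown, pre) // [1 2 3 4] [1 2 3 4]
	}
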
@@ -408,7 +408,7 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
 // generates ListBucketsResponse from array of BucketInfo which can be
 // serialized to match XML and JSON API spec output.
 func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
-	var listbuckets []Bucket
+	listbuckets := make([]Bucket, 0, len(buckets))
 	var data = ListBucketsResponse{}
 	var owner = Owner{}
 
@@ -428,8 +428,7 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
 func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
-	var versions []ObjectVersion
-	var prefixes []CommonPrefix
+	versions := make([]ObjectVersion, 0, len(resp.Objects))
 	var owner = Owner{}
 	var data = ListVersionsResponse{}
 
@@ -473,6 +472,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 	data.VersionIDMarker = versionIDMarker
 	data.IsTruncated = resp.IsTruncated
 
+	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
 		var prefixItem = CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -484,8 +484,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
 
 // generates an ListObjectsV1 response for the said bucket with other enumerated options.
 func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
-	var contents []Object
-	var prefixes []CommonPrefix
+	contents := make([]Object, 0, len(resp.Objects))
 	var owner = Owner{}
 	var data = ListObjectsResponse{}
 
@@ -517,9 +516,10 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 	data.Marker = s3EncodeName(marker, encodingType)
 	data.Delimiter = s3EncodeName(delimiter, encodingType)
 	data.MaxKeys = maxKeys
 
 	data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
 	data.IsTruncated = resp.IsTruncated
 
+	prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
 	for _, prefix := range resp.Prefixes {
 		var prefixItem = CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -531,8 +531,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
 
 // generates an ListObjectsV2 response for the said bucket with other enumerated options.
 func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
-	var contents []Object
-	var commonPrefixes []CommonPrefix
+	contents := make([]Object, 0, len(objects))
 	var owner = Owner{}
 	var data = ListObjectsV2Response{}
 
@@ -585,6 +584,8 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
 	data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
 	data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
 	data.IsTruncated = isTruncated
+
+	commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
 	for _, prefix := range prefixes {
 		var prefixItem = CommonPrefix{}
 		prefixItem.Prefix = s3EncodeName(prefix, encodingType)
@@ -637,8 +637,8 @@ func (z *erasureZones) ListObjectsV2(ctx context.Context, bucket, prefix, contin
 
 func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
 
-	var zonesEntryChs [][]FileInfoCh
-	var zonesListTolerancePerSet []int
+	zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones))
+	zonesListTolerancePerSet := make([]int, 0, len(z.zones))
 
 	endWalkCh := make(chan struct{})
 	defer close(endWalkCh)
@@ -653,8 +653,8 @@ func (z *erasureZones) listObjectsNonSlash(ctx context.Context, bucket, prefix,
 	var eof bool
 	var prevPrefix string
 
-	var zonesEntriesInfos [][]FileInfo
-	var zonesEntriesValid [][]bool
+	zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs))
+	zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs))
 	for _, entryChs := range zonesEntryChs {
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
@@ -756,9 +756,9 @@ func (z *erasureZones) listObjectsSplunk(ctx context.Context, bucket, prefix, ma
 
 	recursive := true
 
-	var zonesEntryChs [][]FileInfoCh
-	var zonesEndWalkCh []chan struct{}
-	var zonesListTolerancePerSet []int
+	zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones))
+	zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones))
+	zonesListTolerancePerSet := make([]int, 0, len(z.zones))
 
 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.poolSplunk.Release(listParams{bucket, recursive, marker, prefix})
@@ -848,9 +848,9 @@ func (z *erasureZones) listObjects(ctx context.Context, bucket, prefix, marker,
 		recursive = false
 	}
 
-	var zonesEntryChs [][]FileInfoCh
-	var zonesEndWalkCh []chan struct{}
-	var zonesListTolerancePerSet []int
+	zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones))
+	zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones))
+	zonesListTolerancePerSet := make([]int, 0, len(z.zones))
 
 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.pool.Release(listParams{bucket, recursive, marker, prefix})
@@ -1051,8 +1051,8 @@ func lexicallySortedEntryZoneVersions(zoneEntryChs [][]FileInfoVersionsCh, zoneE
 // mergeZonesEntriesVersionsCh - merges FileInfoVersions channel to entries upto maxKeys.
 func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfoVersions) {
 	var i = 0
-	var zonesEntriesInfos [][]FileInfoVersions
-	var zonesEntriesValid [][]bool
+	zonesEntriesInfos := make([][]FileInfoVersions, 0, len(zonesEntryChs))
+	zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs))
 	for _, entryChs := range zonesEntryChs {
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
@@ -1082,8 +1082,8 @@ func mergeZonesEntriesVersionsCh(zonesEntryChs [][]FileInfoVersionsCh, maxKeys i
 // mergeZonesEntriesCh - merges FileInfo channel to entries upto maxKeys.
 func mergeZonesEntriesCh(zonesEntryChs [][]FileInfoCh, maxKeys int, zonesListTolerancePerSet []int) (entries FilesInfo) {
 	var i = 0
-	var zonesEntriesInfos [][]FileInfo
-	var zonesEntriesValid [][]bool
+	zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs))
+	zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs))
 	for _, entryChs := range zonesEntryChs {
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
@@ -1218,9 +1218,9 @@ func (z *erasureZones) listObjectVersions(ctx context.Context, bucket, prefix, m
 		recursive = false
 	}
 
-	var zonesEntryChs [][]FileInfoVersionsCh
-	var zonesEndWalkCh []chan struct{}
-	var zonesListTolerancePerSet []int
+	zonesEntryChs := make([][]FileInfoVersionsCh, 0, len(z.zones))
+	zonesEndWalkCh := make([]chan struct{}, 0, len(z.zones))
+	zonesListTolerancePerSet := make([]int, 0, len(z.zones))
 	for _, zone := range z.zones {
 		entryChs, endWalkCh := zone.poolVersions.Release(listParams{bucket, recursive, marker, prefix})
 		if entryChs == nil {
@@ -1737,18 +1737,15 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results
 		return nil
 	}
 
-	var zonesEntryChs [][]FileInfoCh
+	zonesEntryChs := make([][]FileInfoCh, 0, len(z.zones))
+	zoneDrivesPerSet := make([]int, 0, len(z.zones))
 	for _, zone := range z.zones {
 		zonesEntryChs = append(zonesEntryChs, zone.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done()))
-	}
-
-	var zoneDrivesPerSet []int
-	for _, zone := range z.zones {
 		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
 	}
 
-	var zonesEntriesInfos [][]FileInfo
-	var zonesEntriesValid [][]bool
+	zonesEntriesInfos := make([][]FileInfo, 0, len(zonesEntryChs))
+	zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs))
 	for _, entryChs := range zonesEntryChs {
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfo, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
@@ -1779,23 +1776,20 @@ func (z *erasureZones) Walk(ctx context.Context, bucket, prefix string, results
 type HealObjectFn func(bucket, object, versionID string) error
 
 func (z *erasureZones) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject HealObjectFn) error {
-	var zonesEntryChs [][]FileInfoVersionsCh
-
 	endWalkCh := make(chan struct{})
 	defer close(endWalkCh)
 
+	zonesEntryChs := make([][]FileInfoVersionsCh, 0, len(z.zones))
+	zoneDrivesPerSet := make([]int, 0, len(z.zones))
+
 	for _, zone := range z.zones {
 		zonesEntryChs = append(zonesEntryChs,
 			zone.startMergeWalksVersions(ctx, bucket, prefix, "", true, endWalkCh))
-	}
-
-	var zoneDrivesPerSet []int
-	for _, zone := range z.zones {
 		zoneDrivesPerSet = append(zoneDrivesPerSet, zone.setDriveCount)
 	}
 
-	var zonesEntriesInfos [][]FileInfoVersions
-	var zonesEntriesValid [][]bool
+	zonesEntriesInfos := make([][]FileInfoVersions, 0, len(zonesEntryChs))
+	zonesEntriesValid := make([][]bool, 0, len(zonesEntryChs))
 	for _, entryChs := range zonesEntryChs {
 		zonesEntriesInfos = append(zonesEntriesInfos, make([]FileInfoVersions, len(entryChs)))
 		zonesEntriesValid = append(zonesEntriesValid, make([]bool, len(entryChs)))
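
The Walk and HealObjects hunks above do slightly more than swap a declaration: the separate second loop over z.zones that collected setDriveCount is folded into the walk-setup loop, so the zones are traversed once and both slices are preallocated to len(z.zones). A simplified sketch of that refactor (types and names reduced for illustration, not the commit's code):

	package main

	import "fmt"

	type zone struct{ setDriveCount int }

	func main() {
		zones := []zone{{setDriveCount: 4}, {setDriveCount: 8}}

		// One pass over zones builds both preallocated slices, replacing the
		// earlier pattern of two append-grown slices filled by two loops.
		walkers := make([]string, 0, len(zones))
		drives := make([]int, 0, len(zones))
		for i, z := range zones {
			walkers = append(walkers, fmt.Sprintf("walker-%d", i)) // stand-in for startMergeWalks
			drives = append(drives, z.setDriveCount)
		}
		fmt.Println(walkers, drives)
	}
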
@@ -502,13 +502,13 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
 		atomic.AddInt64(&fs.activeIOCount, -1)
 	}()
 
-	var bucketInfos []BucketInfo
 	entries, err := readDir(fs.fsPath)
 	if err != nil {
 		logger.LogIf(ctx, errDiskNotFound)
 		return nil, toObjectErr(errDiskNotFound)
 	}
 
+	bucketInfos := make([]BucketInfo, 0, len(entries))
 	for _, entry := range entries {
 		// Ignore all reserved bucket names and invalid bucket names.
 		if isReservedOrInvalidBucket(entry, false) {
@@ -2557,7 +2557,7 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite
 	}
 
 	// Complete parts.
-	var completeParts []CompletePart
+	completeParts := make([]CompletePart, 0, len(complMultipartUpload.Parts))
 	for _, part := range complMultipartUpload.Parts {
 		part.ETag = canonicalizeETag(part.ETag)
 		if isEncrypted {
@@ -829,8 +829,9 @@ func (client *peerRESTClient) ConsoleLog(logCh chan interface{}, doneCh <-chan s
 }
 
 func getRemoteHosts(endpointZones EndpointZones) []*xnet.Host {
-	var remoteHosts []*xnet.Host
-	for _, hostStr := range GetRemotePeers(endpointZones) {
+	peers := GetRemotePeers(endpointZones)
+	remoteHosts := make([]*xnet.Host, 0, len(peers))
+	for _, hostStr := range peers {
 		host, err := xnet.ParseHost(hostStr)
 		if err != nil {
 			logger.LogIf(GlobalContext, err)
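
One detail in the getRemoteHosts hunk: previously GetRemotePeers was called directly in the range clause, so its result's length was never available for sizing. Hoisting the call into a local (peers) is what makes the preallocation possible. A generic sketch of the move (fetchPeers is a hypothetical stand-in):

	package main

	import "fmt"

	func fetchPeers() []string { return []string{"peer-a", "peer-b"} } // hypothetical producer

	func main() {
		// Hoist the producer call so len(peers) can size the result slice.
		peers := fetchPeers()
		hosts := make([]string, 0, len(peers))
		for _, h := range peers {
			hosts = append(hosts, h)
		}
		fmt.Println(hosts) // [peer-a peer-b]
	}
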
@@ -47,7 +47,7 @@ func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request)
 
 	ctx := newContext(r, w, "GetLocks")
 
-	var llockers []map[string][]lockRequesterInfo
+	llockers := make([]map[string][]lockRequesterInfo, 0, len(globalLockServers))
 	for _, llocker := range globalLockServers {
 		llockers = append(llockers, llocker.DupLockMap())
 	}
@@ -654,7 +654,7 @@ func listVols(dirPath string) ([]VolInfo, error) {
 	if err != nil {
 		return nil, errDiskNotFound
 	}
-	var volsInfo []VolInfo
+	volsInfo := make([]VolInfo, 0, len(entries))
 	for _, entry := range entries {
 		if !HasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
 			// Skip if entry is neither a directory not a valid volume name.
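
To see what changes like these buy, an illustrative micro-benchmark (not part of the commit) comparing append-driven growth against a preallocated slice; run with go test -bench=.:

	package prealloc_test

	import "testing"

	const n = 1024

	// Nil slice grown by append: multiple reallocations and copies per run.
	func BenchmarkAppendGrow(b *testing.B) {
		for i := 0; i < b.N; i++ {
			var s []int
			for j := 0; j < n; j++ {
				s = append(s, j)
			}
		}
	}

	// Preallocated to the known size: a single allocation per run.
	func BenchmarkAppendPrealloc(b *testing.B) {
		for i := 0; i < b.N; i++ {
			s := make([]int, 0, n)
			for j := 0; j < n; j++ {
				s = append(s, j)
			}
		}
	}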