/*
 * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"sync"

	"github.com/minio/minio/pkg/sync/errgroup"
)

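// getLoadBalancedLocalDisks - fetches a randomized (load balanced) list of the
// local disks that are currently online and not healing.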
func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		if disks[i-1] != nil && disks[i-1].IsLocal() {
			if !disks[i-1].Healing() && disks[i-1].IsOnline() {
				newDisks = append(newDisks, disks[i-1])
			}
		}
	}
	return newDisks
}

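// getOnlineDisks - queries all disks concurrently and returns the ones that are
// reachable and not healing, in randomized order.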
func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) {
	disks := er.getDisks()
	var wg sync.WaitGroup
	var mu sync.Mutex
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			if disks[i-1] == nil {
				return
			}
			di, err := disks[i-1].DiskInfo(context.Background())
			if err != nil || di.Healing {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				//
				// - Do not consume disks which are being healed.
				//
				// - Future: skip busy disks.
				return
			}

			mu.Lock()
			newDisks = append(newDisks, disks[i-1])
			mu.Unlock()
		}()
	}
	wg.Wait()
	return newDisks
}

// getLoadBalancedNDisks - fetches a load balanced (sufficiently randomized) disk slice
// with N disks online. If ndisks is zero or negative it returns all disks,
// same if ndisks is greater than the total number of disks.
func (er erasureObjects) getLoadBalancedNDisks(ndisks int) (newDisks []StorageAPI) {
	disks := er.getLoadBalancedDisks(ndisks != -1)
	for _, disk := range disks {
		if disk == nil {
			continue
		}
		newDisks = append(newDisks, disk)
		ndisks--
		if ndisks == 0 {
			break
		}
	}
	return
}

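// Hypothetical usage sketch (not an actual call site): a caller that only
// needs replies from a few drives can limit the fan-out, e.g.
//
//	for _, disk := range er.getLoadBalancedNDisks(3) {
//		// read from up to 3 online, non-healing disks
//	}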
// getLoadBalancedDisks - fetches a load balanced (sufficiently randomized) disk slice.
// When optimized is true it skips disks that are unreachable or still healing and
// returns the group of disks sharing the highest disk usage; otherwise it simply
// returns all disks in randomized order.
func (er erasureObjects) getLoadBalancedDisks(optimized bool) []StorageAPI {
	disks := er.getDisks()

	if !optimized {
		var newDisks []StorageAPI
		for _, i := range hashOrder(UTCNow().String(), len(disks)) {
			newDisks = append(newDisks, disks[i-1])
		}
		return newDisks
	}

	var wg sync.WaitGroup
	var mu sync.Mutex
	var newDisks = map[uint64][]StorageAPI{}
	// Based on the random shuffling return back randomized disks.
	for _, i := range hashOrder(UTCNow().String(), len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			if disks[i-1] == nil {
				return
			}
			di, err := disks[i-1].DiskInfo(context.Background())
			if err != nil || di.Healing {
				// - Do not consume disks which are not reachable,
				//   unformatted or simply not accessible for some reason.
				//
				// - Do not consume disks which are being healed.
				//
				// - Future: skip busy disks.
				return
			}

			mu.Lock()
			// Bucket the disks by used space, at a resolution of one MiB.
			newDisks[di.Used/1024/1024] = append(newDisks[di.Used/1024/1024], disks[i-1])
			mu.Unlock()
		}()
	}
	wg.Wait()

	// Find the largest used-space bucket.
	var max uint64
	for k := range newDisks {
		if k > max {
			max = k
		}
	}

	// Return the disks that share the maximum disk usage.
	return newDisks[max]
}

// parentDirIsObject - checks whether any parent prefix of an object exists as
// an object. For example, if the object is "a/b/c/d", it makes sure that
// - "a/b/c"
// - "a/b"
// - "a"
// do not exist as objects in the namespace.
func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
	storageDisks := er.getDisks()

	g := errgroup.WithNErrs(len(storageDisks))

	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if storageDisks[index] == nil {
				return errDiskNotFound
			}
			// Check if 'prefix' is an object on this 'disk', else continue to check the next disk.
			return storageDisks[index].CheckFile(ctx, bucket, parent)
		}, index)
	}

	// NOTE: we intentionally do not read `xl.meta` to figure out the actual
	// quorum here, but rely on the default case. Actual quorum verification
	// happens at the top layer via getObjectInfo() and is ignored if necessary.
	readQuorum := getReadQuorum(len(storageDisks))

	return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil
}
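// Hypothetical usage sketch (not an actual call site): an object-creation path
// could use this check to refuse writing "a/b/c/d" while "a/b/c" already
// exists as an object, e.g.
//
//	if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
//		return oi, toObjectErr(errFileParentIsFile, bucket, object)
//	}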