// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"os"
	"runtime"
	"sort"
	"sync"
	"time"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/dsync"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
	"github.com/minio/pkg/v2/sync/errgroup"
)
// list all errors that can be ignored in a bucket operation.
var bucketOpIgnoredErrs = append(baseIgnoredErrs, errDiskAccessDenied, errUnformattedDisk)

// list all errors that can be ignored in a bucket metadata operation.
var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)

// OfflineDisk represents an unavailable disk.
var OfflineDisk StorageAPI // zero value is nil

// erasureObjects - Implements the erasure-coded (ER) object layer.
type erasureObjects struct {
|
2021-01-16 15:08:02 -05:00
|
|
|
setDriveCount int
|
|
|
|
defaultParityCount int
|
2020-12-07 13:04:07 -05:00
|
|
|
|
2021-03-04 17:36:23 -05:00
|
|
|
setIndex int
|
|
|
|
poolIndex int
|
2021-01-26 16:21:51 -05:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// getDisks returns list of storageAPIs.
|
|
|
|
getDisks func() []StorageAPI
|
|
|
|
|
|
|
|
// getLockers returns list of remote and local lockers.
|
2020-09-25 22:21:52 -04:00
|
|
|
getLockers func() ([]dsync.NetLocker, string)
|
2020-06-12 23:04:01 -04:00
|
|
|
|
2024-02-23 19:19:13 -05:00
|
|
|
// getEndpoints returns list of endpoint belonging this set.
|
2020-06-12 23:04:01 -04:00
|
|
|
// some may be local and some remote.
|
2021-09-29 14:36:19 -04:00
|
|
|
getEndpoints func() []Endpoint
|
2020-06-12 23:04:01 -04:00
|
|
|
|
2024-02-23 19:19:13 -05:00
|
|
|
// getEndpoints returns list of endpoint strings belonging this set.
|
|
|
|
// some may be local and some remote.
|
|
|
|
getEndpointStrings func() []string
|
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Locker mutex map.
|
|
|
|
nsMutex *nsLockMap
|
2017-08-14 21:08:42 -04:00
|
|
|
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (er erasureObjects) NewNSLock(bucket string, objects ...string) RWLocker {
	return er.nsMutex.NewNSLock(er.getLockers, bucket, objects...)
}
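
// Illustrative usage sketch: this mirrors the lock pattern object handlers in
// this package typically follow; the exact RWLocker helpers and the timeout
// value shown here are assumptions, not something defined in this file.
//
//	lk := er.NewNSLock(bucket, object)
//	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
//	if err != nil {
//		return err
//	}
//	ctx = lkctx.Context()
//	defer lk.Unlock(lkctx)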

// Shutdown function for object storage interface.
func (er erasureObjects) Shutdown(ctx context.Context) error {
	// Add any object layer shutdown activities here.
	closeStorageDisks(er.getDisks()...)
	return nil
}

// defaultWQuorum returns the write quorum based on setDriveCount and defaultParityCount.
func (er erasureObjects) defaultWQuorum() int {
	dataCount := er.setDriveCount - er.defaultParityCount
	if dataCount == er.defaultParityCount {
		return dataCount + 1
	}
	return dataCount
}
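
// Worked example (hypothetical drive counts, shown only to illustrate the
// arithmetic above): with setDriveCount=12 and defaultParityCount=4, dataCount
// is 8 and the write quorum is 8. With defaultParityCount=6 the data and parity
// halves are equal, so the quorum becomes dataCount+1 = 7 to break the tie
// between the two halves of the set.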

func diskErrToDriveState(err error) (state string) {
	switch {
	case errors.Is(err, errDiskNotFound) || errors.Is(err, context.DeadlineExceeded):
		state = madmin.DriveStateOffline
	case errors.Is(err, errCorruptedFormat) || errors.Is(err, errCorruptedBackend):
		state = madmin.DriveStateCorrupt
	case errors.Is(err, errUnformattedDisk):
		state = madmin.DriveStateUnformatted
	case errors.Is(err, errDiskAccessDenied):
		state = madmin.DriveStatePermission
	case errors.Is(err, errFaultyDisk):
		state = madmin.DriveStateFaulty
	case err == nil:
		state = madmin.DriveStateOk
	default:
		state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err)
	}

	return
}
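
// For example (derived directly from the switch above): diskErrToDriveState(nil)
// yields madmin.DriveStateOk, diskErrToDriveState(errFaultyDisk) yields
// madmin.DriveStateFaulty, and any unrecognized error yields a string prefixed
// with madmin.DriveStateUnknown plus the error text as the cause.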

func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) {
	onlineDisks = make(madmin.BackendDisks)
	offlineDisks = make(madmin.BackendDisks)

	for _, disk := range disksInfo {
		ep := disk.Endpoint
		if _, ok := offlineDisks[ep]; !ok {
			offlineDisks[ep] = 0
		}
		if _, ok := onlineDisks[ep]; !ok {
			onlineDisks[ep] = 0
		}
	}

	// Classify each drive as online or offline based on its reported state.
	for _, disk := range disksInfo {
		ep := disk.Endpoint
		state := disk.State
		if state != madmin.DriveStateOk && state != madmin.DriveStateUnformatted {
			offlineDisks[ep]++
			continue
		}
		onlineDisks[ep]++
	}

	rootDiskCount := 0
	for _, di := range disksInfo {
		if di.RootDisk {
			rootDiskCount++
		}
	}

	// Count offline disks as well to ensure consistent
	// reportability of offline drives on local setups.
	if len(disksInfo) == (rootDiskCount + offlineDisks.Sum()) {
		// Success.
		return onlineDisks, offlineDisks
	}

	// Root disks should be considered offline.
	for i := range disksInfo {
		ep := disksInfo[i].Endpoint
		if disksInfo[i].RootDisk {
			offlineDisks[ep]++
			onlineDisks[ep]--
		}
	}

	return onlineDisks, offlineDisks
}
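
// Illustrative sketch (hypothetical endpoint and states, not real cluster data):
// given two non-root drives reported for endpoint "http://node1:9000/disk1",
// one madmin.DriveStateOk and one madmin.DriveStateFaulty, the function returns
// onlineDisks["http://node1:9000/disk1"] == 1 and offlineDisks[...] == 1.
// Unformatted drives are still counted as online; root disks are flipped to
// offline unless every non-root drive is already offline.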

// getDisksInfo - fetch disk info across all storage APIs.
func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disksInfo []madmin.Disk) {
	disksInfo = make([]madmin.Disk, len(disks))

	g := errgroup.WithNErrs(len(disks))
	for index := range disks {
		index := index
		g.Go(func() error {
			di := madmin.Disk{
				Endpoint:  endpoints[index].String(),
				PoolIndex: endpoints[index].PoolIdx,
				SetIndex:  endpoints[index].SetIdx,
				DiskIndex: endpoints[index].DiskIdx,
			}
			if disks[index] == OfflineDisk {
				di.State = diskErrToDriveState(errDiskNotFound)
				disksInfo[index] = di
				return nil
			}
			info, err := disks[index].DiskInfo(context.TODO(), DiskInfoOptions{Metrics: metrics})
			di.DrivePath = info.MountPath
			di.TotalSpace = info.Total
			di.UsedSpace = info.Used
			di.AvailableSpace = info.Free
			di.UUID = info.ID
			di.Major = info.Major
			di.Minor = info.Minor
			di.RootDisk = info.RootDisk
			di.Healing = info.Healing
			di.Scanning = info.Scanning
			di.State = diskErrToDriveState(err)
			di.FreeInodes = info.FreeInodes
			di.UsedInodes = info.UsedInodes
			if info.Healing {
				if hi := disks[index].Healing(); hi != nil {
					hd := hi.toHealingDisk()
					di.HealInfo = &hd
				}
			}
			di.Metrics = &madmin.DiskMetrics{
				LastMinute:              make(map[string]madmin.TimedAction, len(info.Metrics.LastMinute)),
				APICalls:                make(map[string]uint64, len(info.Metrics.APICalls)),
				TotalErrorsAvailability: info.Metrics.TotalErrorsAvailability,
				TotalErrorsTimeout:      info.Metrics.TotalErrorsTimeout,
				TotalWaiting:            info.Metrics.TotalWaiting,
			}
			for k, v := range info.Metrics.LastMinute {
				if v.N > 0 {
					di.Metrics.LastMinute[k] = v.asTimedAction()
				}
			}
			for k, v := range info.Metrics.APICalls {
				di.Metrics.APICalls[k] = v
			}
			if info.Total > 0 {
				// Use floating-point division; integer division would truncate
				// the utilization to zero for any drive that is not completely full.
				di.Utilization = float64(info.Used) / float64(info.Total) * 100
			}
			disksInfo[index] = di
			return nil
		}, index)
	}

	g.Wait()
	return disksInfo
}

// Get an aggregated storage info across all disks.
func getStorageInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) StorageInfo {
	disksInfo := getDisksInfo(disks, endpoints, metrics)

	// Sort so that the first element is the smallest.
	sort.Slice(disksInfo, func(i, j int) bool {
		return disksInfo[i].TotalSpace < disksInfo[j].TotalSpace
	})

	storageInfo := StorageInfo{
		Disks: disksInfo,
	}

	storageInfo.Backend.Type = madmin.Erasure
	return storageInfo
}

// StorageInfo - returns underlying storage statistics.
func (er erasureObjects) StorageInfo(ctx context.Context) StorageInfo {
	disks := er.getDisks()
	endpoints := er.getEndpoints()
	return getStorageInfo(disks, endpoints, true)
}

// LocalStorageInfo - returns underlying local storage statistics.
func (er erasureObjects) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo {
	disks := er.getDisks()
	endpoints := er.getEndpoints()

	var localDisks []StorageAPI
	var localEndpoints []Endpoint

	for i, endpoint := range endpoints {
		if endpoint.IsLocal {
			localDisks = append(localDisks, disks[i])
			localEndpoints = append(localEndpoints, endpoint)
		}
	}

	return getStorageInfo(localDisks, localEndpoints, metrics)
}

// getOnlineDisksWithHealingAndInfo - returns online disks and overall healing status.
// Disks are randomly ordered, but in the following groups:
// - Non-scanning disks
// - Non-healing disks
// - Healing disks (if inclHealing is true)
func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (newDisks []StorageAPI, newInfos []DiskInfo, healing bool) {
	var wg sync.WaitGroup
	disks := er.getDisks()
	infos := make([]DiskInfo, len(disks))
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for _, i := range r.Perm(len(disks)) {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()

			disk := disks[i]
			if disk == nil {
				infos[i].Error = errDiskNotFound.Error()
				return
			}

			di, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
			infos[i] = di
			if err != nil {
				// Do not consume disks which are not reachable,
				// unformatted or simply not accessible for some reason.
				infos[i].Error = err.Error()
			}
		}()
	}
	wg.Wait()

	var scanningDisks, healingDisks []StorageAPI
	var scanningInfos, healingInfos []DiskInfo

	for i, info := range infos {
		// Check if one of the drives in the set is being healed.
		// This information is used by the scanner to skip healing
		// this erasure set while it calculates the usage.
		if info.Error != "" || disks[i] == nil {
			continue
		}
		if info.Healing {
			healing = true
			if inclHealing {
				healingDisks = append(healingDisks, disks[i])
				healingInfos = append(healingInfos, infos[i])
			}
			continue
		}

		if !info.Scanning {
			newDisks = append(newDisks, disks[i])
			newInfos = append(newInfos, infos[i])
		} else {
			scanningDisks = append(scanningDisks, disks[i])
			scanningInfos = append(scanningInfos, infos[i])
		}
	}

	// Prefer non-scanning disks over disks which are currently being scanned.
	newDisks = append(newDisks, scanningDisks...)
	newInfos = append(newInfos, scanningInfos...)

	// Then add healing disks.
	newDisks = append(newDisks, healingDisks...)
	newInfos = append(newInfos, healingInfos...)

	return newDisks, newInfos, healing
}

func (er erasureObjects) getOnlineDisksWithHealing(inclHealing bool) (newDisks []StorageAPI, healing bool) {
	newDisks, _, healing = er.getOnlineDisksWithHealingAndInfo(inclHealing)
	return
}
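
// Example (illustrative drive roles, not real data): with four online drives
// where drive 2 is scanning and drive 3 is healing,
// getOnlineDisksWithHealing(true) returns the drives grouped roughly as
// [1, 4, 2, 3] (idle first, then scanning, then healing, randomized within each
// group) together with healing=true; with inclHealing=false, drive 3 is dropped
// from the result but healing is still reported as true.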

// Clean up previously deleted objects from .minio.sys/tmp/.trash/
func (er erasureObjects) cleanupDeletedObjects(ctx context.Context) {
	// run multiple cleanups local to this server.
	var wg sync.WaitGroup
	for _, disk := range er.getLocalDisks() {
		if disk != nil {
			wg.Add(1)
			go func(disk StorageAPI) {
				defer wg.Done()
				diskPath := disk.Endpoint().Path
				readDirFn(pathJoin(diskPath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error {
					w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
					return w.Run(func() error {
						wait := deletedCleanupSleeper.Timer(ctx)
						removeAll(pathJoin(diskPath, minioMetaTmpDeletedBucket, ddir))
						wait()
						return nil
					})
				})
			}(disk)
		}
	}
	wg.Wait()
}
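
// Illustrative layout (assumed naming of the trash entries): each local drive
// parks deleted objects under <drive>/.minio.sys/tmp/.trash/<dir>/..., and each
// readDirFn callback above removes one such <dir>. The deletes are paced by
// deletedCleanupSleeper and bounded by the drive's deadline worker so that a
// slow or saturated drive does not stall the rest of the cleanup.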

// nsScanner will start scanning buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wantCycle uint32, updates chan<- dataUsageCache, healScanMode madmin.HealScanMode) error {
	if len(buckets) == 0 {
		return nil
	}

	// Collect disks we can use.
	disks, healing := er.getOnlineDisksWithHealing(false)
	if len(disks) == 0 {
		logger.LogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle"))
		return nil
	}

	// Load bucket totals
	oldCache := dataUsageCache{}
	if err := oldCache.load(ctx, er, dataUsageCacheName); err != nil {
		return err
	}

	// New cache..
	cache := dataUsageCache{
		Info: dataUsageCacheInfo{
			Name:      dataUsageRoot,
			NextCycle: oldCache.Info.NextCycle,
		},
		Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)),
	}
|
2019-01-17 07:58:18 -05:00
|
|
|
|
2020-06-12 23:04:01 -04:00
|
|
|
// Put all buckets into channel.
|
|
|
|
bucketCh := make(chan BucketInfo, len(buckets))
|
2023-07-14 05:25:40 -04:00
|
|
|
|
|
|
|
// Shuffle buckets to ensure total randomness of buckets, being scanned.
|
|
|
|
// Otherwise same set of buckets get scanned across erasure sets always.
|
|
|
|
// at any given point in time. This allows different buckets to be scanned
|
|
|
|
// in different order per erasure set, this wider spread is needed when
|
|
|
|
// there are lots of buckets with different order of objects in them.
|
|
|
|
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
|
|
|
permutes := r.Perm(len(buckets))
|
2020-06-12 23:04:01 -04:00
|
|
|
// Add new buckets first
|
2023-07-14 05:25:40 -04:00
|
|
|
for _, idx := range permutes {
|
|
|
|
b := buckets[idx]
|
|
|
|
if e := oldCache.find(b.Name); e == nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
bucketCh <- b
|
|
|
|
}
|
2019-01-17 07:58:18 -05:00
|
|
|
}
|
2023-07-14 05:25:40 -04:00
|
|
|
for _, idx := range permutes {
|
|
|
|
b := buckets[idx]
|
|
|
|
if e := oldCache.find(b.Name); e != nil {
|
2020-06-12 23:04:01 -04:00
|
|
|
cache.replace(b.Name, dataUsageRoot, *e)
|
2020-08-24 16:47:01 -04:00
|
|
|
bucketCh <- b
|
2020-06-12 23:04:01 -04:00
|
|
|
}
|
2019-04-30 19:27:31 -04:00
|
|
|
}
|
2024-01-28 13:04:17 -05:00
|
|
|
xioutil.SafeClose(bucketCh)

	bucketResults := make(chan dataUsageEntryInfo, len(disks))

	// Start async collector/saver.
	// This goroutine owns the cache.
	var saverWg sync.WaitGroup
	saverWg.Add(1)
	go func() {
		// Add jitter to the update time so multiple sets don't sync up.
		updateTime := 30*time.Second + time.Duration(float64(10*time.Second)*rand.Float64())
		t := time.NewTicker(updateTime)
		defer t.Stop()
		defer saverWg.Done()
		var lastSave time.Time

		for {
			select {
			case <-t.C:
				if cache.Info.LastUpdate.Equal(lastSave) {
					continue
				}
				logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update")
				updates <- cache.clone()

				lastSave = cache.Info.LastUpdate
			case v, ok := <-bucketResults:
				if !ok {
					// Save final state...
					cache.Info.NextCycle = wantCycle
					cache.Info.LastUpdate = time.Now()
					logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed")
					updates <- cache.clone()
					return
				}
				cache.replace(v.Name, v.Parent, v.Entry)
				cache.Info.LastUpdate = time.Now()
			}
		}
	}()

	// Restrict parallelism for disk usage scanner
	// up to GOMAXPROCS if GOMAXPROCS is < len(disks)
	maxProcs := runtime.GOMAXPROCS(0)
	if maxProcs < len(disks) {
		disks = disks[:maxProcs]
	}

	// Start one scanner per disk
	var wg sync.WaitGroup
	wg.Add(len(disks))

	for i := range disks {
		go func(i int) {
			defer wg.Done()
			disk := disks[i]

			for bucket := range bucketCh {
				select {
				case <-ctx.Done():
					return
				default:
				}

				// Load cache for bucket
				cacheName := pathJoin(bucket.Name, dataUsageCacheName)
				cache := dataUsageCache{}
				logger.LogIf(ctx, cache.load(ctx, er, cacheName))
				if cache.Info.Name == "" {
					cache.Info.Name = bucket.Name
				}
				cache.Info.SkipHealing = healing
				cache.Info.NextCycle = wantCycle
				if cache.Info.Name != bucket.Name {
					cache.Info = dataUsageCacheInfo{
						Name:       bucket.Name,
						LastUpdate: time.Time{},
						NextCycle:  wantCycle,
					}
				}
				// Collect updates.
				updates := make(chan dataUsageEntry, 1)
				var wg sync.WaitGroup
				wg.Add(1)
				go func(name string) {
					defer wg.Done()
					for update := range updates {
						select {
						case <-ctx.Done():
						case bucketResults <- dataUsageEntryInfo{
							Name:   name,
							Parent: dataUsageRoot,
							Entry:  update,
						}:
						}
					}
				}(cache.Info.Name)
				// Calc usage
				before := cache.Info.LastUpdate
				var err error
				cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil)
				if err != nil {
					if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) {
						logger.LogIf(ctx, cache.save(ctx, er, cacheName))
					} else {
						logger.LogIf(ctx, err)
					}
					// This ensures that we don't close
					// bucketResults channel while the
					// updates-collector goroutine still
					// holds a reference to this.
					wg.Wait()
					continue
				}

				wg.Wait()
				var root dataUsageEntry
				if r := cache.root(); r != nil {
					root = cache.flatten(*r)
				}
				select {
				case <-ctx.Done():
					return
				case bucketResults <- dataUsageEntryInfo{
					Name:   cache.Info.Name,
					Parent: dataUsageRoot,
					Entry:  root,
				}:
				}

				// Save cache
				logger.LogIf(ctx, cache.save(ctx, er, cacheName))
			}
		}(i)
	}
	wg.Wait()
	xioutil.SafeClose(bucketResults)
	saverWg.Wait()

	return nil
}
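
// Illustrative caller sketch (hypothetical wiring; the real call site lives in
// the pool/sets layer). The updates channel must be drained continuously or the
// scanner blocks on its periodic sends; "buckets" and "wantCycle" are assumed
// to be supplied by the caller.
//
//	updates := make(chan dataUsageCache, 1)
//	done := make(chan struct{})
//	go func() {
//		defer close(done)
//		for upd := range updates {
//			_ = upd // persist or merge the running totals here
//		}
//	}()
//	err := er.nsScanner(ctx, buckets, wantCycle, updates, madmin.HealNormalScan)
//	close(updates) // scanner returned; stop the drain loop
//	<-done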