// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
	"context"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/cachevalue"
	"github.com/minio/minio/internal/grid"
	xioutil "github.com/minio/minio/internal/ioutil"
	"github.com/minio/minio/internal/logger"
)

//go:generate stringer -type=storageMetric -trimprefix=storageMetric $GOFILE
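// (The generated String method strips the prefix, so e.g.
// storageMetricMakeVolBulk.String() yields "MakeVolBulk"; those names are the
// keys used in the DiskMetrics maps built by getMetrics below.)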

type storageMetric uint8

const (
	storageMetricMakeVolBulk storageMetric = iota
	storageMetricMakeVol
	storageMetricListVols
	storageMetricStatVol
	storageMetricDeleteVol
	storageMetricWalkDir
	storageMetricListDir
	storageMetricReadFile
	storageMetricAppendFile
	storageMetricCreateFile
	storageMetricReadFileStream
	storageMetricRenameFile
	storageMetricRenameData
	storageMetricCheckParts
	storageMetricDelete
	storageMetricDeleteVersions
	storageMetricVerifyFile
	storageMetricWriteAll
	storageMetricDeleteVersion
	storageMetricWriteMetadata
	storageMetricUpdateMetadata
	storageMetricReadVersion
	storageMetricReadXL
	storageMetricReadAll
	storageMetricStatInfoFile
	storageMetricReadMultiple
	storageMetricDeleteAbandonedParts
	storageMetricDiskInfo
	storageMetricDeleteBulk

	// .... add more

	storageMetricLast
)

// Detects change in underlying disk.
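// xlStorageDiskIDCheck wraps *xlStorage: each call verifies the cached disk ID
// against the backend, consults the health tracker, and records per-call
// metrics before delegating to the underlying storage.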
type xlStorageDiskIDCheck struct {
	totalWrites           atomic.Uint64
	totalDeletes          atomic.Uint64
	totalErrsAvailability atomic.Uint64 // Captures all data availability errors such as faulty disk, timeout errors.
	totalErrsTimeout      atomic.Uint64 // Captures all timeout only errors

	// apiCalls should be placed first so alignment is guaranteed for atomic operations.
	apiCalls     [storageMetricLast]uint64
	apiLatencies [storageMetricLast]*lockedLastMinuteLatency
	diskID       atomic.Pointer[string]
	storage      *xlStorage
	health       *diskHealthTracker
	healthCheck  bool

	metricsCache *cachevalue.Cache[DiskMetrics]
	diskCtx      context.Context
	diskCancel   context.CancelFunc
}

func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics {
	p.metricsCache.InitOnce(5*time.Second,
		cachevalue.Opts{},
		func(ctx context.Context) (DiskMetrics, error) {
			diskMetric := DiskMetrics{
				LastMinute: make(map[string]AccElem, len(p.apiLatencies)),
				APICalls:   make(map[string]uint64, len(p.apiCalls)),
			}
			for i, v := range p.apiLatencies {
				diskMetric.LastMinute[storageMetric(i).String()] = v.total()
			}
			for i := range p.apiCalls {
				diskMetric.APICalls[storageMetric(i).String()] = atomic.LoadUint64(&p.apiCalls[i])
			}
			return diskMetric, nil
		},
	)

	diskMetric, _ := p.metricsCache.GetWithCtx(context.Background())
	// These values do not need to be cached.
	diskMetric.TotalErrorsTimeout = p.totalErrsTimeout.Load()
	diskMetric.TotalErrorsAvailability = p.totalErrsAvailability.Load()

	return diskMetric
}

// lockedLastMinuteLatency accumulates totals lockless for each second.
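// Writes for the current second land in a per-second AccElem updated with
// atomics only; when the second rolls over, one writer CAS-swaps in a fresh
// accumulator and folds the old one into the rolling lastMinuteLatency window
// under the mutex (see addSize below).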
type lockedLastMinuteLatency struct {
	cachedSec int64
	cached    atomic.Pointer[AccElem]
	mu        sync.Mutex
	init      sync.Once
	lastMinuteLatency
}

func (e *lockedLastMinuteLatency) add(value time.Duration) {
	e.addSize(value, 0)
}

// addSize will add a duration and size.
func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) {
	// Lazily initialize the cached accumulator for the current second.
	t := time.Now().Unix()
	e.init.Do(func() {
		e.cached.Store(&AccElem{})
		atomic.StoreInt64(&e.cachedSec, t)
	})
	acc := e.cached.Load()
	if lastT := atomic.LoadInt64(&e.cachedSec); lastT != t {
		// Check if lastT was changed by someone else.
		if atomic.CompareAndSwapInt64(&e.cachedSec, lastT, t) {
			// Now we swap in a new accumulator.
			newAcc := &AccElem{}
			old := e.cached.Swap(newAcc)
			var a AccElem
			a.Size = atomic.LoadInt64(&old.Size)
			a.Total = atomic.LoadInt64(&old.Total)
			a.N = atomic.LoadInt64(&old.N)
			e.mu.Lock()
			e.lastMinuteLatency.addAll(t-1, a)
			e.mu.Unlock()
			acc = newAcc
		} else {
			// We may be able to grab the new accumulator by yielding.
			runtime.Gosched()
			acc = e.cached.Load()
		}
	}
	atomic.AddInt64(&acc.N, 1)
	atomic.AddInt64(&acc.Total, int64(value))
	atomic.AddInt64(&acc.Size, sz)
}

// total returns the total call count and latency for the last minute.
func (e *lockedLastMinuteLatency) total() AccElem {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.lastMinuteLatency.getTotal()
}

func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDiskIDCheck {
	xl := xlStorageDiskIDCheck{
		storage:      storage,
		health:       newDiskHealthTracker(),
		healthCheck:  healthCheck && globalDriveMonitoring,
		metricsCache: cachevalue.New[DiskMetrics](),
	}
	xl.SetDiskID(emptyDiskID)

	xl.totalWrites.Store(xl.storage.getWriteAttribute())
	xl.totalDeletes.Store(xl.storage.getDeleteAttribute())
	xl.diskCtx, xl.diskCancel = context.WithCancel(context.TODO())
	for i := range xl.apiLatencies[:] {
		xl.apiLatencies[i] = &lockedLastMinuteLatency{}
	}
	if xl.healthCheck {
		go xl.monitorDiskWritable(xl.diskCtx)
	}
	return &xl
}

func (p *xlStorageDiskIDCheck) String() string {
	return p.storage.String()
}

func (p *xlStorageDiskIDCheck) IsOnline() bool {
	storedDiskID, err := p.storage.GetDiskID()
	if err != nil {
		return false
	}
	return storedDiskID == *p.diskID.Load()
}

func (p *xlStorageDiskIDCheck) LastConn() time.Time {
	return p.storage.LastConn()
}

func (p *xlStorageDiskIDCheck) IsLocal() bool {
	return p.storage.IsLocal()
}

func (p *xlStorageDiskIDCheck) Endpoint() Endpoint {
	return p.storage.Endpoint()
}

func (p *xlStorageDiskIDCheck) Hostname() string {
	return p.storage.Hostname()
}

func (p *xlStorageDiskIDCheck) Healing() *healingTracker {
	return p.storage.Healing()
}

func (p *xlStorageDiskIDCheck) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode, _ func() bool) (dataUsageCache, error) {
	if contextCanceled(ctx) {
		xioutil.SafeClose(updates)
		return dataUsageCache{}, ctx.Err()
	}

	if err := p.checkDiskStale(); err != nil {
		xioutil.SafeClose(updates)
		return dataUsageCache{}, err
	}

	weSleep := func() bool {
		return scannerIdleMode.Load() == 0
	}

	return p.storage.NSScanner(ctx, cache, updates, scanMode, weSleep)
}

func (p *xlStorageDiskIDCheck) GetDiskLoc() (poolIdx, setIdx, diskIdx int) {
	return p.storage.GetDiskLoc()
}

func (p *xlStorageDiskIDCheck) Close() error {
	p.diskCancel()
	return p.storage.Close()
}

func (p *xlStorageDiskIDCheck) GetDiskID() (string, error) {
	return p.storage.GetDiskID()
}

func (p *xlStorageDiskIDCheck) SetDiskID(id string) {
	p.diskID.Store(&id)
}

func (p *xlStorageDiskIDCheck) checkDiskStale() error {
	if *p.diskID.Load() == emptyDiskID {
		// For empty disk-id we allow the call as the server might be
		// coming up and trying to read format.json or create format.json
		return nil
	}
	storedDiskID, err := p.storage.GetDiskID()
	if err != nil {
		// return any error generated while reading `format.json`
		return err
	}
	if err == nil && *p.diskID.Load() == storedDiskID {
		return nil
	}
	// not the same disk we remember, take it offline.
	return errDiskNotFound
}

func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context, opts DiskInfoOptions) (info DiskInfo, err error) {
	if contextCanceled(ctx) {
		return DiskInfo{}, ctx.Err()
	}

	si := p.updateStorageMetrics(storageMetricDiskInfo)
	defer si(0, &err)

	if opts.NoOp {
		if opts.Metrics {
			info.Metrics = p.getMetrics()
		}
		info.Metrics.TotalWrites = p.totalWrites.Load()
		info.Metrics.TotalDeletes = p.totalDeletes.Load()
		info.Metrics.TotalWaiting = uint32(p.health.waiting.Load())
		info.Metrics.TotalErrorsTimeout = p.totalErrsTimeout.Load()
		info.Metrics.TotalErrorsAvailability = p.totalErrsAvailability.Load()
		if p.health.isFaulty() {
			// if disk is already faulty return faulty for 'mc admin info' output and prometheus alerts.
			return info, errFaultyDisk
		}
		return info, nil
	}

	defer func() {
		if opts.Metrics {
			info.Metrics = p.getMetrics()
		}
		info.Metrics.TotalWrites = p.totalWrites.Load()
		info.Metrics.TotalDeletes = p.totalDeletes.Load()
		info.Metrics.TotalWaiting = uint32(p.health.waiting.Load())
		info.Metrics.TotalErrorsTimeout = p.totalErrsTimeout.Load()
		info.Metrics.TotalErrorsAvailability = p.totalErrsAvailability.Load()
	}()

	if p.health.isFaulty() {
		// if disk is already faulty return faulty for 'mc admin info' output and prometheus alerts.
		return info, errFaultyDisk
	}

	info, err = p.storage.DiskInfo(ctx, opts)
	if err != nil {
		return info, err
	}

	// check cached diskID against backend
	// only if it's non-empty.
	cachedID := *p.diskID.Load()
	if cachedID != "" && cachedID != info.ID {
		return info, errDiskNotFound
	}
	return info, nil
}

func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...string) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricMakeVolBulk, volumes...)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.MakeVolBulk(ctx, volumes...) })
}
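
// Most wrappers below follow the same shape: TrackDiskHealth registers the
// call and hands back a 'done' callback that must observe the final error,
// while xioutil.NewDeadlineWorker / xioutil.WithDeadline bound the underlying
// call by the configured per-drive max timeout so a hung drive is expected to
// surface as a timeout error rather than blocking the caller indefinitely.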

func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricMakeVol, volume)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.MakeVol(ctx, volume) })
}

func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) (vi []VolInfo, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListVols, "/")
	if err != nil {
		return nil, err
	}
	defer done(0, &err)

	return p.storage.ListVols(ctx)
}

func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricStatVol, volume)
	if err != nil {
		return vol, err
	}
	defer done(0, &err)

	return xioutil.WithDeadline[VolInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result VolInfo, err error) {
		return p.storage.StatVol(ctx, volume)
	})
}

func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVol, volume)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.DeleteVol(ctx, volume, forceDelete) })
}

func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (s []string, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListDir, volume, dirPath)
	if err != nil {
		return nil, err
	}
	defer done(0, &err)

	return p.storage.ListDir(ctx, origvolume, volume, dirPath, count)
}

// Legacy API - does not have any deadlines
func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadFile, volume, path)
	if err != nil {
		return 0, err
	}
	defer func() {
		done(n, &err)
	}()

	return xioutil.WithDeadline[int64](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result int64, err error) {
		return p.storage.ReadFile(ctx, volume, path, offset, buf, verifier)
	})
}

// Legacy API - does not have any deadlines
func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricAppendFile, volume, path)
	if err != nil {
		return err
	}
	defer done(int64(len(buf)), &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error {
		return p.storage.AppendFile(ctx, volume, path, buf)
	})
}

func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCreateFile, volume, path)
	if err != nil {
		return err
	}
	defer done(size, &err)

	return p.storage.CreateFile(ctx, origvolume, volume, path, size, io.NopCloser(reader))
}

func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadFileStream, volume, path)
	if err != nil {
		return nil, err
	}
	defer done(length, &err)

	return xioutil.WithDeadline[io.ReadCloser](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result io.ReadCloser, err error) {
		return p.storage.ReadFileStream(ctx, volume, path, offset, length)
	})
}

func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameFile, srcVolume, srcPath, dstVolume, dstPath)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.RenameFile(ctx, srcVolume, srcPath, dstVolume, dstPath) })
}

func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (res RenameDataResp, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameData, srcPath, fi.DataDir, dstVolume, dstPath)
	if err != nil {
		return res, err
	}
	defer func() {
		if err == nil && !skipAccessChecks(dstVolume) {
			p.storage.setWriteAttribute(p.totalWrites.Add(1))
		}
		done(0, &err)
	}()

	// Copy inline data to a new buffer to function with deadlines.
	if len(fi.Data) > 0 {
		fi.Data = append(grid.GetByteBufferCap(len(fi.Data))[:0], fi.Data...)
	}
	return xioutil.WithDeadline[RenameDataResp](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (res RenameDataResp, err error) {
		if len(fi.Data) > 0 {
			defer grid.PutByteBuffer(fi.Data)
		}
		return p.storage.RenameData(ctx, srcVolume, srcPath, fi, dstVolume, dstPath, opts)
	})
}

func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCheckParts, volume, path)
	if err != nil {
		return nil, err
	}
	defer done(0, &err)

	return xioutil.WithDeadline[*CheckPartsResp](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (res *CheckPartsResp, err error) {
		return p.storage.CheckParts(ctx, volume, path, fi)
	})
}

func (p *xlStorageDiskIDCheck) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteBulk, append([]string{volume}, paths...)...)
	if err != nil {
		return err
	}
	defer done(0, &err)

	return p.storage.DeleteBulk(ctx, volume, paths...)
}

func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDelete, volume, path)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.Delete(ctx, volume, path, deleteOpts) })
}

// DeleteVersions deletes a slice of versions; they can belong to the same
// object or to multiple objects.
func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions, opts DeleteOptions) (errs []error) {
	// Merely for tracing storage
	path := ""
	if len(versions) > 0 {
		path = versions[0].Name
	}
	errs = make([]error, len(versions))
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVersions, volume, path)
	if err != nil {
		for i := range errs {
			errs[i] = ctx.Err()
		}
		return errs
	}
	defer func() {
		if !skipAccessChecks(volume) {
			var permanentDeletes uint64
			var deleteMarkers uint64

			for i, nerr := range errs {
				if nerr != nil {
					continue
				}
				for _, fi := range versions[i].Versions {
					if fi.Deleted {
						// Delete markers are a write operation not a permanent delete.
						deleteMarkers++
						continue
					}
					permanentDeletes++
				}
			}
			if deleteMarkers > 0 {
				p.storage.setWriteAttribute(p.totalWrites.Add(deleteMarkers))
			}
			if permanentDeletes > 0 {
				p.storage.setDeleteAttribute(p.totalDeletes.Add(permanentDeletes))
			}
		}
		done(0, &err)
	}()

	errs = p.storage.DeleteVersions(ctx, volume, versions, opts)
	for i := range errs {
		if errs[i] != nil {
			err = errs[i]
			break
		}
	}

	return errs
}

func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricVerifyFile, volume, path)
	if err != nil {
		return nil, err
	}
	defer done(0, &err)

	return p.storage.VerifyFile(ctx, volume, path, fi)
}

func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricWriteAll, volume, path)
	if err != nil {
		return err
	}
	defer done(int64(len(b)), &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.WriteAll(ctx, volume, path, b) })
}

func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteVersion, volume, path)
	if err != nil {
		return err
	}
	defer func() {
		defer done(0, &err)

		if err == nil && !skipAccessChecks(volume) {
			if opts.UndoWrite {
				p.storage.setWriteAttribute(p.totalWrites.Add(^uint64(0)))
				return
			}

			if fi.Deleted {
				// Delete markers are a write operation not a permanent delete.
				p.storage.setWriteAttribute(p.totalWrites.Add(1))
				return
			}

			p.storage.setDeleteAttribute(p.totalDeletes.Add(1))
		}
	}()

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.DeleteVersion(ctx, volume, path, fi, forceDelMarker, opts) })
}

func (p *xlStorageDiskIDCheck) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricUpdateMetadata, volume, path)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.UpdateMetadata(ctx, volume, path, fi, opts) })
}

func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricWriteMetadata, volume, path)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.WriteMetadata(ctx, origvolume, volume, path, fi) })
}

func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadVersion, volume, path)
	if err != nil {
		return fi, err
	}
	defer done(0, &err)

	return xioutil.WithDeadline[FileInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result FileInfo, err error) {
		return p.storage.ReadVersion(ctx, origvolume, volume, path, versionID, opts)
	})
}

func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadAll, volume, path)
	if err != nil {
		return nil, err
	}
	var sz int
	defer func() {
		sz = len(buf)
		done(int64(sz), &err)
	}()

	return xioutil.WithDeadline[[]byte](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result []byte, err error) {
		return p.storage.ReadAll(ctx, volume, path)
	})
}

func (p *xlStorageDiskIDCheck) ReadXL(ctx context.Context, volume string, path string, readData bool) (rf RawFileInfo, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadXL, volume, path)
	if err != nil {
		return RawFileInfo{}, err
	}
	defer func() {
		done(int64(len(rf.Buf)), &err)
	}()

	return xioutil.WithDeadline[RawFileInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result RawFileInfo, err error) {
		return p.storage.ReadXL(ctx, volume, path, readData)
	})
}

func (p *xlStorageDiskIDCheck) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricStatInfoFile, volume, path)
	if err != nil {
		return nil, err
	}
	defer done(0, &err)

	return p.storage.StatInfoFile(ctx, volume, path, glob)
}

// ReadMultiple will read multiple files and send each file as a response.
// Files are read and returned in the given order.
// The resp channel is closed before the call returns.
// Only a canceled context will return an error.
func (p *xlStorageDiskIDCheck) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadMultiple, req.Bucket, req.Prefix)
	if err != nil {
		xioutil.SafeClose(resp)
		return err
	}
	defer done(0, &err)

	return p.storage.ReadMultiple(ctx, req, resp)
}

// CleanAbandonedData will read metadata of the object on disk
// and delete any data directories and inline data that isn't referenced in metadata.
func (p *xlStorageDiskIDCheck) CleanAbandonedData(ctx context.Context, volume string, path string) (err error) {
	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteAbandonedParts, volume, path)
	if err != nil {
		return err
	}
	defer done(0, &err)

	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
	return w.Run(func() error { return p.storage.CleanAbandonedData(ctx, volume, path) })
}

func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string, size int64, err string, custom map[string]string) madmin.TraceInfo {
	return madmin.TraceInfo{
		TraceType: madmin.TraceStorage,
		Time:      startTime,
		NodeName:  globalLocalNodeName,
		FuncName:  "storage." + s.String(),
		Duration:  duration,
		Bytes:     size,
		Path:      path,
		Error:     err,
		Custom:    custom,
	}
}

func scannerTrace(s scannerMetric, startTime time.Time, duration time.Duration, path string, custom map[string]string) madmin.TraceInfo {
	return madmin.TraceInfo{
		TraceType: madmin.TraceScanner,
		Time:      startTime,
		NodeName:  globalLocalNodeName,
		FuncName:  "scanner." + s.String(),
		Duration:  duration,
		Path:      path,
		Custom:    custom,
	}
}

// updateStorageMetrics returns a callback that records the call count, latency
// and error counters for the given storage metric, and publishes a trace entry
// when tracing is subscribed.
func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...string) func(sz int64, err *error) {
	startTime := time.Now()
	trace := globalTrace.NumSubscribers(madmin.TraceStorage) > 0
	return func(sz int64, errp *error) {
		duration := time.Since(startTime)

		var err error
		if errp != nil && *errp != nil {
			err = *errp
		}

		atomic.AddUint64(&p.apiCalls[s], 1)
		if IsErr(err, []error{
			errFaultyDisk,
			errFaultyRemoteDisk,
			context.DeadlineExceeded,
		}...) {
			p.totalErrsAvailability.Add(1)
			if errors.Is(err, context.DeadlineExceeded) {
				p.totalErrsTimeout.Add(1)
			}
		}

		p.apiLatencies[s].add(duration)

		if trace {
			custom := make(map[string]string, 2)
			paths = append([]string{p.String()}, paths...)
			var errStr string
			if err != nil {
				errStr = err.Error()
			}
			custom["total-errs-timeout"] = strconv.FormatUint(p.totalErrsTimeout.Load(), 10)
			custom["total-errs-availability"] = strconv.FormatUint(p.totalErrsAvailability.Load(), 10)
			globalTrace.Publish(storageTrace(s, startTime, duration, strings.Join(paths, " "), sz, errStr, custom))
		}
	}
}

const (
	diskHealthOK int32 = iota
	diskHealthFaulty
)

type diskHealthTracker struct {
	// atomic time of last success
	lastSuccess int64

	// atomic time of last time a token was grabbed.
	lastStarted int64

	// Atomic status of disk.
	status atomic.Int32

	// Atomic count of operations currently waiting on the disk; used to flag a hung drive.
	waiting atomic.Int32
}
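
// waiting is incremented when TrackDiskHealth admits a call and decremented by
// its done callback (and while the offline monitor is active); DiskInfo exposes
// it as Metrics.TotalWaiting, so a steadily growing value suggests callers are
// queueing up behind a slow or hung drive.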

// newDiskHealthTracker creates a new disk health tracker.
func newDiskHealthTracker() *diskHealthTracker {
	d := diskHealthTracker{
		lastSuccess: time.Now().UnixNano(),
		lastStarted: time.Now().UnixNano(),
	}
	d.status.Store(diskHealthOK)
	return &d
}

// logSuccess will update the last successful operation time.
func (d *diskHealthTracker) logSuccess() {
	atomic.StoreInt64(&d.lastSuccess, time.Now().UnixNano())
}

func (d *diskHealthTracker) isFaulty() bool {
	return d.status.Load() == diskHealthFaulty
}

type (
	healthDiskCtxKey   struct{}
	healthDiskCtxValue struct {
		lastSuccess *int64
	}
)

// logSuccess will update the last successful operation time.
func (h *healthDiskCtxValue) logSuccess() {
	atomic.StoreInt64(h.lastSuccess, time.Now().UnixNano())
}

// noopDoneFunc is a no-op done func.
// Can be reused.
var noopDoneFunc = func(_ int64, _ *error) {}

// TrackDiskHealth for this request.
// When a nil error is returned, 'done' MUST be called
// with the status of the response, if it corresponds to disk health.
// If the pointer sent to done is non-nil AND the error
// is either nil or io.EOF the disk is considered good.
// So if unsure whether the disk status is ok, pass nil as the error pointer to done.
// Shadowing will work as long as return error is named: https://go.dev/play/p/sauq86SsTN2
func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMetric, paths ...string) (c context.Context, done func(int64, *error), err error) {
	done = noopDoneFunc
	if contextCanceled(ctx) {
		return ctx, done, ctx.Err()
	}

	if p.health.status.Load() != diskHealthOK {
		return ctx, done, errFaultyDisk
	}

	// Verify if the disk is not stale
	// - missing format.json (unformatted drive)
	// - format.json is valid but invalid 'uuid'
	if err = p.checkDiskStale(); err != nil {
		return ctx, done, err
	}

	// Disallow recursive tracking to avoid deadlocks.
	if ctx.Value(healthDiskCtxKey{}) != nil {
		done = p.updateStorageMetrics(s, paths...)
		return ctx, done, nil
	}

	if contextCanceled(ctx) {
		return ctx, done, ctx.Err()
	}

	atomic.StoreInt64(&p.health.lastStarted, time.Now().UnixNano())
	p.health.waiting.Add(1)

	ctx = context.WithValue(ctx, healthDiskCtxKey{}, &healthDiskCtxValue{lastSuccess: &p.health.lastSuccess})
	si := p.updateStorageMetrics(s, paths...)
	var once sync.Once
	return ctx, func(sz int64, errp *error) {
		p.health.waiting.Add(-1)
		once.Do(func() {
			if errp != nil {
				err := *errp
				if err == nil || errors.Is(err, io.EOF) {
					p.health.logSuccess()
				}
			}
			si(sz, errp)
		})
	}, nil
}
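
// Illustrative sketch of the calling convention used by the wrappers above
// (hypothetical method body; the named return 'err' ensures the deferred call
// observes the final error):
//
//	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadAll, volume, path)
//	if err != nil {
//		return nil, err
//	}
//	defer done(0, &err)
//	return p.storage.ReadAll(ctx, volume, path)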

// toWrite is the probe payload used by the health monitors below: indexing
// element 2048 allocates a 2049-byte slice whose final byte is 42.
var toWrite = []byte{2048: 42}

// monitorDiskStatus should be called once when a drive has been marked offline.
// Once the disk has been deemed ok, it will return to online status.
func (p *xlStorageDiskIDCheck) monitorDiskStatus(spent time.Duration, fn string) {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()

	for range t.C {
		if contextCanceled(p.diskCtx) {
			return
		}

		err := p.storage.WriteAll(context.Background(), minioMetaTmpBucket, fn, toWrite)
		if err != nil {
			continue
		}

		b, err := p.storage.ReadAll(context.Background(), minioMetaTmpBucket, fn)
		if err != nil || len(b) != len(toWrite) {
			continue
		}

		err = p.storage.Delete(context.Background(), minioMetaTmpBucket, fn, DeleteOptions{
			Recursive: false,
			Immediate: false,
		})

		if err == nil {
			logger.Event(context.Background(), "healthcheck",
				"node(%s): Read/Write/Delete successful, bringing drive %s online", globalLocalNodeName, p.storage.String())
			p.health.status.Store(diskHealthOK)
			p.health.waiting.Add(-1)
			return
		}
	}
}

// monitorDiskWritable periodically verifies that the drive can be written to
// and read back. If a probe fails with a faulty-disk error or does not
// complete within the drive max timeout, the drive is taken offline and
// monitorDiskStatus takes over until it recovers.
func (p *xlStorageDiskIDCheck) monitorDiskWritable(ctx context.Context) {
	var (
		// We check every 15 seconds if the disk is writable and we can read back.
		checkEvery = 15 * time.Second

		// If the disk has completed an operation successfully within the last 5 seconds, don't check it.
		skipIfSuccessBefore = 5 * time.Second
	)

	// if disk max timeout is smaller than checkEvery window
	// reduce checks by a second.
	if globalDriveConfig.GetMaxTimeout() <= checkEvery {
		checkEvery = globalDriveConfig.GetMaxTimeout() - time.Second
		if checkEvery <= 0 {
			checkEvery = globalDriveConfig.GetMaxTimeout()
		}
	}

	// if disk max timeout is smaller than skipIfSuccessBefore window
	// reduce the skipIfSuccessBefore by a second.
	if globalDriveConfig.GetMaxTimeout() <= skipIfSuccessBefore {
		skipIfSuccessBefore = globalDriveConfig.GetMaxTimeout() - time.Second
		if skipIfSuccessBefore <= 0 {
			skipIfSuccessBefore = globalDriveConfig.GetMaxTimeout()
		}
	}

	t := time.NewTicker(checkEvery)
	defer t.Stop()
	fn := mustGetUUID()

	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	monitor := func() bool {
		if contextCanceled(ctx) {
			return false
		}

		if p.health.status.Load() != diskHealthOK {
			return true
		}

		if time.Since(time.Unix(0, atomic.LoadInt64(&p.health.lastSuccess))) < skipIfSuccessBefore {
			// We recently saw a success - no need to check.
			return true
		}

		goOffline := func(err error, spent time.Duration) {
			if p.health.status.CompareAndSwap(diskHealthOK, diskHealthFaulty) {
				storageLogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err))
				p.health.waiting.Add(1)
				go p.monitorDiskStatus(spent, fn)
			}
		}

		// Offset checks a bit.
		time.Sleep(time.Duration(rng.Int63n(int64(1 * time.Second))))

		dctx, dcancel := context.WithCancel(ctx)
		started := time.Now()
		go func() {
			timeout := time.NewTimer(globalDriveConfig.GetMaxTimeout())
			select {
			case <-dctx.Done():
				if !timeout.Stop() {
					<-timeout.C
				}
			case <-timeout.C:
				spent := time.Since(started)
				goOffline(fmt.Errorf("unable to write+read for %v", spent.Round(time.Millisecond)), spent)
			}
		}()

		func() {
			defer dcancel()

			err := p.storage.WriteAll(ctx, minioMetaTmpBucket, fn, toWrite)
			if err != nil {
				if osErrToFileErr(err) == errFaultyDisk {
					goOffline(fmt.Errorf("unable to write: %w", err), 0)
				}
				return
			}
			b, err := p.storage.ReadAll(context.Background(), minioMetaTmpBucket, fn)
			if err != nil || len(b) != len(toWrite) {
				if osErrToFileErr(err) == errFaultyDisk {
					goOffline(fmt.Errorf("unable to read: %w", err), 0)
				}
				return
			}
		}()

		// Continue to monitor
		return true
	}

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			if !monitor() {
				return
			}
		}
	}
}

// checkID will check if the disk ID matches the provided ID.
func (p *xlStorageDiskIDCheck) checkID(wantID string) (err error) {
	if wantID == "" {
		return nil
	}
	id, err := p.storage.GetDiskID()
	if err != nil {
		return err
	}
	if id != wantID {
		return fmt.Errorf("disk ID %s does not match. disk reports %s", wantID, id)
	}
	return nil
}

// diskHealthCheckOK will check if the provided error is nil
// and update disk status if good.
// For convenience a bool is returned to indicate any error state
// that is not io.EOF.
func diskHealthCheckOK(ctx context.Context, err error) bool {
	// Check if context has a disk health check.
	tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
	if !ok {
		// No tracker, return
		return err == nil || errors.Is(err, io.EOF)
	}
	if err == nil || errors.Is(err, io.EOF) {
		tracker.logSuccess()
		return true
	}
	return false
}

// diskHealthWrapper provides either an io.Reader or io.Writer
// that updates status of the provided tracker.
// Use through diskHealthReader or diskHealthWriter.
type diskHealthWrapper struct {
	tracker *healthDiskCtxValue
	r       io.Reader
	w       io.Writer
}

func (d *diskHealthWrapper) Read(p []byte) (int, error) {
	if d.r == nil {
		return 0, fmt.Errorf("diskHealthWrapper: Read with no reader")
	}
	n, err := d.r.Read(p)
	if err == nil || err == io.EOF && n > 0 {
		d.tracker.logSuccess()
	}
	return n, err
}

func (d *diskHealthWrapper) Write(p []byte) (int, error) {
	if d.w == nil {
		return 0, fmt.Errorf("diskHealthWrapper: Write with no writer")
	}
	n, err := d.w.Write(p)
	if err == nil && n == len(p) {
		d.tracker.logSuccess()
	}
	return n, err
}

// diskHealthReader provides a wrapper that will update disk health on
// ctx, on every successful read.
// This should only be used directly at the os/syscall level,
// otherwise buffered operations may return false health checks.
func diskHealthReader(ctx context.Context, r io.Reader) io.Reader {
	// Check if context has a disk health check.
	tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
	if !ok {
		// No need to wrap
		return r
	}
	return &diskHealthWrapper{r: r, tracker: tracker}
}

// diskHealthWriter provides a wrapper that will update disk health on
// ctx, on every successful write.
// This should only be used directly at the os/syscall level,
// otherwise buffered operations may return false health checks.
func diskHealthWriter(ctx context.Context, w io.Writer) io.Writer {
	// Check if context has a disk health check.
	tracker, ok := ctx.Value(healthDiskCtxKey{}).(*healthDiskCtxValue)
	if !ok {
		// No need to wrap
		return w
	}
	return &diskHealthWrapper{w: w, tracker: tracker}
}
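
// Illustrative sketch (hypothetical caller, 'src' and 'dst' assumed): inside an
// operation that already went through TrackDiskHealth, raw file handles can be
// wrapped so every successful low-level read or write refreshes the tracker's
// lastSuccess time:
//
//	_, err = io.Copy(diskHealthWriter(ctx, dst), diskHealthReader(ctx, src))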