Replaces 'disk'=>'drive' visible to end user (#15464)
This commit is contained in:
parent
e93867488b
commit
b57e7321e7
Makefile (2 changed lines)
@@ -91,7 +91,7 @@ verify-healing: ## verify healing and replacing disks with minio binary
 @(env bash $(PWD)/buildscripts/unaligned-healing.sh)
 verify-healing-with-root-disks: ## verify healing root disks
-@echo "Verify healing with root disks"
+@echo "Verify healing with root drives"
 @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
 @(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
@@ -1158,7 +1158,7 @@ var errorCodes = errorCodeMap{
 // MinIO extensions.
 ErrStorageFull: {
 Code: "XMinioStorageFull",
-Description: "Storage backend has reached its minimum free disk threshold. Please delete a few objects to proceed.",
+Description: "Storage backend has reached its minimum free drive threshold. Please delete a few objects to proceed.",
 HTTPStatusCode: http.StatusInsufficientStorage,
 },
 ErrRequestBodyParse: {
@@ -87,7 +87,7 @@ type healingTracker struct {
 // The disk ID will be validated against the loaded one.
 func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker, error) {
 if disk == nil {
-return nil, errors.New("loadHealingTracker: nil disk given")
+return nil, errors.New("loadHealingTracker: nil drive given")
 }
 diskID, err := disk.GetDiskID()
 if err != nil {
@@ -104,7 +104,7 @@ func loadHealingTracker(ctx context.Context, disk StorageAPI) (*healingTracker,
 return nil, err
 }
 if h.ID != diskID && h.ID != "" {
-return nil, fmt.Errorf("loadHealingTracker: disk id mismatch expected %s, got %s", h.ID, diskID)
+return nil, fmt.Errorf("loadHealingTracker: drive id mismatch expected %s, got %s", h.ID, diskID)
 }
 h.disk = disk
 h.ID = diskID
@@ -129,7 +129,7 @@ func newHealingTracker(disk StorageAPI) *healingTracker {
 // If the tracker has been deleted an error is returned.
 func (h *healingTracker) update(ctx context.Context) error {
 if h.disk.Healing() == nil {
-return fmt.Errorf("healingTracker: disk %q is not marked as healing", h.ID)
+return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID)
 }
 if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 {
 h.ID, _ = h.disk.GetDiskID()
@@ -310,7 +310,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 }
 // Prevent parallel erasure set healing
-locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-disk-healing/%s/%d/%d", endpoint, poolIdx, setIdx))
+locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%s/%d/%d", endpoint, poolIdx, setIdx))
 lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
 if err != nil {
 return err
@@ -337,14 +337,14 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 })
 if serverDebugLog {
-logger.Info("Healing disk '%v' on %s pool", disk, humanize.Ordinal(poolIdx+1))
+logger.Info("Healing drive '%v' on %s pool", disk, humanize.Ordinal(poolIdx+1))
 }
 // Load healing tracker in this disk
 tracker, err := loadHealingTracker(ctx, disk)
 if err != nil {
 // So someone changed the drives underneath, healing tracker missing.
-logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', disk was swapped again on %s pool: %w",
+logger.LogIf(ctx, fmt.Errorf("Healing tracker missing on '%s', drive was swapped again on %s pool: %w",
 disk, humanize.Ordinal(poolIdx+1), err))
 tracker = newHealingTracker(disk)
 }
@@ -369,9 +369,9 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
 }
 if tracker.ItemsFailed > 0 {
-logger.Info("Healing disk '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
+logger.Info("Healing drive '%s' failed (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
 } else {
-logger.Info("Healing disk '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
+logger.Info("Healing drive '%s' complete (healed: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsFailed)
 }
 if serverDebugLog {
@@ -180,7 +180,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
 b.h.Write(buf)
 if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
-logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s - content hash does not match - expected %s, got %s",
+logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
 b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
 return 0, errFileCorrupt
 }
@@ -38,12 +38,12 @@ type wholeBitrotWriter struct {
 func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
 err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
 if err != nil {
-logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
 return 0, err
 }
 _, err = b.Hash.Write(p)
 if err != nil {
-logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s returned %w", b.disk, err))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
 return 0, err
 }
 return len(p), nil
@@ -72,12 +72,12 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
 if b.buf == nil {
 b.buf = make([]byte, b.tillOffset-offset)
 if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
-logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
 return 0, err
 }
 }
 if len(b.buf) < len(buf) {
-logger.LogIf(GlobalContext, fmt.Errorf("Disk: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
 return 0, errLessData
 }
 n = copy(buf, b.buf)
@@ -1992,7 +1992,7 @@ func (p *ReplicationPool) updateResyncStatus(ctx context.Context, objectAPI Obje
 if updt {
 brs.LastUpdate = now
 if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
-logger.LogIf(ctx, fmt.Errorf("Could not save resync metadata to disk for %s - %w", bucket, err))
+logger.LogIf(ctx, fmt.Errorf("Could not save resync metadata to drive for %s - %w", bucket, err))
 continue
 }
 }
@@ -607,12 +607,12 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
 warningMsg = fmt.Sprintf("Invalid cache dir %s err : %s", dir, err.Error())
 }
 if rootDsk {
-warningMsg = fmt.Sprintf("cache dir cannot be part of root disk: %s", dir)
+warningMsg = fmt.Sprintf("cache dir cannot be part of root drive: %s", dir)
 }
 }
 if err := checkAtimeSupport(dir); err != nil {
-return nil, false, fmt.Errorf("Atime support required for disk caching, atime check failed with %w", err)
+return nil, false, fmt.Errorf("Atime support required for drive caching, atime check failed with %w", err)
 }
 cache, err := newDiskCache(ctx, dir, config)
@@ -622,7 +622,7 @@ func newCache(config cache.Config) ([]*diskCache, bool, error) {
 caches = append(caches, cache)
 }
 if warningMsg != "" {
-logger.Info(color.Yellow(fmt.Sprintf("WARNING: Usage of root disk for disk caching is deprecated: %s", warningMsg)))
+logger.Info(color.Yellow(fmt.Sprintf("WARNING: Usage of root drive for drive caching is deprecated: %s", warningMsg)))
 }
 return caches, migrating, nil
 }
@@ -156,7 +156,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
 setCounts := possibleSetCounts(commonSize)
 if len(setCounts) == 0 {
-msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of disks %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
+msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of drives %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
 return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
 }
@@ -183,7 +183,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
 setCounts = possibleSetCountsWithSymmetry(setCounts, argPatterns)
 if len(setCounts) == 0 {
-msg := fmt.Sprintf("No symmetric distribution detected with input endpoints provided %s, disks %d cannot be spread symmetrically by any supported erasure set sizes %d", args, commonSize, setSizes)
+msg := fmt.Sprintf("No symmetric distribution detected with input endpoints provided %s, drives %d cannot be spread symmetrically by any supported erasure set sizes %d", args, commonSize, setSizes)
 return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
 }
@@ -193,7 +193,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6
 // Check whether setSize is with the supported range.
 if !isValidSetSize(setSize) {
-msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of disks %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
+msg := fmt.Sprintf("Incorrect number of endpoints provided %s, number of drives %d is not divisible by any supported erasure set sizes %d", args, commonSize, setSizes)
 return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg)
 }
@@ -20,10 +20,10 @@ package cmd
 import "errors"
 // errErasureReadQuorum - did not meet read quorum.
-var errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online")
+var errErasureReadQuorum = errors.New("Read failed. Insufficient number of drives online")
 // errErasureWriteQuorum - did not meet write quorum.
-var errErasureWriteQuorum = errors.New("Write failed. Insufficient number of disks online")
+var errErasureWriteQuorum = errors.New("Write failed. Insufficient number of drives online")
 // errNoHealRequired - returned when healing is attempted on a previously healed disks.
 var errNoHealRequired = errors.New("No healing is required")
@@ -66,7 +66,7 @@ func TestErasureHeal(t *testing.T) {
 for i, test := range erasureHealTests {
 if test.offDisks < test.badStaleDisks {
 // test case sanity check
-t.Fatalf("Test %d: Bad test case - number of stale disks cannot be less than number of badstale disks", i)
+t.Fatalf("Test %d: Bad test case - number of stale drives cannot be less than number of badstale drives", i)
 }
 // create some test data
@@ -260,7 +260,7 @@ func TestListOnlineDisks(t *testing.T) {
 if test._tamperBackend != noTamper {
 if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
-t.Fatalf("disk (%v) with part.1 missing is not a disk with available data",
+t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
 erasureDisks[tamperedIndex])
 }
 }
@@ -446,7 +446,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 if test._tamperBackend != noTamper {
 if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
-t.Fatalf("disk (%v) with part.1 missing is not a disk with available data",
+t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data",
 erasureDisks[tamperedIndex])
 }
 }
@@ -506,7 +506,7 @@ func TestDisksWithAllParts(t *testing.T) {
 errs, fi, bucket, object, madmin.HealDeepScan)
 if len(filteredDisks) != len(erasureDisks) {
-t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
+t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
 }
 for diskIndex, disk := range filteredDisks {
@@ -515,7 +515,7 @@ func TestDisksWithAllParts(t *testing.T) {
 }
 if disk == nil {
-t.Errorf("Disk erroneously filtered, diskIndex: %d", diskIndex)
+t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
 }
 }
@@ -528,14 +528,14 @@ func TestDisksWithAllParts(t *testing.T) {
 errs, fi, bucket, object, madmin.HealDeepScan)
 if len(filteredDisks) != len(erasureDisks) {
-t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
+t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
 }
 for diskIndex, disk := range filteredDisks {
 if diskIndex == 0 && disk != nil {
-t.Errorf("Disk not filtered as expected, disk: %d", diskIndex)
+t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
 }
 if diskIndex != 0 && disk == nil {
-t.Errorf("Disk erroneously filtered, diskIndex: %d", diskIndex)
+t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
 }
 }
 partsMetadata[0] = partsMetadataBackup // Revert before going to the next test
@@ -549,14 +549,14 @@ func TestDisksWithAllParts(t *testing.T) {
 errs, fi, bucket, object, madmin.HealDeepScan)
 if len(filteredDisks) != len(erasureDisks) {
-t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
+t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
 }
 for diskIndex, disk := range filteredDisks {
 if diskIndex == 1 && disk != nil {
-t.Errorf("Disk not filtered as expected, disk: %d", diskIndex)
+t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
 }
 if diskIndex != 1 && disk == nil {
-t.Errorf("Disk erroneously filtered, diskIndex: %d", diskIndex)
+t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
 }
 }
 partsMetadata[1] = partsMetadataBackup // Revert before going to the next test
@@ -586,23 +586,23 @@ func TestDisksWithAllParts(t *testing.T) {
 errs, fi, bucket, object, madmin.HealDeepScan)
 if len(filteredDisks) != len(erasureDisks) {
-t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
+t.Errorf("Unexpected number of drives: %d", len(filteredDisks))
 }
 for diskIndex, disk := range filteredDisks {
 if _, ok := diskFailures[diskIndex]; ok {
 if disk != nil {
-t.Errorf("Disk not filtered as expected, disk: %d", diskIndex)
+t.Errorf("Drive not filtered as expected, drive: %d", diskIndex)
 }
 if errs[diskIndex] == nil {
-t.Errorf("Expected error not received, diskIndex: %d", diskIndex)
+t.Errorf("Expected error not received, driveIndex: %d", diskIndex)
 }
 } else {
 if disk == nil {
-t.Errorf("Disk erroneously filtered, diskIndex: %d", diskIndex)
+t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex)
 }
 if errs[diskIndex] != nil {
-t.Errorf("Unexpected error, %s, diskIndex: %d", errs[diskIndex], diskIndex)
+t.Errorf("Unexpected error, %s, driveIndex: %d", errs[diskIndex], diskIndex)
 }
 }
 }
@@ -448,7 +448,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 if !latestMeta.XLV1 && !latestMeta.Deleted && !recreate && disksToHealCount > latestMeta.Erasure.ParityBlocks {
 // When disk to heal count is greater than parity blocks we should simply error out.
-err := fmt.Errorf("more disks are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", errs, dataErrs, bucket, object, versionID)
+err := fmt.Errorf("more drives are expected to heal than parity, returned errors: %v (dataErrs %v) -> %s/%s(%s)", errs, dataErrs, bucket, object, versionID)
 logger.LogIf(ctx, err)
 return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs,
 bucket, object, versionID), err
@@ -583,7 +583,7 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
 // If all disks are having errors, we give up.
 if disksToHealCount == 0 {
-return result, fmt.Errorf("all disks had write errors, unable to heal %s/%s", bucket, object)
+return result, fmt.Errorf("all drives had write errors, unable to heal %s/%s", bucket, object)
 }
 }
@@ -275,7 +275,7 @@ func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []Stora
 // the corresponding error in errs slice is not nil
 func evalDisks(disks []StorageAPI, errs []error) []StorageAPI {
 if len(errs) != len(disks) {
-logger.LogIf(GlobalContext, errors.New("unexpected disks/errors slice length"))
+logger.LogIf(GlobalContext, errors.New("unexpected drives/errors slice length"))
 return nil
 }
 newDisks := make([]StorageAPI, len(disks))
@@ -457,7 +457,7 @@ func (p *poolMeta) updateAfter(ctx context.Context, idx int, pools []*erasureSet
 now := UTCNow()
 if now.Sub(p.Pools[idx].LastUpdate) >= duration {
 if serverDebugLog {
-console.Debugf("decommission: persisting poolMeta on disk: threshold:%s, poolMeta:%#v\n", now.Sub(p.Pools[idx].LastUpdate), p.Pools[idx])
+console.Debugf("decommission: persisting poolMeta on drive: threshold:%s, poolMeta:%#v\n", now.Sub(p.Pools[idx].LastUpdate), p.Pools[idx])
 }
 p.Pools[idx].LastUpdate = now
 if err := p.save(ctx, pools); err != nil {
@@ -677,7 +677,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 set := set
 disks := set.getOnlineDisks()
 if len(disks) == 0 {
-logger.LogIf(GlobalContext, fmt.Errorf("no online disks found for set with endpoints %s",
+logger.LogIf(GlobalContext, fmt.Errorf("no online drives found for set with endpoints %s",
 set.getEndpoints()))
 continue
 }
@@ -1907,7 +1907,7 @@ func listAndHeal(ctx context.Context, bucket, prefix string, set *erasureObjects
 disks, _ := set.getOnlineDisksWithHealing()
 if len(disks) == 0 {
-return errors.New("listAndHeal: No non-healing disks found")
+return errors.New("listAndHeal: No non-healing drives found")
 }
 // How to resolve partial results.
@@ -2098,7 +2098,7 @@ func (z *erasureServerPools) getPoolAndSet(id string) (poolIdx, setIdx, diskIdx
 }
 }
 }
-return -1, -1, -1, fmt.Errorf("DiskID(%s) %w", id, errDiskNotFound)
+return -1, -1, -1, fmt.Errorf("DriveID(%s) %w", id, errDiskNotFound)
 }
 // HealthOptions takes input options to return sepcific information
@@ -129,10 +129,10 @@ func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) {
 if errors.Is(err, errUnformattedDisk) {
 info, derr := disk.DiskInfo(context.TODO())
 if derr != nil && info.RootDisk {
-return nil, nil, fmt.Errorf("Disk: %s is a root disk", disk)
+return nil, nil, fmt.Errorf("Drive: %s is a root drive", disk)
 }
 }
-return nil, nil, fmt.Errorf("Disk: %s returned %w", disk, err) // make sure to '%w' to wrap the error
+return nil, nil, fmt.Errorf("Drive: %s returned %w", disk, err) // make sure to '%w' to wrap the error
 }
 return disk, format, nil
@@ -147,7 +147,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
 return -1, -1, errDiskNotFound
 }
 if diskID == offlineDiskUUID {
-return -1, -1, fmt.Errorf("diskID: %s is offline", diskID)
+return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID)
 }
 for i := 0; i < len(refFormat.Erasure.Sets); i++ {
 for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
@@ -157,7 +157,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int,
 }
 }
-return -1, -1, fmt.Errorf("diskID: %s not found", diskID)
+return -1, -1, fmt.Errorf("DriveID: %s not found", diskID)
 }
 // findDiskIndex - returns the i,j'th position of the input `format` against the reference
@@ -170,7 +170,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
 }
 if format.Erasure.This == offlineDiskUUID {
-return -1, -1, fmt.Errorf("diskID: %s is offline", format.Erasure.This)
+return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This)
 }
 for i := 0; i < len(refFormat.Erasure.Sets); i++ {
@@ -181,7 +181,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
 }
 }
-return -1, -1, fmt.Errorf("diskID: %s not found", format.Erasure.This)
+return -1, -1, fmt.Errorf("DriveID: %s not found", format.Erasure.This)
 }
 // connectDisks - attempt to connect all the endpoints, loads format
@@ -239,7 +239,7 @@ func (s *erasureSets) connectDisks() {
 s.erasureDisksMu.Lock()
 if currentDisk := s.erasureDisks[setIndex][diskIndex]; currentDisk != nil {
 if !reflect.DeepEqual(currentDisk.Endpoint(), disk.Endpoint()) {
-err = fmt.Errorf("Detected unexpected disk ordering refusing to use the disk: expecting %s, found %s, refusing to use the disk",
+err = fmt.Errorf("Detected unexpected drive ordering refusing to use the drive: expecting %s, found %s, refusing to use the drive",
 currentDisk.Endpoint(), disk.Endpoint())
 printEndpointError(endpoint, err, false)
 disk.Close()
@@ -300,7 +300,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
 return
 case <-monitor.C:
 if serverDebugLog {
-console.Debugln("running disk monitoring")
+console.Debugln("running drive monitoring")
 }
 s.connectDisks()
@@ -446,7 +446,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [
 return
 }
 if m != i || n != j {
-logger.LogIf(ctx, fmt.Errorf("Detected unexpected disk ordering refusing to use the disk - poolID: %s, found disk mounted at (set=%s, disk=%s) expected mount at (set=%s, disk=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID))
+logger.LogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID))
 s.erasureDisks[i][j] = &unrecognizedDisk{storage: disk}
 return
 }
@@ -1240,7 +1240,7 @@ func markRootDisksAsDown(storageDisks []StorageAPI, errs []error) {
 if storageDisks[i] != nil && infos[i].RootDisk {
 // We should not heal on root disk. i.e in a situation where the minio-administrator has unmounted a
 // defective drive we should not heal a path on the root disk.
-logger.LogIf(GlobalContext, fmt.Errorf("Disk `%s` is part of root disk, will not be used", storageDisks[i]))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive `%s` is part of root drive, will not be used", storageDisks[i]))
 storageDisks[i] = nil
 }
 }
@@ -1314,7 +1314,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H
 continue
 }
 if err := saveFormatErasure(storageDisks[index], format, true); err != nil {
-logger.LogIf(ctx, fmt.Errorf("Disk %s failed to write updated 'format.json': %v", storageDisks[index], err))
+logger.LogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err))
 tmpNewFormats[index] = nil // this disk failed to write new format
 }
 }
@@ -187,7 +187,7 @@ func TestNewErasureSets(t *testing.T) {
 // Initializes all erasure disks
 storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "", "")
 if err != nil {
-t.Fatalf("Unable to format disks for erasure, %s", err)
+t.Fatalf("Unable to format drives for erasure, %s", err)
 }
 ep := PoolEndpoints{Endpoints: endpoints}
@@ -288,7 +288,7 @@ func (er erasureObjects) getOnlineDisksWithHealing() (newDisks []StorageAPI, hea
 disk := disks[i-1]
 if disk == nil {
-infos[i-1].Error = "nil disk"
+infos[i-1].Error = "nil drive"
 return
 }
@@ -354,7 +354,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, bf
 // Collect disks we can use.
 disks, healing := er.getOnlineDisksWithHealing()
 if len(disks) == 0 {
-logger.LogIf(ctx, errors.New("data-scanner: all disks are offline or being healed, skipping scanner cycle"))
+logger.LogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle"))
 return nil
 }
@@ -79,7 +79,7 @@ type formatErasureV1 struct {
 formatMetaV1
 Erasure struct {
 Version string `json:"version"` // Version of 'xl' format.
-Disk string `json:"disk"` // Disk field carries assigned disk uuid.
+Disk string `json:"drive"` // Disk field carries assigned disk uuid.
 // JBOD field carries the input disk order generated the first
 // time when fresh disks were supplied.
 JBOD []string `json:"jbod"`
@@ -199,7 +199,7 @@ func formatErasureMigrate(export string) ([]byte, fs.FileInfo, error) {
 version, err := formatGetBackendErasureVersion(formatData)
 if err != nil {
-return nil, nil, fmt.Errorf("Disk %s: %w", export, err)
+return nil, nil, fmt.Errorf("Drive %s: %w", export, err)
 }
 migrate := func(formatPath string, formatData []byte) ([]byte, fs.FileInfo, error) {
@@ -217,7 +217,7 @@ func formatErasureMigrate(export string) ([]byte, fs.FileInfo, error) {
 case formatErasureVersionV1:
 formatData, err = formatErasureMigrateV1ToV2(formatData, version)
 if err != nil {
-return nil, nil, fmt.Errorf("Disk %s: %w", export, err)
+return nil, nil, fmt.Errorf("Drive %s: %w", export, err)
 }
 // Migrate successful v1 => v2, proceed to v2 => v3
 version = formatErasureVersionV2
@@ -225,7 +225,7 @@ func formatErasureMigrate(export string) ([]byte, fs.FileInfo, error) {
 case formatErasureVersionV2:
 formatData, err = formatErasureMigrateV2ToV3(formatData, export, version)
 if err != nil {
-return nil, nil, fmt.Errorf("Disk %s: %w", export, err)
+return nil, nil, fmt.Errorf("Drive %s: %w", export, err)
 }
 // Migrate successful v2 => v3, v3 is latest
 // version = formatXLVersionV3
@@ -438,14 +438,14 @@ func checkFormatErasureValues(formats []*formatErasureV3, disks []StorageAPI, se
 return err
 }
 if len(formats) != len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]) {
-return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)",
+return fmt.Errorf("%s drive is already being used in another erasure deployment. (Number of drives specified: %d but the number of drives found in the %s drive's format.json: %d)",
 disks[i], len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]))
 }
 // Only if custom erasure drive count is set, verify if the
 // set_drive_count was manually set - we need to honor what is
 // present on the drives.
 if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != setDriveCount {
-return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", disks[i], len(formatErasure.Erasure.Sets[0]), setDriveCount)
+return fmt.Errorf("%s drive is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", disks[i], len(formatErasure.Erasure.Sets[0]), setDriveCount)
 }
 }
 return nil
@@ -628,7 +628,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e
 }
 }
 }
-return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.Erasure.Sets)
+return fmt.Errorf("DriveID %s not found in any drive sets %s", this, format.Erasure.Sets)
 }
 // saveFormatErasureAll - populates `format.json` on disks in its order.
@@ -149,13 +149,13 @@ func TestFormatErasureMigrate(t *testing.T) {
 t.Fatal(err)
 }
 if formatV3.Erasure.This != m.Erasure.Disk {
-t.Fatalf("expected disk uuid: %s, got: %s", m.Erasure.Disk, formatV3.Erasure.This)
+t.Fatalf("expected drive uuid: %s, got: %s", m.Erasure.Disk, formatV3.Erasure.This)
 }
 if len(formatV3.Erasure.Sets) != 1 {
 t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.Erasure.Sets))
 }
 if !reflect.DeepEqual(formatV3.Erasure.Sets[0], m.Erasure.JBOD) {
-t.Fatalf("expected disk uuid: %v, got: %v", m.Erasure.JBOD, formatV3.Erasure.Sets[0])
+t.Fatalf("expected drive uuid: %v, got: %v", m.Erasure.JBOD, formatV3.Erasure.Sets[0])
 }
 m = &formatErasureV1{}
@@ -41,7 +41,7 @@ func TestNewFS(t *testing.T) {
 }
 _, err = NewFSObjectLayer(disk)
 if err != nil {
-errMsg := "Unable to recognize backend format, Disk is not in FS format."
+errMsg := "Unable to recognize backend format, Drive is not in FS format."
 if err.Error() == errMsg {
 t.Errorf("Expecting %s, got %s", errMsg, err)
 }
@@ -323,7 +323,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
 // initialize the new disk cache objects.
 var cacheAPI CacheObjectLayer
 cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
-logger.FatalIf(err, "Unable to initialize disk caching")
+logger.FatalIf(err, "Unable to initialize drive caching")
 globalObjLayerMutex.Lock()
 globalCacheObjectAPI = cacheAPI
@@ -209,7 +209,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
 }
 if serverDebugLog {
-console.Debugf(color.Green("healDisk:")+" healing bucket %s content on %s erasure set\n",
+console.Debugf(color.Green("healDrive:")+" healing bucket %s content on %s erasure set\n",
 bucket, humanize.Ordinal(tracker.SetIndex+1))
 }
@@ -1332,7 +1332,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 if err != nil {
 if disks[i] != nil {
 combinedErr = append(combinedErr,
-fmt.Sprintf("disk %s returned: %s", disks[i], err))
+fmt.Sprintf("drive %s returned: %s", disks[i], err))
 } else {
 combinedErr = append(combinedErr, err.Error())
 }
@@ -332,7 +332,7 @@ func getNodeDiskAPILatencyMD() MetricDescription {
 Namespace: nodeMetricNamespace,
 Subsystem: diskSubsystem,
 Name: apiLatencyMicroSec,
-Help: "Average last minute latency in µs for disk API storage operations",
+Help: "Average last minute latency in µs for drive API storage operations",
 Type: gaugeMetric,
 }
 }
@@ -342,7 +342,7 @@ func getNodeDiskUsedBytesMD() MetricDescription {
 Namespace: nodeMetricNamespace,
 Subsystem: diskSubsystem,
 Name: usedBytes,
-Help: "Total storage used on a disk",
+Help: "Total storage used on a drive",
 Type: gaugeMetric,
 }
 }
@@ -352,7 +352,7 @@ func getNodeDiskFreeBytesMD() MetricDescription {
 Namespace: nodeMetricNamespace,
 Subsystem: diskSubsystem,
 Name: freeBytes,
-Help: "Total storage available on a disk",
+Help: "Total storage available on a drive",
 Type: gaugeMetric,
 }
 }
@@ -362,7 +362,7 @@ func getClusterDisksOfflineTotalMD() MetricDescription {
 Namespace: clusterMetricNamespace,
 Subsystem: diskSubsystem,
 Name: offlineTotal,
-Help: "Total disks offline",
+Help: "Total drives offline",
 Type: gaugeMetric,
 }
 }
@@ -372,7 +372,7 @@ func getClusterDisksOnlineTotalMD() MetricDescription {
 Namespace: clusterMetricNamespace,
 Subsystem: diskSubsystem,
 Name: onlineTotal,
-Help: "Total disks online",
+Help: "Total drives online",
 Type: gaugeMetric,
 }
 }
@@ -382,7 +382,7 @@ func getClusterDisksTotalMD() MetricDescription {
 Namespace: clusterMetricNamespace,
 Subsystem: diskSubsystem,
 Name: total,
-Help: "Total disks",
+Help: "Total drives",
 Type: gaugeMetric,
 }
 }
@@ -402,7 +402,7 @@ func getNodeDiskTotalBytesMD() MetricDescription {
 Namespace: nodeMetricNamespace,
 Subsystem: diskSubsystem,
 Name: totalBytes,
-Help: "Total storage on a disk",
+Help: "Total storage on a drive",
 Type: gaugeMetric,
 }
 }
@@ -702,7 +702,7 @@ func getCacheHitsTotalMD() MetricDescription {
 Namespace: minioNamespace,
 Subsystem: cacheSubsystem,
 Name: hitsTotal,
-Help: "Total number of disk cache hits",
+Help: "Total number of drive cache hits",
 Type: counterMetric,
 }
 }
@@ -712,7 +712,7 @@ func getCacheHitsMissedTotalMD() MetricDescription {
 Namespace: minioNamespace,
 Subsystem: cacheSubsystem,
 Name: missedTotal,
-Help: "Total number of disk cache misses",
+Help: "Total number of drive cache misses",
 Type: counterMetric,
 }
 }
@@ -752,7 +752,7 @@ func getCacheTotalBytesMD() MetricDescription {
 Namespace: minioNamespace,
 Subsystem: cacheSubsystem,
 Name: totalBytes,
-Help: "Total size of cache disk in bytes",
+Help: "Total size of cache drive in bytes",
 Type: gaugeMetric,
 }
 }
@@ -273,7 +273,7 @@ func cacheMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(cacheNamespace, "hits", "total"),
-"Total number of disk cache hits in current MinIO instance",
+"Total number of drive cache hits in current MinIO instance",
 nil, nil),
 prometheus.CounterValue,
 float64(cacheObjLayer.CacheStats().getHits()),
@@ -281,7 +281,7 @@ func cacheMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(cacheNamespace, "misses", "total"),
-"Total number of disk cache misses in current MinIO instance",
+"Total number of drive cache misses in current MinIO instance",
 nil, nil),
 prometheus.CounterValue,
 float64(cacheObjLayer.CacheStats().getMisses()),
@@ -328,7 +328,7 @@ func cacheMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName("cache", "total", "size"),
-"Indicates total size of cache disk",
+"Indicates total size of cache drive",
 []string{"disk"}, nil),
 prometheus.GaugeValue,
 float64(cdStats.TotalCapacity),
@@ -593,7 +593,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(minioNamespace, "disks", "offline"),
-"Total number of offline disks in current MinIO server instance",
+"Total number of offline drives in current MinIO server instance",
 nil, nil),
 prometheus.GaugeValue,
 float64(offlineDisks.Sum()),
@@ -602,8 +602,8 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 // MinIO Total Disks per node
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
-prometheus.BuildFQName(minioNamespace, "disks", "total"),
-"Total number of disks for current MinIO server instance",
+prometheus.BuildFQName(minioNamespace, "drives", "total"),
+"Total number of drives for current MinIO server instance",
 nil, nil),
 prometheus.GaugeValue,
 float64(totalDisks.Sum()),
@@ -614,7 +614,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(diskNamespace, "storage", "used"),
-"Total disk storage used on the disk",
+"Total disk storage used on the drive",
 []string{"disk"}, nil),
 prometheus.GaugeValue,
 float64(disk.UsedSpace),
@@ -625,7 +625,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(diskNamespace, "storage", "available"),
-"Total available space left on the disk",
+"Total available space left on the drive",
 []string{"disk"}, nil),
 prometheus.GaugeValue,
 float64(disk.AvailableSpace),
@@ -636,7 +636,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 ch <- prometheus.MustNewConstMetric(
 prometheus.NewDesc(
 prometheus.BuildFQName(diskNamespace, "storage", "total"),
-"Total space on the disk",
+"Total space on the drive",
 []string{"disk"}, nil),
 prometheus.GaugeValue,
 float64(disk.TotalSpace),
@@ -449,7 +449,7 @@ func (sys *NotificationSys) updateBloomFilter(ctx context.Context, current uint6
 serverBF, err := client.cycleServerBloomFilter(ctx, req)
 if false && intDataUpdateTracker.debug {
 b, _ := json.MarshalIndent(serverBF, "", " ")
-logger.Info("Disk %v, Bloom filter: %v", client.host.Name, string(b))
+logger.Info("Drive %v, Bloom filter: %v", client.host.Name, string(b))
 }
 // Keep lock while checking result.
 mu.Lock()
@@ -186,7 +186,7 @@ func (e SignatureDoesNotMatch) Error() string {
 type StorageFull struct{}
 func (e StorageFull) Error() string {
-return "Storage reached its minimum free disk threshold."
+return "Storage reached its minimum free drive threshold."
 }
 // SlowDown too many file descriptors open or backend busy .
@@ -305,20 +305,20 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou
 switch err {
 case errNotFirstDisk:
 // Fresh setup, wait for first server to be up.
-logger.Info("Waiting for the first server to format the disks (elapsed %s)\n", getElapsedTime())
+logger.Info("Waiting for the first server to format the drives (elapsed %s)\n", getElapsedTime())
 continue
 case errFirstDiskWait:
 // Fresh setup, wait for other servers to come up.
-logger.Info("Waiting for all other servers to be online to format the disks (elapses %s)\n", getElapsedTime())
+logger.Info("Waiting for all other servers to be online to format the drives (elapses %s)\n", getElapsedTime())
 continue
 case errErasureReadQuorum:
 // no quorum available continue to wait for minimum number of servers.
-logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n",
+logger.Info("Waiting for a minimum of %d drives to come online (elapsed %s)\n",
 len(endpoints)/2, getElapsedTime())
 continue
 case errErasureWriteQuorum:
 // no quorum available continue to wait for minimum number of servers.
-logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n",
+logger.Info("Waiting for a minimum of %d drives to come online (elapsed %s)\n",
 (len(endpoints)/2)+1, getElapsedTime())
 continue
 case errErasureV3ThisEmpty:
@@ -631,10 +631,10 @@ func serverMain(ctx *cli.Context) {
 // initialize the new disk cache objects.
 if globalCacheConfig.Enabled {
-logger.Info(color.Yellow("WARNING: Disk caching is deprecated for single/multi drive MinIO setups. Please migrate to using MinIO S3 gateway instead of disk caching"))
+logger.Info(color.Yellow("WARNING: Drive caching is deprecated for single/multi drive MinIO setups. Please migrate to using MinIO S3 gateway instead of drive caching"))
 var cacheAPI CacheObjectLayer
 cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
-logger.FatalIf(err, "Unable to initialize disk caching")
+logger.FatalIf(err, "Unable to initialize drive caching")
 setCacheObjectLayer(cacheAPI)
 }
@@ -31,7 +31,7 @@ func TestNewObjectLayer(t *testing.T) {
 nDisks := 1
 disks, err := getRandomDisks(nDisks)
 if err != nil {
-t.Fatal("Failed to create disks for the backend")
+t.Fatal("Failed to create drives for the backend")
 }
 defer removeRoots(disks)
@@ -50,7 +50,7 @@ func TestNewObjectLayer(t *testing.T) {
 nDisks = 16
 disks, err = getRandomDisks(nDisks)
 if err != nil {
-t.Fatal("Failed to create disks for the backend")
+t.Fatal("Failed to create drives for the backend")
 }
 defer removeRoots(disks)
@@ -206,7 +206,7 @@ func getStorageInfoMsg(storageInfo StorageInfo) string {
 onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
 if storageInfo.Backend.Type == madmin.Erasure {
 if offlineDisks.Sum() > 0 {
-mcMessage = "Use `mc admin info` to look for latest server/disk info\n"
+mcMessage = "Use `mc admin info` to look for latest server/drive info\n"
 }
 diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", onlineDisks.Sum(), offlineDisks.Sum())
@@ -542,7 +542,7 @@ func (c *SiteReplicationSys) PeerJoinReq(ctx context.Context, arg madmin.SRPeerJ
 ServiceAccountAccessKey: arg.SvcAcctAccessKey,
 }
 if err = c.saveToDisk(ctx, state); err != nil {
-return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to disk on %s: %v", ourName, err))
+return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
 }
 return nil
 }
@@ -2143,7 +2143,7 @@ func (c *SiteReplicationSys) InternalRemoveReq(ctx context.Context, objectAPI Ob
 }
 if err := c.saveToDisk(ctx, state); err != nil {
-return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to disk on %s: %v", ourName, err))
+return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
 }
 return nil
 }
@@ -3492,7 +3492,7 @@ func (c *SiteReplicationSys) PeerEditReq(ctx context.Context, arg madmin.PeerInf
 }
 }
 if err := c.saveToDisk(ctx, c.state); err != nil {
-return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to disk on %s: %v", ourName, err))
+return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err))
 }
 return nil
 }
@@ -56,8 +56,8 @@ func BenchmarkDecodeDiskInfoMsgp(b *testing.B) {
 FSType: "xfs",
 RootDisk: true,
 Healing: true,
-Endpoint: "http://localhost:9001/tmp/disk1",
-MountPath: "/tmp/disk1",
+Endpoint: "http://localhost:9001/tmp/drive1",
+MountPath: "/tmp/drive1",
 ID: "uuid",
 Error: "",
 }
@@ -85,8 +85,8 @@ func BenchmarkDecodeDiskInfoGOB(b *testing.B) {
 FSType: "xfs",
 RootDisk: true,
 Healing: true,
-Endpoint: "http://localhost:9001/tmp/disk1",
-MountPath: "/tmp/disk1",
+Endpoint: "http://localhost:9001/tmp/drive1",
+MountPath: "/tmp/drive1",
 ID: "uuid",
 Error: "",
 }
@@ -115,8 +115,8 @@ func BenchmarkEncodeDiskInfoMsgp(b *testing.B) {
 FSType: "xfs",
 RootDisk: true,
 Healing: true,
-Endpoint: "http://localhost:9001/tmp/disk1",
-MountPath: "/tmp/disk1",
+Endpoint: "http://localhost:9001/tmp/drive1",
+MountPath: "/tmp/drive1",
 ID: "uuid",
 Error: "",
 }
@@ -140,8 +140,8 @@ func BenchmarkEncodeDiskInfoGOB(b *testing.B) {
 FSType: "xfs",
 RootDisk: true,
 Healing: true,
-Endpoint: "http://localhost:9001/tmp/disk1",
-MountPath: "/tmp/disk1",
+Endpoint: "http://localhost:9001/tmp/drive1",
+MountPath: "/tmp/drive1",
 ID: "uuid",
 Error: "",
 }
|
@ -28,34 +28,34 @@ import (
|
||||
var errUnexpected = StorageErr("unexpected error, please report this issue at https://github.com/minio/minio/issues")
|
||||
|
||||
// errCorruptedFormat - corrupted backend format.
|
||||
var errCorruptedFormat = StorageErr("corrupted backend format, specified disk mount has unexpected previous content")
|
||||
var errCorruptedFormat = StorageErr("corrupted backend format, specified drive mount has unexpected previous content")
|
||||
|
||||
// errUnformattedDisk - unformatted disk found.
|
||||
var errUnformattedDisk = StorageErr("unformatted disk found")
|
||||
var errUnformattedDisk = StorageErr("unformatted drive found")
|
||||
|
||||
// errInconsistentDisk - inconsistent disk found.
|
||||
var errInconsistentDisk = StorageErr("inconsistent disk found")
|
||||
var errInconsistentDisk = StorageErr("inconsistent drive found")
|
||||
|
||||
// errUnsupporteDisk - when disk does not support O_DIRECT flag.
|
||||
var errUnsupportedDisk = StorageErr("disk does not support O_DIRECT")
|
||||
var errUnsupportedDisk = StorageErr("drive does not support O_DIRECT")
|
||||
|
||||
// errDiskFull - cannot create volume or files when disk is full.
|
||||
var errDiskFull = StorageErr("disk path full")
|
||||
var errDiskFull = StorageErr("drive path full")
|
||||
|
||||
// errDiskNotDir - cannot use storage disk if its not a directory
|
||||
var errDiskNotDir = StorageErr("disk is not directory or mountpoint")
|
||||
var errDiskNotDir = StorageErr("drive is not directory or mountpoint")
|
||||
|
||||
// errDiskNotFound - cannot find the underlying configured disk anymore.
|
||||
var errDiskNotFound = StorageErr("disk not found")
|
||||
var errDiskNotFound = StorageErr("drive not found")
|
||||
|
||||
// errFaultyRemoteDisk - remote disk is faulty.
|
||||
var errFaultyRemoteDisk = StorageErr("remote disk is faulty")
|
||||
var errFaultyRemoteDisk = StorageErr("remote drive is faulty")
|
||||
|
||||
// errFaultyDisk - disk is faulty.
|
||||
var errFaultyDisk = StorageErr("disk is faulty")
|
||||
var errFaultyDisk = StorageErr("drive is faulty")
|
||||
|
||||
// errDiskAccessDenied - we don't have write permissions on disk.
|
||||
var errDiskAccessDenied = StorageErr("disk access denied")
|
||||
var errDiskAccessDenied = StorageErr("drive access denied")
|
||||
|
||||
// errFileNotFound - cannot find the file.
|
||||
var errFileNotFound = StorageErr("file not found")
|
||||
@ -101,7 +101,7 @@ var errBitrotHashAlgoInvalid = StorageErr("bit-rot hash algorithm is invalid")
|
||||
var errCrossDeviceLink = StorageErr("Rename across devices not allowed, please fix your backend configuration")
|
||||
|
||||
// errMinDiskSize - cannot create volume or files when disk size is less than threshold.
|
||||
var errMinDiskSize = StorageErr("The disk size is less than 900MiB threshold")
|
||||
var errMinDiskSize = StorageErr("The drive size is less than 900MiB threshold")
|
||||
|
||||
// errLessData - returned when less data available than what was requested.
|
||||
var errLessData = StorageErr("less data available than what was requested")
|
||||
@ -117,10 +117,10 @@ var errDoneForNow = errors.New("done for now")
|
||||
var errSkipFile = errors.New("skip this file")
|
||||
|
||||
// Returned by FS drive mode when a fresh disk is specified.
|
||||
var errFreshDisk = errors.New("FS backend requires existing disk")
|
||||
var errFreshDisk = errors.New("FS backend requires existing drive")
|
||||
|
||||
// errXLBackend XL drive mode requires fresh deployment.
|
||||
var errXLBackend = errors.New("XL backend requires fresh disk")
|
||||
var errXLBackend = errors.New("XL backend requires fresh drive")
|
||||
|
||||
// StorageErr represents error generated by xlStorage call.
|
||||
type StorageErr string
|
||||
|
@@ -49,7 +49,7 @@ import (
 xnet "github.com/minio/pkg/net"
 )
-var errDiskStale = errors.New("disk stale")
+var errDiskStale = errors.New("drive stale")
 // To abstract a disk over network.
 type storageRESTServer struct {
@@ -1171,17 +1171,17 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) {
 case errors.Is(err, errUnsupportedDisk):
 var hint string
 if endpoint.URL != nil {
-hint = fmt.Sprintf("Disk '%s' does not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support", endpoint.Path)
+hint = fmt.Sprintf("Drive '%s' does not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support", endpoint.Path)
 } else {
-hint = "Disks do not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support"
+hint = "Drives do not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support"
 }
 logger.Fatal(config.ErrUnsupportedBackend(err).Hint(hint), "Unable to initialize backend")
 case errors.Is(err, errDiskNotDir):
 var hint string
 if endpoint.URL != nil {
-hint = fmt.Sprintf("Disk '%s' is not a directory, MinIO erasure coding needs a directory", endpoint.Path)
+hint = fmt.Sprintf("Drive '%s' is not a directory, MinIO erasure coding needs a directory", endpoint.Path)
 } else {
-hint = "Disks are not directories, MinIO erasure coding needs directories"
+hint = "Drives are not directories, MinIO erasure coding needs directories"
 }
 logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend")
 case errors.Is(err, errDiskAccessDenied):
@@ -1200,25 +1200,25 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) {
 hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. <path> && sudo chmod u+rxw <path>`", username)
 }
 if !exit {
-logger.LogIf(GlobalContext, fmt.Errorf("disk is not writable %s, %s", endpoint, hint))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint))
 } else {
 logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend")
 }
 case errors.Is(err, errFaultyDisk):
 if !exit {
-logger.LogIf(GlobalContext, fmt.Errorf("disk is faulty at %s, please replace the drive - disk will be offline", endpoint))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint))
 } else {
 logger.Fatal(err, "Unable to initialize backend")
 }
 case errors.Is(err, errDiskFull):
 if !exit {
-logger.LogIf(GlobalContext, fmt.Errorf("disk is already full at %s, incoming I/O will fail - disk will be offline", endpoint))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint))
 } else {
 logger.Fatal(err, "Unable to initialize backend")
 }
 default:
 if !exit {
-logger.LogIf(GlobalContext, fmt.Errorf("disk returned an unexpected error at %s, please investigate - disk will be offline (%w)", endpoint, err))
+logger.LogIf(GlobalContext, fmt.Errorf("Drive returned an unexpected error at %s, please investigate - drive will be offline (%w)", endpoint, err))
 } else {
 logger.Fatal(err, "Unable to initialize backend")
 }
@@ -1898,7 +1898,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 nDisks := 16
 erasureDisks, err := getRandomDisks(nDisks)
 if err != nil {
-t.Fatalf("Initialization of disks for Erasure setup: %s", err)
+t.Fatalf("Initialization of drives for Erasure setup: %s", err)
 }
 objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(erasureDisks...))
 if err != nil {
@@ -80,7 +80,7 @@ func initTierDeletionJournal(ctx context.Context) (*tierJournal, error) {
 return j, nil
 }
-return nil, errors.New("no local disk found")
+return nil, errors.New("no local drive found")
 }
 // rotate rotates the journal. If a read-only journal already exists it does
@@ -57,10 +57,10 @@ var errInvalidRangeSource = errors.New("Range specified exceeds source object si
 // error returned by disks which are to be initialized are waiting for the
 // first server to initialize them in distributed set to initialize them.
-var errNotFirstDisk = errors.New("Not first disk")
+var errNotFirstDisk = errors.New("Not first drive")
 // error returned by first disk waiting to initialize other servers.
-var errFirstDiskWait = errors.New("Waiting on other disks")
+var errFirstDiskWait = errors.New("Waiting on other drives")
 // error returned for a negative actual size.
 var errInvalidDecompressedSize = errors.New("Invalid Decompressed Size")
@@ -757,7 +757,7 @@ func (p *xlStorageDiskIDCheck) checkHealth(ctx context.Context) (err error) {
 t = time.Since(time.Unix(0, atomic.LoadInt64(&p.health.lastSuccess)))
 if t > maxTimeSinceLastSuccess {
 if atomic.CompareAndSwapInt32(&p.health.status, diskHealthOK, diskHealthFaulty) {
-logger.LogAlwaysIf(ctx, fmt.Errorf("taking disk %s offline, time since last response %v", p.storage.String(), t.Round(time.Millisecond)))
+logger.LogAlwaysIf(ctx, fmt.Errorf("taking drive %s offline, time since last response %v", p.storage.String(), t.Round(time.Millisecond)))
 go p.monitorDiskStatus()
 }
 return errFaultyDisk
@@ -789,7 +789,7 @@ func (p *xlStorageDiskIDCheck) monitorDiskStatus() {
 Force: false,
 })
 if err == nil {
-logger.Info("Able to read+write, bringing disk %s online.", p.storage.String())
+logger.Info("Able to read+write, bringing drive %s online.", p.storage.String())
 atomic.StoreInt32(&p.health.status, diskHealthOK)
 return
 }
@ -659,7 +659,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
// should fail with disk not found.
err = xlStorageDeletedStorage.DeleteVol(context.Background(), "Del-Vol", false)
if err != errDiskNotFound {
t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
}

@ -723,7 +723,7 @@ func TestXLStorageStatVol(t *testing.T) {
// should fail with disk not found.
_, err = xlStorageDeletedStorage.StatVol(context.Background(), "Stat vol")
if err != errDiskNotFound {
t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
}

@ -902,7 +902,7 @@ func TestXLStorageListDir(t *testing.T) {
Force: false,
})
if err != errDiskNotFound {
t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
}

@ -1044,7 +1044,7 @@ func TestXLStorageDeleteFile(t *testing.T) {
Force: false,
})
if err != errDiskNotFound {
t.Errorf("Expected: \"Disk not found\", got \"%s\"", err)
t.Errorf("Expected: \"Drive not found\", got \"%s\"", err)
}
}

@ -128,7 +128,7 @@ NOTE:
"hosts": {
"poolId": 1,
"setId": 1,
"disks": [
"drives": [
"/mnt/data1",
"/mnt/data2",
"/mnt/data3",

@ -838,7 +838,7 @@
"instant": true,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Total online disks in MinIO Cluster",
"legendFormat": "Total online drives in MinIO Cluster",
"metric": "process_start_time_seconds",
"refId": "A",
"step": 60

@ -846,7 +846,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Total Online Disks",
"title": "Total Online Drives",
"type": "stat"
},
{

@ -1276,7 +1276,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Total Offline Disks",
"title": "Total Offline Drives",
"type": "stat"
},
{

@ -2453,7 +2453,7 @@
"dashLength": 10,
"dashes": false,
"datasource": "${DS_PROMETHEUS}",
"description": "Number of online disks per MinIO Server",
"description": "Number of online drives per MinIO Server",
"fieldConfig": {
"defaults": {
"links": []

@ -214,7 +214,7 @@ Example 1:

ErrUnsupportedBackend = newErrFn(
"Unable to write to the backend",
"Please ensure your disk supports O_DIRECT",
"Please ensure your drive supports O_DIRECT",
"",
)

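ErrUnsupportedBackend tells the operator to check O_DIRECT support on the drive. One way to verify a mount up front is to attempt an O_DIRECT open and treat EINVAL as "not supported"; the Linux-only probe below is an illustrative sketch of that check, not the check MinIO itself runs, and the /mnt/data1 path is simply borrowed from the example earlier in this diff.

//go:build linux

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// supportsODirect probes whether the filesystem backing dir accepts
// O_DIRECT opens; tmpfs, for example, rejects them with EINVAL.
func supportsODirect(dir string) (bool, error) {
	probe := filepath.Join(dir, ".odirect-probe")
	f, err := os.OpenFile(probe, os.O_CREATE|os.O_WRONLY|syscall.O_DIRECT, 0o644)
	if err != nil {
		if errors.Is(err, syscall.EINVAL) {
			return false, nil // filesystem does not support O_DIRECT
		}
		return false, err // some other problem (permissions, missing directory, ...)
	}
	f.Close()
	os.Remove(probe)
	return true, nil
}

func main() {
	ok, err := supportsODirect("/mnt/data1") // adjust the path to the mount under test
	fmt.Println(ok, err)
}
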
@ -132,12 +132,12 @@ func (sc *StorageClass) String() string {
}

// Parses given storageClassEnv and returns a storageClass structure.
// Supported Storage Class format is "Scheme:Number of parity disks".
// Supported Storage Class format is "Scheme:Number of parity drives".
// Currently only supported scheme is "EC".
func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
s := strings.Split(storageClassEnv, ":")

// only two elements allowed in the string - "scheme" and "number of parity disks"
// only two elements allowed in the string - "scheme" and "number of parity drives"
if len(s) > 2 {
return StorageClass{}, config.ErrStorageClassValue(nil).Msg("Too many sections in " + storageClassEnv)
} else if len(s) < 2 {

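Per the comment above, the accepted format is "Scheme:Number of parity drives" with EC as the only scheme, for example EC:4. Below is a minimal stand-alone parser following that description; the storageClass struct and the error handling are simplified stand-ins for this sketch, not the config package's actual types.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// storageClass is a simplified stand-in for the config type in the diff.
type storageClass struct {
	Scheme string
	Parity int
}

// parseStorageClass accepts "Scheme:Number of parity drives", e.g. "EC:4".
func parseStorageClass(env string) (storageClass, error) {
	s := strings.Split(env, ":")
	if len(s) != 2 {
		return storageClass{}, fmt.Errorf("expected exactly two sections in %q", env)
	}
	if s[0] != "EC" {
		return storageClass{}, fmt.Errorf("unsupported scheme %q, only EC is supported", s[0])
	}
	parity, err := strconv.Atoi(s[1])
	if err != nil {
		return storageClass{}, fmt.Errorf("invalid parity drive count %q: %w", s[1], err)
	}
	return storageClass{Scheme: s[0], Parity: parity}, nil
}

func main() {
	sc, err := parseStorageClass("EC:4")
	fmt.Println(sc, err) // {EC 4} <nil>
}
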
@ -203,7 +203,7 @@ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {

if ssParity > 0 && rrsParity > 0 {
if ssParity > 0 && ssParity < rrsParity {
return fmt.Errorf("Standard storage class parity disks %d should be greater than or equal to Reduced redundancy storage class parity disks %d", ssParity, rrsParity)
return fmt.Errorf("Standard storage class parity drives %d should be greater than or equal to Reduced redundancy storage class parity drives %d", ssParity, rrsParity)
}
}
return nil

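The rule being enforced here is an ordering between the two classes: Standard storage class parity must be at least the Reduced redundancy parity, since otherwise the nominally safer class would tolerate fewer drive failures. A reduced sketch of that check, with the setDriveCount-related validation from the real signature dropped for brevity:

package main

import "fmt"

// validateParity mirrors the rule from the diff: the Standard storage class
// must not use fewer parity drives than Reduced redundancy.
func validateParity(ssParity, rrsParity int) error {
	if ssParity > 0 && rrsParity > 0 && ssParity < rrsParity {
		return fmt.Errorf("Standard storage class parity drives %d should be greater than or equal to Reduced redundancy storage class parity drives %d", ssParity, rrsParity)
	}
	return nil
}

func main() {
	fmt.Println(validateParity(4, 2)) // <nil>: EC:4 standard with EC:2 reduced redundancy is fine
	fmt.Println(validateParity(2, 4)) // rejected: standard would be weaker than reduced redundancy
}
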
@ -158,11 +158,11 @@ func TestParityCount(t *testing.T) {
}
parity := scfg.GetParityForSC(tt.sc)
if (tt.disksCount - parity) != tt.expectedData {
t.Errorf("Test %d, Expected data disks %d, got %d", i+1, tt.expectedData, tt.disksCount-parity)
t.Errorf("Test %d, Expected data drives %d, got %d", i+1, tt.expectedData, tt.disksCount-parity)
continue
}
if parity != tt.expectedParity {
t.Errorf("Test %d, Expected parity disks %d, got %d", i+1, tt.expectedParity, parity)
t.Errorf("Test %d, Expected parity drives %d, got %d", i+1, tt.expectedParity, parity)
}
}
}

@ -41,7 +41,7 @@ func GetInfo(path string) (info Info, err error) {
FSType: getFSType(s.Fstypename[:]),
}
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -41,7 +41,7 @@ func GetInfo(path string) (info Info, err error) {
FSType: getFSType(s.Fstypename[:]),
}
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -46,7 +46,7 @@ func GetInfo(path string) (info Info, err error) {
// XFS can show wrong values at times error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -76,7 +76,7 @@ func GetInfo(path string) (info Info, err error) {
// XFS can show wrong values at times error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -76,7 +76,7 @@ func GetInfo(path string) (info Info, err error) {
// XFS can show wrong values at times error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -41,7 +41,7 @@ func GetInfo(path string) (info Info, err error) {
FSType: string(s.Fstypename[:]),
}
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -41,7 +41,7 @@ func GetInfo(path string) (info Info, err error) {
FSType: getFSType(s.F_fstypename[:]),
}
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -41,7 +41,7 @@ func GetInfo(path string) (info Info, err error) {
FSType: getFSType(s.Fstr[:]),
}
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
info.Used = info.Total - info.Free
return info, nil

@ -70,7 +70,7 @@ func GetInfo(path string) (info Info, err error) {
uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes)))

if uint64(lpTotalNumberOfFreeBytes) > uint64(lpTotalNumberOfBytes) {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'",
return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'",
uint64(lpTotalNumberOfFreeBytes), uint64(lpTotalNumberOfBytes), path)
}

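The same guard recurs across all of the platform-specific GetInfo implementations above: if the filesystem reports more free space than total space, the statistics cannot be trusted, and silently computing Used = Total - Free would underflow, so the call errors out and asks for an fsck. A condensed sketch of that guard with a trimmed-down Info struct:

package main

import "fmt"

// Info is a trimmed-down version of the struct filled in by GetInfo.
type Info struct {
	Total, Free, Used uint64
}

// checkUsage mirrors the guard from the diff: free space can never exceed
// total space on a healthy filesystem, so anything else is treated as
// corruption rather than producing a bogus usage figure.
func checkUsage(info Info, path string) (Info, error) {
	if info.Free > info.Total {
		return info, fmt.Errorf("detected free space (%d) > total drive space (%d), fs corruption at (%s). please run 'fsck'",
			info.Free, info.Total, path)
	}
	info.Used = info.Total - info.Free
	return info, nil
}

func main() {
	_, err := checkUsage(Info{Total: 100, Free: 150}, "/mnt/data1")
	fmt.Println(err) // refuses to report an underflowed usage value
}
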
@ -65,5 +65,5 @@ func ansiRestoreAttributes() {

// logIgnoreError returns true if the error can be ignored.
func logIgnoreError(err error) bool {
return err == nil || errors.Is(err, context.Canceled) || errors.Is(err, http.ErrServerClosed) || err.Error() == "disk not found"
return err == nil || errors.Is(err, context.Canceled) || errors.Is(err, http.ErrServerClosed) || err.Error() == "drive not found"
}

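logIgnoreError filters out errors that are expected during shutdown or when a drive has simply dropped out, so they do not flood the log. A small usage sketch follows; the logIf wrapper is an assumed helper for this example, and only logIgnoreError itself mirrors the diff.

package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"
)

// logIgnoreError reports whether an error is routine enough to skip logging:
// clean shutdowns, cancelled contexts, and drives that are simply gone.
func logIgnoreError(err error) bool {
	return err == nil || errors.Is(err, context.Canceled) || errors.Is(err, http.ErrServerClosed) || err.Error() == "drive not found"
}

// logIf is an assumed helper showing how such a filter is typically applied.
func logIf(err error) {
	if logIgnoreError(err) {
		return
	}
	log.Printf("unexpected error: %v", err)
}

func main() {
	logIf(context.Canceled)                // suppressed
	logIf(errors.New("drive not found"))   // suppressed
	logIf(errors.New("corrupted xl.meta")) // logged
	fmt.Println("done")
}
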
@ -263,7 +263,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
// fully it should make sure to respond with '412'
// instead, see cmd/storage-rest-server.go for ideas.
if c.HealthCheckFn != nil && resp.StatusCode == http.StatusPreconditionFailed {
err = fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with disk ID mismatch", c.url.Host)
err = fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with drive ID mismatch", c.url.Host)
logger.LogOnceIf(ctx, err, c.url.Host)
c.MarkOffline(err)
}

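The final hunk shows the storage REST client interpreting a 412 PreconditionFailed response as a drive-ID mismatch and temporarily marking the remote host offline so callers stop retrying against it. A reduced sketch of that reaction; the restClient type and the markOffline placeholder are invented for this example and stand in for the real client's MarkOffline and health-check hook.

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
)

// restClient is a toy stand-in for the storage REST client in the diff.
type restClient struct {
	host    string
	offline atomic.Bool
}

// markOffline is a placeholder for the real client's MarkOffline, which
// also schedules health checks to bring the host back.
func (c *restClient) markOffline(err error) {
	c.offline.Store(true)
	fmt.Println("offline:", err)
}

// handleResponse inspects the status code the way Call does in the diff:
// a 412 means the remote answered with a different drive ID than expected.
func (c *restClient) handleResponse(status int) {
	if status == http.StatusPreconditionFailed {
		err := fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with drive ID mismatch", c.host)
		c.markOffline(err)
	}
}

func main() {
	c := &restClient{host: "node3:9000"}
	c.handleResponse(http.StatusPreconditionFailed)
	fmt.Println("is offline:", c.offline.Load())
}
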