fix: simplify diskMap usage to keep certain checks predictable (#12519)
Bonus: also make sure that we Sanitize() the drives only during startup of the server, but not during disk reconnects.
This commit is contained in:
parent a6cbfc3600 · commit 4669d19f2a
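The heart of the fix, before the diff below: the set's disk map was keyed by a string that connectDisks() had to re-derive from each endpoint (endpoint.Path for local drives, endpoint.String() otherwise) to match what getDiskMap() had stored via disk.String(). Keying the map by the Endpoint value itself removes that duplicated derivation. A minimal standalone sketch of the failure mode this prevents — the Endpoint type here is a simplified stand-in, not MinIO's real one:

package main

import "fmt"

// Endpoint loosely mirrors MinIO's Endpoint: a comparable struct,
// usable directly as a map key.
type Endpoint struct {
	Host    string
	Path    string
	IsLocal bool
}

func (e Endpoint) String() string {
	if e.IsLocal {
		return e.Path // local endpoints render as a bare path
	}
	return e.Host + e.Path
}

func main() {
	ep := Endpoint{Host: "node1:9000", Path: "/disk1", IsLocal: true}

	// Before: writer and reader each derive a string key; if either
	// side forgets the IsLocal special case, the lookup silently misses.
	byString := map[string]bool{ep.String(): true}
	fmt.Println(byString[ep.Host+ep.Path]) // false -- drifted derivation

	// After: the Endpoint value itself is the key; there is nothing
	// left to keep in sync between getDiskMap() and connectDisks().
	byEndpoint := map[Endpoint]bool{ep: true}
	fmt.Println(byEndpoint[ep]) // true
}

This also lets connectDisks() drop its IsLocal special case entirely, as the last hunk below shows.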
cmd/erasure-sets.go

@@ -102,7 +102,7 @@ type erasureSets struct {
 }
 
 // Return false if endpoint is not connected or has been reconnected after last check
-func isEndpointConnectionStable(diskMap map[string]StorageAPI, endpoint string, lastCheck time.Time) bool {
+func isEndpointConnectionStable(diskMap map[Endpoint]StorageAPI, endpoint Endpoint, lastCheck time.Time) bool {
 	disk := diskMap[endpoint]
 	if disk == nil {
 		return false
@@ -116,8 +116,8 @@ func isEndpointConnectionStable(diskMap map[string]StorageAPI, endpoint string,
 	return true
 }
 
-func (s *erasureSets) getDiskMap() map[string]StorageAPI {
-	diskMap := make(map[string]StorageAPI)
+func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI {
+	diskMap := make(map[Endpoint]StorageAPI)
 
 	s.erasureDisksMu.RLock()
 	defer s.erasureDisksMu.RUnlock()
@@ -131,7 +131,7 @@ func (s *erasureSets) getDiskMap() map[string]StorageAPI {
 			if !disk.IsOnline() {
 				continue
 			}
-			diskMap[disk.String()] = disk
+			diskMap[disk.Endpoint()] = disk
 		}
 	}
 	return diskMap
@@ -213,11 +213,7 @@ func (s *erasureSets) connectDisks() {
 	var setsJustConnected = make([]bool, s.setCount)
 	diskMap := s.getDiskMap()
 	for _, endpoint := range s.endpoints {
-		diskPath := endpoint.String()
-		if endpoint.IsLocal {
-			diskPath = endpoint.Path
-		}
-		if isEndpointConnectionStable(diskMap, diskPath, s.lastConnectDisksOpTime) {
+		if isEndpointConnectionStable(diskMap, endpoint, s.lastConnectDisksOpTime) {
			continue
		}
		wg.Add(1)
cmd/prepare-storage.go

@@ -72,16 +72,16 @@ var printEndpointError = func() func(Endpoint, error, bool) {
 // Cleans up tmp directory of the local disk.
 func formatErasureCleanupTmp(diskPath string) error {
 	// Need to move temporary objects left behind from previous run of minio
-	// server to a unique directory under `minioMetaTmpBucket-old` to clean
+	// server to a unique directory under `minioMetaTmpBucket/.trash` to clean
 	// up `minioMetaTmpBucket` for the current run.
 	//
-	// /disk1/.minio.sys/tmp-old/
+	// /disk1/.minio.sys/tmp/.trash/uuid
 	// |__ 33a58b40-aecc-4c9f-a22f-ff17bfa33b62
 	// |__ e870a2c1-d09c-450c-a69c-6eaa54a89b3e
 	//
 	// In this example, `33a58b40-aecc-4c9f-a22f-ff17bfa33b62` directory contains
 	// temporary objects from one of the previous runs of minio server.
-	tmpOld := pathJoin(diskPath, minioMetaTmpBucket+"-old", mustGetUUID())
+	tmpOld := pathJoin(diskPath, minioMetaTmpDeletedBucket, mustGetUUID())
 	if err := renameAll(pathJoin(diskPath, minioMetaTmpBucket),
 		tmpOld); err != nil && err != errFileNotFound {
 		logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate",
@@ -94,7 +94,7 @@ func formatErasureCleanupTmp(diskPath string) error {
 	renameAllBucketMetacache(diskPath)
 
 	// Removal of tmp-old folder is backgrounded completely.
-	go removeAll(pathJoin(diskPath, minioMetaTmpBucket+"-old"))
+	go removeAll(tmpOld)
 
 	if err := mkdirAll(pathJoin(diskPath, minioMetaTmpBucket), 0777); err != nil {
 		logger.LogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty please investigate",
@@ -178,6 +178,13 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 		}
 	}(storageDisks)
 
+	// Sanitize all local disks during server startup.
+	for _, disk := range storageDisks {
+		if disk != nil && disk.IsLocal() {
+			disk.(*xlStorageDiskIDCheck).storage.(*xlStorage).Sanitize()
+		}
+	}
+
 	for i, err := range errs {
 		if err != nil {
 			if err == errDiskNotFound && retryCount >= 5 {
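The formatErasureCleanupTmp() hunks keep the same two-phase idea while relocating the trash directory: rename the stale tmp tree aside in a single operation, recreate an empty tmp, and leave the expensive recursive delete to a background goroutine. A minimal sketch of that pattern, assuming illustrative paths rather than MinIO's actual minioMetaTmpDeletedBucket constant:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cleanupTmp(diskPath string) error {
	tmp := filepath.Join(diskPath, ".minio.sys", "tmp")
	// The real code uses a fresh UUID here so leftovers from several
	// previous runs never collide with each other.
	trash := filepath.Join(diskPath, ".minio.sys", "trash", "run-1")

	if err := os.MkdirAll(filepath.Dir(trash), 0o777); err != nil {
		return err
	}
	// Rename is a single metadata operation on the same filesystem;
	// recursively deleting a tmp tree with millions of entries inline
	// would stall server startup.
	if err := os.Rename(tmp, trash); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.MkdirAll(tmp, 0o777); err != nil {
		return err
	}

	// The slow part runs entirely off the startup path. (In a real
	// server the process keeps running, so this goroutine completes.)
	go func() {
		if err := os.RemoveAll(trash); err != nil {
			fmt.Fprintln(os.Stderr, "background tmp cleanup:", err)
		}
	}()
	return nil
}

func main() {
	if err := cleanupTmp("/tmp/disk1"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

The rename is what keeps startup fast: it costs the same whether the previous run left behind one object or millions.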
cmd/storage-rest-common.go

@@ -18,7 +18,7 @@
 package cmd
 
 const (
-	storageRESTVersion       = "v36" // Changes to FileInfo for tier-journal
+	storageRESTVersion       = "v37" // cleanup behavior change at storage layer.
 	storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
 	storageRESTPrefix        = minioReservedBucketPath + "/storage"
 )
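No wire format changed here, but bumping storageRESTVersion still matters: storage RPC routes are mounted under the version prefix, so peers running the older storage layer (with the old cleanup behavior) cannot accidentally interoperate with the new one mid-upgrade. A rough, generic sketch of what a versioned route prefix buys — this is not MinIO's actual handler registration:

package main

import (
	"fmt"
	"log"
	"net/http"
)

const (
	storageRESTVersion = "v37"
	storageRESTPrefix  = "/minio/storage/" + storageRESTVersion + "/"
)

func main() {
	mux := http.NewServeMux()
	// Handlers live only under the current version's prefix; a peer
	// still speaking v36 hits an unknown route and surfaces a clear
	// error rather than silently running against changed semantics.
	mux.HandleFunc(storageRESTPrefix+"health", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "storage API", storageRESTVersion)
	})
	log.Fatal(http.ListenAndServe(":9000", mux))
}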
cmd/xl-storage.go

@@ -233,6 +233,16 @@ func newLocalXLStorage(path string) (*xlStorage, error) {
 	})
 }
 
+// Sanitize - sanitizes the `format.json`, cleanup tmp.
+// all other future cleanups should be added here.
+func (s *xlStorage) Sanitize() error {
+	if err := formatErasureMigrate(s.diskPath); err != nil && !errors.Is(err, os.ErrNotExist) {
+		return err
+	}
+
+	return formatErasureCleanupTmp(s.diskPath)
+}
+
 // Initialize a new storage disk.
 func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	path := ep.Path
@@ -302,14 +312,6 @@ func newXLStorage(ep Endpoint) (*xlStorage, error) {
 	w.Close()
 	Remove(filePath)
 
-	if err := formatErasureMigrate(p.diskPath); err != nil && !errors.Is(err, os.ErrNotExist) {
-		return p, err
-	}
-
-	if err := formatErasureCleanupTmp(p.diskPath); err != nil {
-		return p, err
-	}
-
 	// Success.
 	return p, nil
 }
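Hoisting formatErasureMigrate and formatErasureCleanupTmp out of newXLStorage() into the explicit Sanitize() method is what enforces the "only during startup" note in the commit message: the constructor also runs whenever a dropped drive reconnects at runtime, where re-running migration and tmp cleanup is unwanted. A sketch of the refactor's shape with simplified stand-in types (not MinIO's actual API):

package main

import "fmt"

type xlStorageSketch struct{ diskPath string }

// The constructor is now side-effect free, so it is safe to call on
// every reconnect of a drive at runtime.
func newStorage(path string) *xlStorageSketch {
	return &xlStorageSketch{diskPath: path}
}

// Sanitize bundles the one-time maintenance work: format migration
// followed by tmp cleanup, run once per drive at server startup.
func (s *xlStorageSketch) Sanitize() error {
	fmt.Println("migrate format.json, clean tmp under", s.diskPath)
	return nil
}

func main() {
	disks := []*xlStorageSketch{newStorage("/disk1"), newStorage("/disk2")}

	// Startup path (mirrors the connectLoadInitFormats hunk above).
	for _, d := range disks {
		if d != nil {
			if err := d.Sanitize(); err != nil {
				fmt.Println("sanitize:", err)
			}
		}
	}

	// A later reconnect only rebuilds the handle; no cleanup re-runs.
	_ = newStorage("/disk1")
}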