/*
 * MinIO Cloud Storage, (C) 2016 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"sync"
	"time"

	"github.com/dustin/go-humanize"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/sync/errgroup"
)

var printEndpointError = func() func(Endpoint, error, bool) {
	var mutex sync.Mutex
	printOnce := make(map[Endpoint]map[string]int)

	return func(endpoint Endpoint, err error, once bool) {
		reqInfo := (&logger.ReqInfo{}).AppendTags("endpoint", endpoint.String())
		ctx := logger.SetReqInfo(GlobalContext, reqInfo)
		mutex.Lock()
		defer mutex.Unlock()

		m, ok := printOnce[endpoint]
		if !ok {
			m = make(map[string]int)
			m[err.Error()]++
			printOnce[endpoint] = m
			if once {
				logger.LogAlwaysIf(ctx, err)
				return
			}
		}
		// `once` is set and we reached here, which means the error was
		// already printed once.
		if once {
			return
		}
		// `once` is not set, so check if the same error occurred 3 times in
		// a row; if so, make sure we print it to call attention.
		if m[err.Error()] > 2 {
			logger.LogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err))
			// Reduce the count to introduce further delay in printing,
			// but let it print again after 2 more attempts.
			m[err.Error()]--
			m[err.Error()]--
		}
		m[err.Error()]++
	}
}()
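
// Illustrative usage sketch (not a call site from the original file): callers
// that repeatedly probe peers can route connection failures through
// printEndpointError so the log is not flooded with the same error. The
// endpoint and err values below are assumed to come from the caller's retry loop.
//
//	// Log the failure exactly once for this endpoint:
//	printEndpointError(endpoint, err, true)
//
//	// Or let the failure surface again only after it has repeated a few times:
//	printEndpointError(endpoint, err, false)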

// Migrates backend format of local disks.
func formatErasureMigrateLocalEndpoints(endpoints Endpoints) error {
	g := errgroup.WithNErrs(len(endpoints))
	for index, endpoint := range endpoints {
		if !endpoint.IsLocal {
			continue
		}
		index := index
		g.Go(func() error {
			epPath := endpoints[index].Path
			err := formatErasureMigrate(epPath)
			if err != nil && !errors.Is(err, os.ErrNotExist) {
				return err
			}
			return nil
		}, index)
	}
	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}
	return nil
}
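
// Minimal sketch of the errgroup fan-out pattern used above and in the cleanup
// routine below: one goroutine per endpoint, with errors collected per index.
// doWork is a placeholder name for illustration only.
//
//	g := errgroup.WithNErrs(len(endpoints))
//	for index := range endpoints {
//		index := index // capture loop variable for the closure
//		g.Go(func() error {
//			return doWork(endpoints[index])
//		}, index)
//	}
//	for _, err := range g.Wait() {
//		if err != nil {
//			return err
//		}
//	}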

// Cleans up tmp directory of local disks.
func formatErasureCleanupTmpLocalEndpoints(endpoints Endpoints) error {
	g := errgroup.WithNErrs(len(endpoints))
	for index, endpoint := range endpoints {
		if !endpoint.IsLocal {
			continue
		}
		index := index
		g.Go(func() error {
			epPath := endpoints[index].Path
			// Need to move temporary objects left behind from previous run of minio
			// server to a unique directory under `minioMetaTmpBucket-old` to clean
			// up `minioMetaTmpBucket` for the current run.
			//
			// /disk1/.minio.sys/tmp-old/
			//  |__ 33a58b40-aecc-4c9f-a22f-ff17bfa33b62
			//  |__ e870a2c1-d09c-450c-a69c-6eaa54a89b3e
			//
			// In this example, the `33a58b40-aecc-4c9f-a22f-ff17bfa33b62` directory contains
			// temporary objects from one of the previous runs of minio server.
			tmpOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())
			if err := renameAll(pathJoin(epPath, minioMetaTmpBucket),
				tmpOld); err != nil && err != errFileNotFound {
				return fmt.Errorf("unable to rename (%s -> %s) %w",
					pathJoin(epPath, minioMetaTmpBucket),
					tmpOld,
					osErrToFileErr(err))
			}

			// Renames and schedules for purging all bucket metacache.
			renameAllBucketMetacache(epPath)

			// Removal of tmp-old folder is backgrounded completely.
			go removeAll(pathJoin(epPath, minioMetaTmpBucket+"-old"))

			if err := mkdirAll(pathJoin(epPath, minioMetaTmpBucket), 0777); err != nil {
				return fmt.Errorf("unable to create (%s) %w",
					pathJoin(epPath, minioMetaTmpBucket),
					err)
			}
			return nil
		}, index)
	}
	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}
	return nil
}
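
// The cleanup above follows a rename-then-background-delete pattern: the live
// tmp directory is renamed out of the way synchronously (cheap), and the slow
// recursive delete happens in the background so startup is not blocked.
// A minimal standard-library sketch of the same idea, with hypothetical
// diskPath and uuid values:
//
//	tmp := filepath.Join(diskPath, ".minio.sys", "tmp")
//	tmpOld := filepath.Join(diskPath, ".minio.sys", "tmp-old")
//	if err := os.MkdirAll(tmpOld, 0777); err != nil {
//		return err
//	}
//	if err := os.Rename(tmp, filepath.Join(tmpOld, uuid)); err != nil && !os.IsNotExist(err) {
//		return err
//	}
//	go os.RemoveAll(tmpOld) // slow delete happens off the startup path
//	if err := os.MkdirAll(tmp, 0777); err != nil {
//		return err
//	}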

// The following error message was added to fix a regression in release
// RELEASE.2018-03-16T22-52-12Z after migrating v1 to v2 to v3. That
// migration failed to capture the '.This' field properly, which indicates
// the disk UUID association. The error below is returned when we see this
// situation in format.json; for more info refer to
// https://github.com/minio/minio/issues/5667
var errErasureV3ThisEmpty = fmt.Errorf("Erasure format version 3 has This field empty")

// isServerResolvable - checks if the endpoint is resolvable
// by sending a naked HTTP request with liveness checks.
func isServerResolvable(endpoint Endpoint, timeout time.Duration) error {
	serverURL := &url.URL{
		Scheme: endpoint.Scheme,
		Host:   endpoint.Host,
		Path:   pathJoin(healthCheckPathPrefix, healthCheckLivenessPath),
	}

	var tlsConfig *tls.Config
	if globalIsTLS {
		tlsConfig = &tls.Config{
			RootCAs: globalRootCAs,
		}
	}

	httpClient := &http.Client{
		Transport:
		// For more details about the various values used here refer to
		// the https://golang.org/pkg/net/http/#Transport documentation
		&http.Transport{
			Proxy:                 http.ProxyFromEnvironment,
			DialContext:           xhttp.NewCustomDialContext(3 * time.Second),
			ResponseHeaderTimeout: 3 * time.Second,
			TLSHandshakeTimeout:   3 * time.Second,
			ExpectContinueTimeout: 3 * time.Second,
			TLSClientConfig:       tlsConfig,
			// Go net/http automatically unzips if content-type is
			// gzip; disable this feature, as we are always interested
			// in the raw stream.
			DisableCompression: true,
		},
	}
	defer httpClient.CloseIdleConnections()

	ctx, cancel := context.WithTimeout(GlobalContext, timeout)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, serverURL.String(), nil)
	if err != nil {
		cancel()
		return err
	}

	resp, err := httpClient.Do(req)
	cancel()
	if err != nil {
		return err
	}
	xhttp.DrainBody(resp.Body)

	return nil
}
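
// Illustrative sketch of how a retry loop might combine isServerResolvable with
// printEndpointError; the endpoint value is assumed to come from the caller.
//
//	if err := isServerResolvable(endpoint, 2*time.Second); err != nil {
//		// Surface the reason this peer cannot be reached, at most once.
//		printEndpointError(endpoint, err, true)
//	}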

// connect to the list of endpoints and load all Erasure disk formats, validate that the formats
// are correct and in quorum. If no formats are found, attempt to initialize all of them for the
// first time. Additionally make sure to close all the disks used in this attempt.
func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
	// Initialize all storage disks
	storageDisks, errs := initStorageDisksWithErrors(endpoints)

	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	for i, err := range errs {
		if err != nil {
			if err != errDiskNotFound {
				return nil, nil, fmt.Errorf("Disk %s: %w", endpoints[i], err)
			}
			if retryCount >= 5 {
				logger.Info("Unable to connect to %s: %v\n", endpoints[i], isServerResolvable(endpoints[i], time.Second))
			}
		}
	}

	// Attempt to load all `format.json` from all disks.
	formatConfigs, sErrs := loadFormatErasureAll(storageDisks, false)
	// Check if we have any read errors for `format.json`.
	for i, sErr := range sErrs {
		// Print the error nonetheless, since it is perhaps unhandled.
		if sErr != errUnformattedDisk && sErr != errDiskNotFound && retryCount >= 5 {
			if sErr != nil {
				logger.Info("Unable to read 'format.json' from %s: %v\n", endpoints[i], sErr)
			}
		}
	}

	// Pre-emptively check if one of the formatted disks
	// is invalid. This function returns success for the
	// most part unless one of the formats is not consistent
	// with the expected Erasure format. For example, if a user is
	// trying to pool an FS backend into an Erasure set.
	if err = checkFormatErasureValues(formatConfigs, storageDisks, setDriveCount); err != nil {
		return nil, nil, err
	}

	// All disks report unformatted and this is the first disk, so we should initialize everyone.
	if shouldInitErasureDisks(sErrs) && firstDisk {
		logger.Info("Formatting %s pool, %v set(s), %v drives per set.",
			humanize.Ordinal(poolCount), setCount, setDriveCount)

		// Initialize erasure code format on disks
		format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, distributionAlgo, sErrs)
		if err != nil {
			return nil, nil, err
		}

		// Assign globalDeploymentID on first run for the
		// minio server managing the first disk
		globalDeploymentID = format.ID
		return storageDisks, format, nil
	}

	// Return an error when a quorum of disks is unformatted and this is not the
	// first disk - indicating we are waiting for the first server to be online.
	if quorumUnformattedDisks(sErrs) && !firstDisk {
		return nil, nil, errNotFirstDisk
	}

	// Return an error when a quorum of disks is unformatted but we are still
	// waiting for the rest of the servers to be online.
	if quorumUnformattedDisks(sErrs) && firstDisk {
		return nil, nil, errFirstDiskWait
	}

	// Mark all root disks down
	markRootDisksAsDown(storageDisks, sErrs)

	// The following function is added to fix a regression which was introduced
	// in release RELEASE.2018-03-16T22-52-12Z after migrating v1 to v2 to v3.
	// That migration failed to capture the '.This' field properly, which indicates
	// the disk UUID association. The function below is called to handle and fix
	// this regression; for more info refer to https://github.com/minio/minio/issues/5667
	if err = fixFormatErasureV3(storageDisks, endpoints, formatConfigs); err != nil {
		return nil, nil, err
	}

	// If any of the '.This' fields is still empty, we return an error.
	if formatErasureV3ThisEmpty(formatConfigs) {
		return nil, nil, errErasureV3ThisEmpty
	}

	format, err = getFormatErasureInQuorum(formatConfigs)
	if err != nil {
		return nil, nil, err
	}

	if format.ID == "" {
		// Not the first disk, wait until the first disk fixes deploymentID
		if !firstDisk {
			return nil, nil, errNotFirstDisk
		}
		if err = formatErasureFixDeploymentID(endpoints, storageDisks, format); err != nil {
			return nil, nil, err
		}
	}

	globalDeploymentID = format.ID

	if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil {
		return nil, nil, err
	}

	// The following will always recreate some directories inside .minio.sys of
	// the local disk, such as tmp, multipart and background-ops.
	initErasureMetaVolumesInLocalDisks(storageDisks, formatConfigs)

	return storageDisks, format, nil
}

// Format disks before initialization of object layer.
func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
	if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 {
		return nil, nil, errInvalidArgument
	}

	if err := formatErasureMigrateLocalEndpoints(endpoints); err != nil {
		return nil, nil, err
	}

	if err := formatErasureCleanupTmpLocalEndpoints(endpoints); err != nil {
		return nil, nil, err
	}

	// Prepare getElapsedTime() to calculate the elapsed time since we started trying to format disks.
	// All times are rounded to avoid showing milli, micro and nano seconds.
	formatStartTime := time.Now().Round(time.Second)
	getElapsedTime := func() string {
		return time.Now().Round(time.Second).Sub(formatStartTime).String()
	}

	// Wait on each try for an update.
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	var tries int
	for {
		select {
		case <-ticker.C:
			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
			if err != nil {
				tries++
				switch err {
				case errNotFirstDisk:
					// Fresh setup, wait for the first server to be up.
					logger.Info("Waiting for the first server to format the disks.")
					continue
				case errFirstDiskWait:
					// Fresh setup, wait for other servers to come up.
					logger.Info("Waiting for all other servers to be online to format the disks.")
					continue
				case errErasureReadQuorum:
					// No quorum available, continue to wait for the minimum number of servers.
					logger.Info("Waiting for a minimum of %d disks to come online (elapsed %s)\n", len(endpoints)/2, getElapsedTime())
					continue
				case errErasureV3ThisEmpty:
					// Need to wait for this error to be healed, so continue.
					continue
				default:
					// For all other unhandled errors we exit and fail.
					return nil, nil, err
				}
			}
			return storageDisks, format, nil
		case <-globalOSSignalCh:
			return nil, nil, fmt.Errorf("Initializing data volumes gracefully stopped")
		}
	}
}
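
// Illustrative usage sketch of waitForFormatErasure from a hypothetical server
// startup path; the argument names shown are placeholders, not the real call site.
//
//	storageDisks, format, err := waitForFormatErasure(firstDisk, endpoints,
//		poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
//	if err != nil {
//		return err // give up; all retryable conditions are handled internally
//	}
//	// storageDisks and format are now safe to hand to the erasure object layer.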