mirror of https://github.com/minio/minio.git
fix: add stricter validation for erasure server pools (#11299)
During expansion we need to validate that:

- a new deployment is expanded with the newer constraints
- an existing deployment is expanded with its older constraints
- multiple server pools are rejected if they have a different deploymentID or distribution algo
commit 1ad2b7b699
parent b5049d541f
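Taken together, the new constraints boil down to a cross-pool compatibility check at startup. Below is a minimal, self-contained sketch of that rule set; poolFormat and validatePools are hypothetical stand-ins for illustration, not MinIO's actual types:

package main

import "fmt"

// poolFormat is a hypothetical stand-in for the on-disk format metadata
// (deployment ID, distribution algorithm, parity) that each pool reports.
type poolFormat struct {
	DeploymentID     string
	DistributionAlgo string
	ParityDrives     int
}

// validatePools mirrors the constraint added by this commit: every pool
// in a deployment must agree on deployment ID, distribution algo, and
// parity ratio, otherwise startup is rejected.
func validatePools(pools []poolFormat) error {
	if len(pools) == 0 {
		return nil
	}
	ref := pools[0]
	for i, p := range pools[1:] {
		if p.DeploymentID != ref.DeploymentID {
			return fmt.Errorf("pool %d: deployment ID mismatch - expected %s, got %s", i+2, ref.DeploymentID, p.DeploymentID)
		}
		if p.DistributionAlgo != ref.DistributionAlgo {
			return fmt.Errorf("pool %d: distributionAlgo mismatch - expected %s, got %s", i+2, ref.DistributionAlgo, p.DistributionAlgo)
		}
		if p.ParityDrives != ref.ParityDrives {
			return fmt.Errorf("pool %d: parity ratio mismatch - expected %d, got %d", i+2, ref.ParityDrives, p.ParityDrives)
		}
	}
	return nil
}

func main() {
	pools := []poolFormat{
		{"id-1", "CRCMOD", 2},
		{"id-2", "CRCMOD", 2}, // different deployment ID: rejected
	}
	fmt.Println(validatePools(pools))
}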
Makefile | 2 +-

@@ -38,7 +38,7 @@ fmt:
 lint:
 	@echo "Running $@ check"
 	@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
-	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
+	@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=10m --config ./.golangci.yml
 
 ruleguard:
 	@echo "Running $@ check"
@@ -363,19 +363,12 @@ func createServerEndpoints(serverAddr string, args ...string) (
 	}
 
 	var foundPrevLocal bool
-	var commonParityDrives int
-
 	for _, arg := range args {
 		setArgs, err := GetAllSets(arg)
 		if err != nil {
 			return nil, -1, err
 		}
 
-		parityDrives := ecDrivesNoConfig(len(setArgs[0]))
-		if commonParityDrives != 0 && commonParityDrives != parityDrives {
-			return nil, -1, fmt.Errorf("All serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, parityDrives)
-		}
-
 		endpointList, gotSetupType, err := CreateEndpoints(serverAddr, foundPrevLocal, setArgs...)
 		if err != nil {
 			return nil, -1, err
@@ -388,10 +381,6 @@ func createServerEndpoints(serverAddr string, args ...string) (
 			return nil, -1, err
 		}
 		foundPrevLocal = endpointList.atleastOneEndpointLocal()
-		if commonParityDrives == 0 {
-			commonParityDrives = ecDrivesNoConfig(len(setArgs[0]))
-		}
-
 		if setupType == UnknownSetupType {
 			setupType = gotSetupType
 		}
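The parity check removed here was computed from the first set of each pool via ecDrivesNoConfig, before the on-disk distribution algo was known; it moves into newErasureServerPools below, where it can be applied per distribution algo. As a rough sketch of what such a default-parity helper computes, assuming the common default of half the drives per set when no storage-class config overrides it (an assumption for illustration, not MinIO's exact logic):

package main

import "fmt"

// defaultParityDrives is an illustrative stand-in for ecDrivesNoConfig:
// absent a storage-class override, assume parity is half the set size.
func defaultParityDrives(setDriveCount int) int {
	return setDriveCount / 2
}

func main() {
	// A 16-drive set defaults to 8 parity and a 12-drive set to 6, so
	// mixing the two pools would fail the "same parity ratio" validation.
	fmt.Println(defaultParityDrives(16), defaultParityDrives(12))
}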
@@ -55,8 +55,11 @@ func (z *erasureServerPools) SingleZone() bool {
 // Initialize new pool of erasure sets.
 func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 	var (
-		deploymentID string
-		err          error
+		deploymentID       string
+		distributionAlgo   string
+		commonParityDrives int
+		drivesPerSet       int
+		err                error
 
 		formats      = make([]*formatErasureV3, len(endpointServerPools))
 		storageDisks = make([][]StorageAPI, len(endpointServerPools))
@@ -72,15 +75,54 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ
 				localDrives = append(localDrives, endpoint.Path)
 			}
 		}
 
+		if drivesPerSet == 0 {
+			drivesPerSet = ep.DrivesPerSet
+		}
+
+		if commonParityDrives == 0 {
+			commonParityDrives = ecDrivesNoConfig(ep.DrivesPerSet)
+		}
+
+		// Once distribution algo is set, validate it for the next pool,
+		// before proceeding to write to drives.
+		switch distributionAlgo {
+		case formatErasureVersionV3DistributionAlgoV2:
+			if drivesPerSet != 0 && drivesPerSet != ep.DrivesPerSet {
+				return nil, fmt.Errorf("All legacy serverPools should have same drive per set ratio - expected %d, got %d", drivesPerSet, ep.DrivesPerSet)
+			}
+		case formatErasureVersionV3DistributionAlgoV3:
+			parityDrives := ecDrivesNoConfig(ep.DrivesPerSet)
+			if commonParityDrives != 0 && commonParityDrives != parityDrives {
+				return nil, fmt.Errorf("All current serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, parityDrives)
+			}
+		}
+
 		storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
-			ep.SetCount, ep.DrivesPerSet, deploymentID)
+			ep.SetCount, ep.DrivesPerSet, deploymentID, distributionAlgo)
 		if err != nil {
 			return nil, err
 		}
 
 		if deploymentID == "" {
 			// all zones should have same deployment ID
 			deploymentID = formats[i].ID
 		}
 
+		if distributionAlgo == "" {
+			distributionAlgo = formats[i].Erasure.DistributionAlgo
+		}
+
+		// Validate if users brought different DeploymentID pools.
+		if deploymentID != formats[i].ID {
+			return nil, fmt.Errorf("All serverPools should have same deployment ID expected %s, got %s", deploymentID, formats[i].ID)
+		}
+
+		// Validate if users brought different distribution algo pools.
+		if distributionAlgo != formats[i].Erasure.DistributionAlgo {
+			return nil, fmt.Errorf("All serverPools should have same distributionAlgo expected %s, got %s", distributionAlgo, formats[i].Erasure.DistributionAlgo)
+		}
+
 		z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i])
 		if err != nil {
 			return nil, err
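The switch above is the core of the stricter validation: legacy (V2 distribution algo) pools must keep the same drives-per-set ratio, current (V3) pools must keep the same parity ratio, and deploymentID/distributionAlgo may never differ across pools. A condensed, runnable sketch of that decision table, with hypothetical constants and types in place of MinIO's:

package main

import "fmt"

// Hypothetical stand-ins for the format's distribution-algo identifiers;
// the real constants live alongside formatErasureV3.
const (
	distAlgoV2 = "v2" // legacy pools: fixed drives-per-set
	distAlgoV3 = "v3" // current pools: fixed parity ratio
)

type pool struct {
	distributionAlgo string
	drivesPerSet     int
	parityDrives     int
}

// checkExpansion mirrors the switch added in newErasureServerPools:
// legacy pools must keep the same drives-per-set, current pools must
// keep the same parity, and the algo itself may never change.
func checkExpansion(existing, next pool) error {
	if existing.distributionAlgo != next.distributionAlgo {
		return fmt.Errorf("distributionAlgo mismatch: %s vs %s", existing.distributionAlgo, next.distributionAlgo)
	}
	switch existing.distributionAlgo {
	case distAlgoV2:
		if existing.drivesPerSet != next.drivesPerSet {
			return fmt.Errorf("legacy pools need same drives per set: expected %d, got %d", existing.drivesPerSet, next.drivesPerSet)
		}
	case distAlgoV3:
		if existing.parityDrives != next.parityDrives {
			return fmt.Errorf("current pools need same parity: expected %d, got %d", existing.parityDrives, next.parityDrives)
		}
	}
	return nil
}

func main() {
	first := pool{distAlgoV3, 16, 8}
	fmt.Println(checkExpansion(first, pool{distAlgoV3, 12, 8})) // ok: parity matches under V3
	fmt.Println(checkExpansion(first, pool{distAlgoV3, 12, 6})) // rejected: parity differs
}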
@@ -173,18 +173,18 @@ func TestNewErasureSets(t *testing.T) {
 	}
 
 	endpoints := mustGetNewEndpoints(erasureDisks...)
-	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "")
+	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "", "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
-	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "")
+	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "", "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
 	// Initializes all erasure disks
-	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
+	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "", "")
 	if err != nil {
 		t.Fatalf("Unable to format disks for erasure, %s", err)
 	}
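Note the call-site convention visible in the updated tests: first-time setup passes an empty distributionAlgo (just like the existing empty deploymentID), letting the format step generate fresh values; only expansion paths pass inherited ones.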
@@ -837,7 +837,7 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
 }
 
 // initFormatErasure - save Erasure format configuration on all disks.
-func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID string, sErrs []error) (*formatErasureV3, error) {
+func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID, distributionAlgo string, sErrs []error) (*formatErasureV3, error) {
 	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, len(storageDisks))
 	wantAtMost := ecDrivesNoConfig(setDriveCount)

@@ -848,6 +848,9 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount,
 			disk := storageDisks[i*setDriveCount+j]
 			newFormat := format.Clone()
 			newFormat.Erasure.This = format.Erasure.Sets[i][j]
+			if distributionAlgo != "" {
+				newFormat.Erasure.DistributionAlgo = distributionAlgo
+			}
 			if deploymentID != "" {
 				newFormat.ID = deploymentID
 			}
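With the extra parameter, initFormatErasure stamps the inherited distribution algo (like the deployment ID before it) onto every freshly cloned per-disk format, so a new pool joins the deployment with consistent metadata. A simplified sketch of the clone-and-stamp pattern, using a hypothetical slimmed-down format struct in place of formatErasureV3:

package main

import "fmt"

// format is a hypothetical slimmed-down format record; the real
// formatErasureV3 carries more fields (sets layout, per-disk UUID,
// format versioning).
type format struct {
	ID               string
	DistributionAlgo string
}

// stampFormat mirrors the pattern in initFormatErasure: a fresh format
// is generated per disk, then inherited identity fields override the
// generated defaults only when the caller supplied them (non-empty).
func stampFormat(fresh format, deploymentID, distributionAlgo string) format {
	if distributionAlgo != "" {
		fresh.DistributionAlgo = distributionAlgo
	}
	if deploymentID != "" {
		fresh.ID = deploymentID
	}
	return fresh
}

func main() {
	def := format{ID: "new-uuid", DistributionAlgo: "v3-default"}
	// Expanding an existing deployment: inherit its ID and algo.
	fmt.Println(stampFormat(def, "existing-uuid", "v2-legacy"))
	// First-time setup: empty strings keep the generated defaults.
	fmt.Println(stampFormat(def, "", ""))
}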
@@ -228,7 +228,7 @@ func isServerResolvable(endpoint Endpoint) error {
 // connect to list of endpoints and load all Erasure disk formats, validate the formats are correct
 // and are in quorum, if no formats are found attempt to initialize all of them for the first
 // time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
+func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 	// Initialize all storage disks
 	storageDisks, errs := initStorageDisksWithErrors(endpoints)

@@ -276,7 +276,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 		humanize.Ordinal(poolCount), setCount, setDriveCount)
 
 	// Initialize erasure code format on disks
-	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, sErrs)
+	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, distributionAlgo, sErrs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -345,7 +345,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 }
 
 // Format disks before initialization of object layer.
-func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) {
+func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
 	if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 {
 		return nil, nil, errInvalidArgument
 	}

@@ -372,7 +372,7 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou
 	for {
 		select {
 		case <-ticker.C:
-			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID)
+			storageDisks, format, err := connectLoadInitFormats(tries, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
 			if err != nil {
 				tries++
 				switch err {
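Finally, distributionAlgo is threaded through connectLoadInitFormats into the ticker-driven retry loop of waitForFormatErasure, so every retry formats drives with the same inherited identity. A rough sketch of that wait-loop shape, with illustrative names and an assumed retryable-error set (not MinIO's exact control flow):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotReady = errors.New("disks not ready")

// retryFormat sketches the ticker-driven wait loop of waitForFormatErasure:
// keep retrying the format step until it succeeds or a non-retryable error
// (such as a cross-pool validation failure) surfaces.
func retryFormat(format func(tries int) error) error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	tries := 0
	for range ticker.C {
		err := format(tries)
		if err == nil {
			return nil
		}
		tries++
		if !errors.Is(err, errNotReady) {
			return err // validation failures abort immediately
		}
	}
	return nil
}

func main() {
	err := retryFormat(func(tries int) error {
		if tries < 2 {
			return errNotReady // simulate disks coming up slowly
		}
		return nil
	})
	fmt.Println(err)
}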