fix: allow expansion with newer constraints for older setups (#11372)

Previously we restricted expansion so that older setups had to keep the
same "stripe" (drives per erasure set) count across pools. Relax this:
newer pools can now be added to older setups as long as they satisfy the
newer constraint of a common parity ratio across all pools.
Harshavardhana authored on 2021-01-29 11:40:55 -08:00, committed by GitHub
parent d48ff93ba3
commit 1e53bf2789
7 changed files with 37 additions and 45 deletions
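
For context, a minimal runnable sketch of the relaxed rule. Here `defaultParity` is a hypothetical stand-in for the server's default-parity lookup (the real code uses `ecDrivesNoConfig`), and the drive counts are made up; the point is that the first pool fixes a common parity, and a later pool is accepted whenever that parity is valid for its stripe size, instead of requiring identical stripe sizes:

package main

import "fmt"

// defaultParity mirrors the documented defaults: parity 2 for 4-5 drive
// sets, 3 for 6-7 drives, and 4 for 8 drives and up. Hypothetical
// stand-in for the server's internal default-parity lookup.
func defaultParity(setDriveCount int) int {
	switch {
	case setDriveCount <= 5:
		return 2
	case setDriveCount <= 7:
		return 3
	default:
		return 4
	}
}

func main() {
	common := defaultParity(16) // first pool: 16-drive sets -> parity 4

	// Old rule: every new pool had to use the same 16-drive stripe.
	// New rule: any stripe is fine if the common parity fits it,
	// i.e. parity >= 2 (the assumed minimum) and parity <= drives/2.
	for _, drives := range []int{16, 12, 8, 4} {
		ok := common >= 2 && common <= drives/2
		fmt.Printf("expand with %2d-drive sets: allowed=%v\n", drives, ok)
	}
}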


@@ -173,6 +173,22 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
 	}, nil
 }
 
+// ValidateParity validate standard storage class parity.
+func ValidateParity(ssParity, setDriveCount int) error {
+	// SS parity disks should be greater than or equal to minParityDisks.
+	// Parity below minParityDisks is not supported.
+	if ssParity > 0 && ssParity < minParityDisks {
+		return fmt.Errorf("Standard storage class parity %d should be greater than or equal to %d",
+			ssParity, minParityDisks)
+	}
+
+	if ssParity > setDriveCount/2 {
+		return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
+	}
+
+	return nil
+}
+
 // Validates the parity disks.
 func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
 	// SS parity disks should be greater than or equal to minParityDisks.
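
To make the new validator's contract concrete, a test-style sketch (not part of the commit) that would live alongside the package's existing tests; it assumes minParityDisks is 2:

package storageclass

import "testing"

func TestValidateParitySketch(t *testing.T) {
	cases := []struct {
		parity, drives int
		wantErr        bool
	}{
		{0, 16, false}, // zero means "unset": the minimum check is skipped
		{1, 16, true},  // below minParityDisks
		{4, 16, false}, // within [minParityDisks, drives/2]
		{9, 16, true},  // more than half the set
	}
	for i, c := range cases {
		err := ValidateParity(c.parity, c.drives)
		if (err != nil) != c.wantErr {
			t.Errorf("case %d: ValidateParity(%d, %d) = %v, wantErr %v",
				i, c.parity, c.drives, err, c.wantErr)
		}
	}
}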


@@ -57,7 +57,6 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 		deploymentID       string
 		distributionAlgo   string
 		commonParityDrives int
-		drivesPerSet       int
 		err                error
 
 		formats = make([]*formatErasureV3, len(endpointServerPools))
@@ -75,26 +74,17 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 			}
 		}
 
-		if drivesPerSet == 0 {
-			drivesPerSet = ep.DrivesPerSet
-		}
-
+		// If storage class is not set during startup, default values are used
+		// -- Default for Reduced Redundancy Storage class is, parity = 2
+		// -- Default for Standard Storage class is, parity = 2 - disks 4, 5
+		// -- Default for Standard Storage class is, parity = 3 - disks 6, 7
+		// -- Default for Standard Storage class is, parity = 4 - disks 8 to 16
 		if commonParityDrives == 0 {
 			commonParityDrives = ecDrivesNoConfig(ep.DrivesPerSet)
 		}
 
-		// Once distribution algo is set, validate it for the next pool,
-		// before proceeding to write to drives.
-		switch distributionAlgo {
-		case formatErasureVersionV3DistributionAlgoV2:
-			if drivesPerSet != 0 && drivesPerSet != ep.DrivesPerSet {
-				return nil, fmt.Errorf("All legacy serverPools should have same drive per set ratio - expected %d, got %d", drivesPerSet, ep.DrivesPerSet)
-			}
-		case formatErasureVersionV3DistributionAlgoV3:
-			parityDrives := ecDrivesNoConfig(ep.DrivesPerSet)
-			if commonParityDrives != 0 && commonParityDrives != parityDrives {
-				return nil, fmt.Errorf("All current serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, parityDrives)
-			}
+		if err = storageclass.ValidateParity(commonParityDrives, ep.DrivesPerSet); err != nil {
+			return nil, fmt.Errorf("All current serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, ecDrivesNoConfig(ep.DrivesPerSet))
 		}
 
 		storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
@@ -122,7 +112,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 			return nil, fmt.Errorf("All serverPools should have same distributionAlgo expected %s, got %s", distributionAlgo, formats[i].Erasure.DistributionAlgo)
 		}
 
-		z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i])
+		z.serverPools[i], err = newErasureSets(ctx, ep.Endpoints, storageDisks[i], formats[i], commonParityDrives)
 		if err != nil {
 			return nil, err
 		}
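
A worked example of the new check (drive counts invented, assuming a minimum parity of 2): if the first pool uses 6-drive sets, commonParityDrives defaults to ecDrivesNoConfig(6) = 3. Expanding with a 12-drive-set pool now succeeds, since ValidateParity(3, 12) holds (3 is within [2, 6]) even though the stripe sizes differ, whereas under the old rule the mismatched stripe alone would have been fatal. A 4-drive-set pool is still rejected, because 3 > 4/2, returning the "same parity ratio" error above.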


@@ -331,26 +331,12 @@ func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI {
 const defaultMonitorConnectEndpointInterval = defaultMonitorNewDiskInterval + time.Second*5
 
 // Initialize new set of erasure coded sets.
-func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3) (*erasureSets, error) {
+func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3, defaultParityCount int) (*erasureSets, error) {
 	setCount := len(format.Erasure.Sets)
 	setDriveCount := len(format.Erasure.Sets[0])
 
 	endpointStrings := make([]string, len(endpoints))
 
-	// If storage class is not set during startup, default values are used
-	// -- Default for Reduced Redundancy Storage class is, parity = 2
-	// -- Default for Standard Storage class is, parity = 2 - disks 4, 5
-	// -- Default for Standard Storage class is, parity = 3 - disks 6, 7
-	// -- Default for Standard Storage class is, parity = 4 - disks 8 to 16
-	var defaultParityCount int
-
-	switch format.Erasure.DistributionAlgo {
-	case formatErasureVersionV3DistributionAlgoV3:
-		defaultParityCount = getDefaultParityBlocks(setDriveCount)
-	default:
-		defaultParityCount = setDriveCount / 2
-	}
-
 	// Initialize the erasure sets instance.
 	s := &erasureSets{
 		sets: make([]*erasureObjects, setCount),
@@ -1278,7 +1264,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
 	}(storageDisks)
 
 	formats, sErrs := loadFormatErasureAll(storageDisks, true)
-	if err = checkFormatErasureValues(formats, s.setDriveCount); err != nil {
+	if err = checkFormatErasureValues(formats, storageDisks, s.setDriveCount); err != nil {
 		return madmin.HealResultItem{}, err
 	}
 
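
Note the behavioral shift hiding in the removed switch: a legacy (DistributionAlgoV2) deployment used to default to defaultParityCount = setDriveCount / 2, e.g. parity 8 for a 16-drive set, while the caller now supplies ecDrivesNoConfig(ep.DrivesPerSet), which defaults to parity 4 for that same set. In other words, older setups now inherit the newer, lower default parity for newly written objects unless a storage-class setting overrides it, which is the "newer constraints for older setups" the commit title describes.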


@@ -189,7 +189,7 @@ func TestNewErasureSets(t *testing.T) {
 		t.Fatalf("Unable to format disks for erasure, %s", err)
 	}
 
-	if _, err := newErasureSets(ctx, endpoints, storageDisks, format); err != nil {
+	if _, err := newErasureSets(ctx, endpoints, storageDisks, format, ecDrivesNoConfig(16)); err != nil {
 		t.Fatalf("Unable to initialize erasure")
 	}
 }


@@ -444,38 +444,38 @@ func loadFormatErasure(disk StorageAPI) (format *formatErasureV3, err error) {
 }
 
 // Valid formatErasure basic versions.
-func checkFormatErasureValue(formatErasure *formatErasureV3) error {
+func checkFormatErasureValue(formatErasure *formatErasureV3, disk StorageAPI) error {
 	// Validate format version and format type.
 	if formatErasure.Version != formatMetaVersionV1 {
-		return fmt.Errorf("Unsupported version of backend format [%s] found", formatErasure.Version)
+		return fmt.Errorf("Unsupported version of backend format [%s] found on %s", formatErasure.Version, disk)
 	}
 	if formatErasure.Format != formatBackendErasure {
-		return fmt.Errorf("Unsupported backend format [%s] found", formatErasure.Format)
+		return fmt.Errorf("Unsupported backend format [%s] found on %s", formatErasure.Format, disk)
 	}
 	if formatErasure.Erasure.Version != formatErasureVersionV3 {
-		return fmt.Errorf("Unsupported Erasure backend format found [%s]", formatErasure.Erasure.Version)
+		return fmt.Errorf("Unsupported Erasure backend format found [%s] on %s", formatErasure.Erasure.Version, disk)
 	}
 	return nil
 }
 
 // Check all format values.
-func checkFormatErasureValues(formats []*formatErasureV3, setDriveCount int) error {
+func checkFormatErasureValues(formats []*formatErasureV3, disks []StorageAPI, setDriveCount int) error {
 	for i, formatErasure := range formats {
 		if formatErasure == nil {
 			continue
 		}
-		if err := checkFormatErasureValue(formatErasure); err != nil {
+		if err := checkFormatErasureValue(formatErasure, disks[i]); err != nil {
 			return err
 		}
 		if len(formats) != len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]) {
 			return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)",
-				humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]))
+				disks[i], len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]))
 		}
 		// Only if custom erasure drive count is set, verify if the
 		// set_drive_count was manually set - we need to honor what is
 		// present on the drives.
 		if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != setDriveCount {
-			return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), setDriveCount)
+			return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", disks[i], len(formatErasure.Erasure.Sets[0]), setDriveCount)
 		}
 	}
 	return nil
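
The practical gain from threading disks through these checks: format errors now name the offending endpoint rather than an ordinal. For example (endpoint invented for illustration), a bad drive now reports "Unsupported version of backend format [2] found on http://node1/data1", so the operator no longer has to map "3rd disk" back to a physical drive.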


@@ -267,7 +267,7 @@ func TestCheckFormatErasureValue(t *testing.T) {
 
 	// Valid all test cases.
 	for i, testCase := range testCases {
-		if err := checkFormatErasureValue(testCase.format); err != nil && testCase.success {
+		if err := checkFormatErasureValue(testCase.format, nil); err != nil && testCase.success {
			t.Errorf("Test %d: Expected failure %s", i+1, err)
 		}
 	}


@@ -266,7 +266,7 @@ func connectLoadInitFormats(retryCount int, firstDisk bool, endpoints Endpoints,
 	// most part unless one of the formats is not consistent
 	// with expected Erasure format. For example if a user is
 	// trying to pool FS backend into an Erasure set.
-	if err = checkFormatErasureValues(formatConfigs, setDriveCount); err != nil {
+	if err = checkFormatErasureValues(formatConfigs, storageDisks, setDriveCount); err != nil {
 		return nil, nil, err
 	}
 