mirror of https://github.com/minio/minio.git
do not have to use the same distributionAlgo as first pool (#19031)
When we expand via pools, there is no reason to stick with the same distributionAlgo as the rest, since the algo only makes sense within a pool, not across pools. This allows newer pools to use newer codepaths and avoid legacy file lookups: a deployment that has existed since 2019 can expand with a new pool that uses a newer distribution format, making that pool more performant.
parent a9cf32811c
commit e3fbac9e24
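To make the "the algo only matters within a pool" point concrete, here is a minimal, self-contained Go sketch (not MinIO's actual code; the algorithm names and the FNV stand-in for the SipHash-based scheme are illustrative assumptions): each pool maps an object key to one of its own erasure sets using its own algorithm, so a pool added later can use a newer algorithm without changing how older pools place or look up objects.

package main

import (
	"fmt"
	"hash/crc32"
	"hash/fnv"
)

// pool is a toy stand-in for one server pool; distributionAlgo is per-pool
// state and is never compared across pools.
type pool struct {
	name             string
	distributionAlgo string
	setCount         int
}

// setIndex picks the erasure set for a key inside this pool only.
func (p pool) setIndex(key string) int {
	switch p.distributionAlgo {
	case "CRCMOD": // legacy-style placement (assumed name)
		return int(crc32.ChecksumIEEE([]byte(key)) % uint32(p.setCount))
	default: // newer placement; FNV-1a used here as a simple stand-in
		h := fnv.New64a()
		h.Write([]byte(key))
		return int(h.Sum64() % uint64(p.setCount))
	}
}

func main() {
	key := "photos/2024/img.jpg"
	pools := []pool{
		{name: "pool-1 (2019 deployment)", distributionAlgo: "CRCMOD", setCount: 4},
		{name: "pool-2 (new expansion)", distributionAlgo: "SIPMOD+PARITY", setCount: 4},
	}
	// The same key may map to different set indexes in each pool; that is
	// fine, because an object lives in exactly one pool and lookups use that
	// pool's own algorithm.
	for _, p := range pools {
		fmt.Printf("%s -> set %d for %q\n", p.name, p.setIndex(key), key)
	}
}

Under this model, forcing later pools to inherit the first pool's algorithm adds nothing for correctness, which is why the propagation below can be dropped.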
@@ -2351,7 +2351,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
 	case dns.ErrBucketConflict:
 		apiErr = ErrBucketAlreadyExists
 	default:
-		if strings.Contains(err.Error(), "requested declared a Content-Length") {
+		if strings.Contains(err.Error(), "request declared a Content-Length") {
 			apiErr = ErrIncompleteBody
 		} else {
 			apiErr = ErrInternalError
@@ -72,15 +72,15 @@ func (z *erasureServerPools) SinglePool() bool {
 func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 	var (
 		deploymentID string
-		distributionAlgo string
 		commonParityDrives int
 		err error
 
 		formats = make([]*formatErasureV3, len(endpointServerPools))
 		storageDisks = make([][]StorageAPI, len(endpointServerPools))
 		z = &erasureServerPools{
 			serverPools: make([]*erasureSets, len(endpointServerPools)),
 			s3Peer: NewS3PeerSys(endpointServerPools),
+			distributionAlgo: formatErasureVersionV3DistributionAlgoV3,
 		}
 	)
 
@@ -126,7 +126,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 
 		bootstrapTrace("waitForFormatErasure: loading disks", func() {
 			storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
-				ep.SetCount, ep.DrivesPerSet, deploymentID, distributionAlgo)
+				ep.SetCount, ep.DrivesPerSet, deploymentID)
 		})
 		if err != nil {
 			return nil, err
@@ -137,10 +137,6 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 			deploymentID = formats[i].ID
 		}
 
-		if distributionAlgo == "" {
-			distributionAlgo = formats[i].Erasure.DistributionAlgo
-		}
-
 		// Validate if users brought different DeploymentID pools.
 		if deploymentID != formats[i].ID {
 			return nil, fmt.Errorf("all pools must have same deployment ID - expected %s, got %s for pool(%s)", deploymentID, formats[i].ID, humanize.Ordinal(i+1))
@@ -157,10 +153,6 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
 			z.deploymentID = uuid.MustParse(deploymentID)
 		}
 
-		if distributionAlgo != "" && z.distributionAlgo == "" {
-			z.distributionAlgo = distributionAlgo
-		}
-
 		for _, storageDisk := range storageDisks[i] {
 			if storageDisk != nil && storageDisk.IsLocal() {
 				localDrives = append(localDrives, storageDisk)
@@ -174,18 +174,18 @@ func TestNewErasureSets(t *testing.T) {
 	}
 
 	endpoints := mustGetNewEndpoints(0, 16, erasureDisks...)
-	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "", "")
+	_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
-	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "", "")
+	_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "")
 	if err != errInvalidArgument {
 		t.Fatalf("Expecting error, got %s", err)
 	}
 
 	// Initializes all erasure disks
-	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "", "")
+	storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
 	if err != nil {
 		t.Fatalf("Unable to format drives for erasure, %s", err)
 	}
@@ -761,7 +761,7 @@ func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats
 }
 
 // initFormatErasure - save Erasure format configuration on all disks.
-func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID, distributionAlgo string, sErrs []error) (*formatErasureV3, error) {
+func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID string, sErrs []error) (*formatErasureV3, error) {
 	format := newFormatErasureV3(setCount, setDriveCount)
 	formats := make([]*formatErasureV3, len(storageDisks))
 	wantAtMost, err := ecDrivesNoConfig(setDriveCount)
@@ -778,9 +778,6 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, setDriveCount int, deploymentID, distributionAlgo string, sErrs []error) (*formatErasureV3, error) {
 		}
 		newFormat := format.Clone()
 		newFormat.Erasure.This = format.Erasure.Sets[i][j]
-		if distributionAlgo != "" {
-			newFormat.Erasure.DistributionAlgo = distributionAlgo
-		}
 		if deploymentID != "" {
 			newFormat.ID = deploymentID
 		}
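With the override above removed, the algorithm a brand-new pool writes to disk is whatever the freshly constructed format already carries. A rough sketch of that default follows, assuming newFormatErasureV3 stamps the newest constant on a fresh format; the struct layout and the constant's value are simplified assumptions, not the real MinIO source.

// Illustrative sketch only, not the real formatErasureV3.
const formatErasureVersionV3DistributionAlgoV3 = "SIPMOD+PARITY" // assumed value

type erasureInfoSketch struct {
	Version          string
	DistributionAlgo string
	Sets             [][]string
}

type formatSketch struct {
	ID      string
	Erasure erasureInfoSketch
}

// newFormatSketch mirrors the idea that a freshly built format defaults to the
// newest distribution algorithm, so initFormatErasure no longer needs a
// caller-supplied override.
func newFormatSketch(numSets, setLen int) *formatSketch {
	f := &formatSketch{
		Erasure: erasureInfoSketch{
			Version:          "3",
			DistributionAlgo: formatErasureVersionV3DistributionAlgoV3,
		},
	}
	f.Erasure.Sets = make([][]string, numSets)
	for i := range f.Erasure.Sets {
		f.Erasure.Sets[i] = make([]string, setLen) // drive UUIDs are assigned later, during format initialization
	}
	return f
}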
@@ -154,7 +154,7 @@ func isServerResolvable(endpoint Endpoint, timeout time.Duration) error {
 // connect to list of endpoints and load all Erasure disk formats, validate the formats are correct
 // and are in quorum, if no formats are found attempt to initialize all of them for the first
 // time. additionally make sure to close all the disks used in this attempt.
-func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
+func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 	// Initialize all storage disks
 	storageDisks, errs := initStorageDisksWithErrors(endpoints, storageOpts{cleanUp: true, healthCheck: true})
 
@@ -221,7 +221,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 		humanize.Ordinal(poolCount), setCount, setDriveCount)
 
 	// Initialize erasure code format on disks
-	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, distributionAlgo, sErrs)
+	format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, sErrs)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -260,7 +260,7 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) (storageDisks []StorageAPI, format *formatErasureV3, err error) {
 }
 
 // Format disks before initialization of object layer.
-func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
+func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) {
 	if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 {
 		return nil, nil, errInvalidArgument
 	}
@@ -277,7 +277,7 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
 		verbose bool
 	)
 
-	storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
+	storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID)
 	if err == nil {
 		return storageDisks, format, nil
 	}
@@ -295,7 +295,7 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID, distributionAlgo string) ([]StorageAPI, *formatErasureV3, error) {
 			tries = 1
 		}
 
-		storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID, distributionAlgo)
+		storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID)
		if err == nil {
			return storageDisks, format, nil
		}