Mirror of https://github.com/minio/minio.git (synced 2025-11-09 21:49:46 -05:00)
Add large bucket support for erasure coded backend (#5160)
This PR implements an object layer which combines input erasure sets of XL layers into a unified namespace. This object layer extends the existing erasure coded implementation; the design assumes that providing > 16 disks is a static configuration as well, i.e. if you started the setup with 32 disks as 4 sets of 8 disks each, then you must always provide 4 sets.

Some design details and restrictions:

- Objects are distributed using consistent ordering to a unique erasure coded set.
- Each set has its own dsync, so locks are synchronized properly at the set (erasure layer).
- Each set still has a maximum of 16 disks; you can start with multiple such sets statically.
- Sets are a static collection of disks and cannot be changed; there is no elastic expansion allowed.
- Sets are a static collection of disks and cannot be changed; there is no elastic removal allowed.
- ListObjects() across sets can be noticeably slower, since listing happens on all servers and the results are merged at this sets layer.

Fixes #5465
Fixes #5464
Fixes #5461
Fixes #5460
Fixes #5459
Fixes #5458
Fixes #5488
Fixes #5489
Fixes #5497
Fixes #5496
Committed by kannappanr
Parent: dd80256151
Commit: fb96779a8a
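The "consistent ordering" the commit message mentions means an object's key alone deterministically selects the erasure set it lives on, so no placement table or coordination is needed. A minimal sketch of how such a deterministic mapping can work, assuming a CRC32-based hash; the function name crcHashMod and its exact behavior here are illustrative, not necessarily the code this commit adds:

package main

import (
	"fmt"
	"hash/crc32"
)

// crcHashMod deterministically maps an object key to one of
// `cardinality` erasure sets. Because the result depends only on the
// key and the (static) set count, every node computes the same
// placement independently -- which is also why the set count must
// never change after setup.
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

func main() {
	// With 4 sets of 8 disks each (the 32-disk example from the
	// commit message), each object consistently lands on one set.
	for _, object := range []string{"bucket/a.txt", "bucket/b.txt"} {
		fmt.Printf("%s -> set %d\n", object, crcHashMod(object, 4))
	}
}

The ListObjects() restriction in the commit message follows directly from this scheme: a prefix does not hash to a single set, so listing must fan out to every set and merge the results.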
@@ -82,19 +82,17 @@ func testValidateParity(obj ObjectLayer, instanceType string, dirs []string, t T
 	// Reset global storage class flags
 	resetGlobalStorageEnvs()
 
-	// Set proper envs for a single node XL setup.
-	saveIsXL := globalIsXL
+	// Set globalEndpoints for a single node XL setup.
+	endpoints := globalEndpoints
 	defer func() {
-		globalIsXL = saveIsXL
+		globalEndpoints = endpoints
 	}()
 
+	isXL := globalIsXL
+	defer func() {
+		globalIsXL = isXL
+	}()
+
 	globalIsXL = true
-	saveSetDriveCount := globalXLSetDriveCount
-	defer func() {
-		globalXLSetDriveCount = saveSetDriveCount
-	}()
-	globalXLSetCount = len(dirs)
+	globalEndpoints = mustGetNewEndpointList(dirs...)
 
 	tests := []struct {
 		rrsParity int
@@ -131,16 +129,16 @@ func testGetRedundancyCount(obj ObjectLayer, instanceType string, dirs []string,
 
 	tests := []struct {
 		sc             string
-		disks          []StorageAPI
+		disksCount     int
 		expectedData   int
 		expectedParity int
 	}{
-		{reducedRedundancyStorageClass, xl.storageDisks, 14, 2},
-		{standardStorageClass, xl.storageDisks, 8, 8},
-		{"", xl.storageDisks, 8, 8},
-		{reducedRedundancyStorageClass, xl.storageDisks, 9, 7},
-		{standardStorageClass, xl.storageDisks, 10, 6},
-		{"", xl.storageDisks, 9, 7},
+		{reducedRedundancyStorageClass, len(xl.storageDisks), 14, 2},
+		{standardStorageClass, len(xl.storageDisks), 8, 8},
+		{"", len(xl.storageDisks), 8, 8},
+		{reducedRedundancyStorageClass, len(xl.storageDisks), 9, 7},
+		{standardStorageClass, len(xl.storageDisks), 10, 6},
+		{"", len(xl.storageDisks), 9, 7},
 	}
 	for i, tt := range tests {
 		// Set env var for test case 4
@@ -155,7 +153,7 @@ func testGetRedundancyCount(obj ObjectLayer, instanceType string, dirs []string,
 		if i+1 == 6 {
 			globalStandardStorageClass.Parity = 7
 		}
-		data, parity := getRedundancyCount(tt.sc, len(tt.disks))
+		data, parity := getRedundancyCount(tt.sc, tt.disksCount)
 		if data != tt.expectedData {
 			t.Errorf("Test %d, Expected data disks %d, got %d", i+1, tt.expectedData, data)
 			return
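For context on what these hunks exercise: getRedundancyCount splits a total disk count into data and parity drives based on the storage class. A minimal sketch that reproduces the expectations in the test table above, assuming a default parity of 2 for reduced redundancy and half the disks for standard; the identifiers mirror the ones the tests reference, but the constants' values and the control flow are assumptions of this sketch, not necessarily minio's actual implementation:

package main

import "fmt"

// Assumed values; the tests only reference the identifiers.
const (
	reducedRedundancyStorageClass = "REDUCED_REDUNDANCY"
	standardStorageClass          = "STANDARD"
)

type storageClass struct{ Parity int }

var (
	globalRRStorageClass       storageClass
	globalStandardStorageClass storageClass
)

// getRedundancyCount (sketch): consistent with the test table for 16 disks:
//   RRS default            -> 14 data, 2 parity
//   STANDARD / "" default  ->  8 data, 8 parity
//   RRS with Parity=7      ->  9 data, 7 parity
//   STANDARD with Parity=6 -> 10 data, 6 parity
func getRedundancyCount(sc string, totalDisks int) (data, parity int) {
	parity = totalDisks / 2 // default: half the disks carry parity
	switch sc {
	case reducedRedundancyStorageClass:
		if globalRRStorageClass.Parity != 0 {
			parity = globalRRStorageClass.Parity
		} else {
			parity = 2 // assumed RRS default parity
		}
	case standardStorageClass, "":
		if globalStandardStorageClass.Parity != 0 {
			parity = globalStandardStorageClass.Parity
		}
	}
	return totalDisks - parity, parity
}

func main() {
	fmt.Println(getRedundancyCount(reducedRedundancyStorageClass, 16)) // 14 2
	globalStandardStorageClass.Parity = 6
	fmt.Println(getRedundancyCount(standardStorageClass, 16)) // 10 6
}

The diff's switch from a disks []StorageAPI field to a plain disksCount int fits this signature: with sets layered on top, the test only needs a count, not direct access to the disk slice.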