// Mirror of https://github.com/minio/minio.git (commit fb96779a8a,
// synced 2024-12-26). Originating PR: implements an object layer that
// combines input erasure sets of XL layers into a unified namespace
// (fixes #5458-#5461, #5464, #5465, #5488, #5489, #5496, #5497).
/*
 * Minio Cloud Storage, (C) 2018 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package madmin

import (
	"testing"
)
// Tests heal drives missing and offline counts.
|
|
func TestHealDriveCounts(t *testing.T) {
|
|
rs := HealResultItem{}
|
|
rs.Before.Drives = make([]HealDriveInfo, 20)
|
|
rs.After.Drives = make([]HealDriveInfo, 20)
|
|
for i := range rs.Before.Drives {
|
|
if i < 4 {
|
|
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateMissing}
|
|
rs.After.Drives[i] = HealDriveInfo{State: DriveStateMissing}
|
|
} else if i > 4 && i < 15 {
|
|
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOffline}
|
|
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOffline}
|
|
} else if i > 15 {
|
|
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateCorrupt}
|
|
rs.After.Drives[i] = HealDriveInfo{State: DriveStateCorrupt}
|
|
} else {
|
|
rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOk}
|
|
rs.After.Drives[i] = HealDriveInfo{State: DriveStateOk}
|
|
}
|
|
}
|
|
|
|
i, j := rs.GetOnlineCounts()
|
|
if i > 2 {
|
|
t.Errorf("Expected '2', got %d before online disks", i)
|
|
}
|
|
if j > 2 {
|
|
t.Errorf("Expected '2', got %d after online disks", j)
|
|
}
|
|
i, j = rs.GetOfflineCounts()
|
|
if i > 10 {
|
|
t.Errorf("Expected '10', got %d before offline disks", i)
|
|
}
|
|
if j > 10 {
|
|
t.Errorf("Expected '10', got %d after offline disks", j)
|
|
}
|
|
i, j = rs.GetCorruptedCounts()
|
|
if i > 4 {
|
|
t.Errorf("Expected '4', got %d before corrupted disks", i)
|
|
}
|
|
if j > 4 {
|
|
t.Errorf("Expected '4', got %d after corrupted disks", j)
|
|
}
|
|
i, j = rs.GetMissingCounts()
|
|
if i > 4 {
|
|
t.Errorf("Expected '4', got %d before missing disks", i)
|
|
}
|
|
if j > 4 {
|
|
t.Errorf("Expected '4', got %d after missing disks", i)
|
|
}
|
|
}
|