Add large bucket support for erasure coded backend (#5160)

This PR implements an object layer that combines multiple
input erasure sets (XL layers) into a unified namespace.

This object layer extends the existing erasure coded
implementation. The design also assumes that providing
more than 16 disks is a static configuration: if you
start the setup with 32 disks as 4 sets of 8 disks each,
then you must always provide those same 4 sets.
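
To make the static layout concrete, here is a minimal Go sketch that
divides a flat disk list into equal sets; the helper name
divideIntoSets and the /mnt/diskN paths are hypothetical, not the
actual minio code:

    package main

    import "fmt"

    // divideIntoSets splits a flat list of disk endpoints into
    // setCount erasure sets of equal size. The layout is static: the
    // same disks must be supplied as the same sets on every start.
    func divideIntoSets(disks []string, setCount int) ([][]string, error) {
        if setCount <= 0 || len(disks)%setCount != 0 {
            return nil, fmt.Errorf("%d disks cannot form %d equal sets", len(disks), setCount)
        }
        perSet := len(disks) / setCount
        sets := make([][]string, setCount)
        for i := range sets {
            sets[i] = disks[i*perSet : (i+1)*perSet]
        }
        return sets, nil
    }

    func main() {
        disks := make([]string, 32)
        for i := range disks {
            disks[i] = fmt.Sprintf("/mnt/disk%d", i+1)
        }
        sets, _ := divideIntoSets(disks, 4) // 32 disks -> 4 sets of 8
        fmt.Println(len(sets), "sets of", len(sets[0]), "disks each")
    }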

Some design details and restrictions:

- Objects are distributed using a consistent ordering of
  the object name to a unique erasure set (see the
  placement sketch after this list).
- Each set has its own dsync instance, so locks are
  synchronized properly at the set (erasure layer) level;
  a lock-namespace sketch also follows the list.
- Each set is still limited to a maximum of 16 disks,
  but you can start with multiple such sets statically.
- Sets are a static collection of disks and cannot be
  changed; neither elastic expansion nor elastic removal
  is allowed.
- ListObjects() across sets can be noticeably slower,
  since the listing happens on every set and the results
  are merged at the sets layer (see the merge sketch
  below).
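
The placement from the first bullet can be illustrated with this Go
sketch; the CRC32-modulo hash here is an assumption for illustration
and may not match the exact hash used by the implementation:

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // setIndex deterministically maps an object key to one erasure
    // set. Since the set layout is static, the same key always lands
    // on the same set, so no placement metadata needs to be stored.
    func setIndex(key string, setCount int) int {
        return int(crc32.ChecksumIEEE([]byte(key)) % uint32(setCount))
    }

    func main() {
        for _, key := range []string{"photos/a.jpg", "photos/b.jpg", "logs/2018.gz"} {
            fmt.Printf("%s -> set %d of 4\n", key, setIndex(key, 4))
        }
    }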
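
For the per-set locking in the second bullet, the sketch below uses
plain in-process mutexes as a stand-in; dsync itself is a distributed
lock, so this shows only the per-set lock namespacing, not the dsync
API:

    package main

    import (
        "fmt"
        "sync"
    )

    // namespaceLock is one independent lock namespace. Giving each
    // erasure set its own instance means locking "bucket/object" on
    // one set never contends with the same name on another set.
    type namespaceLock struct {
        mu    sync.Mutex
        locks map[string]*sync.Mutex
    }

    func newNamespaceLock() *namespaceLock {
        return &namespaceLock{locks: make(map[string]*sync.Mutex)}
    }

    // acquire returns the named resource lock in locked state.
    func (n *namespaceLock) acquire(resource string) *sync.Mutex {
        n.mu.Lock()
        l, ok := n.locks[resource]
        if !ok {
            l = &sync.Mutex{}
            n.locks[resource] = l
        }
        n.mu.Unlock()
        l.Lock()
        return l
    }

    func main() {
        // One lock namespace per erasure set.
        setLocks := []*namespaceLock{newNamespaceLock(), newNamespaceLock()}
        l := setLocks[0].acquire("bucket/object.txt")
        defer l.Unlock()
        fmt.Println("locked bucket/object.txt on set 0 only")
    }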
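
And the listing cost from the last bullet follows from a merge like
this one; mergeListings is an illustrative name, not the actual
function:

    package main

    import (
        "fmt"
        "sort"
    )

    // mergeListings merges lexically sorted per-set listings into
    // one sorted, de-duplicated result. This is why ListObjects
    // across sets costs a listing on every set plus a merge at the
    // sets layer.
    func mergeListings(perSet ...[]string) []string {
        var all []string
        for _, keys := range perSet {
            all = append(all, keys...)
        }
        sort.Strings(all)
        var out []string
        for i, k := range all {
            if i == 0 || k != all[i-1] {
                out = append(out, k)
            }
        }
        return out
    }

    func main() {
        set0 := []string{"a.txt", "d.txt"}
        set1 := []string{"b.txt", "c.txt"}
        fmt.Println(mergeListings(set0, set1)) // [a.txt b.txt c.txt d.txt]
    }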

Fixes #5465
Fixes #5464
Fixes #5461
Fixes #5460
Fixes #5459
Fixes #5458
Fixes #5488
Fixes #5489
Fixes #5497
Fixes #5496
Harshavardhana authored on 2018-02-15 17:45:57 -08:00
committed by kannappanr
parent dd80256151
commit fb96779a8a
82 changed files with 5046 additions and 4771 deletions


@@ -91,8 +91,8 @@ func TestAdminStatus(t *testing.T) {
 	testAdminCmd(statusCmd, t)
 }
 
-// TestReInitDisks - test for Admin.ReInitDisks RPC service.
-func TestReInitDisks(t *testing.T) {
+// TestReInitFormat - test for Admin.ReInitFormat RPC service.
+func TestReInitFormat(t *testing.T) {
 	// Reset global variables to start afresh.
 	resetTestGlobals()
@@ -138,40 +138,13 @@ func TestReInitDisks(t *testing.T) {
 	}
 
 	authReply := AuthRPCReply{}
-	err = adminServer.ReInitDisks(&authArgs, &authReply)
+	err = adminServer.ReInitFormat(&ReInitFormatArgs{
+		AuthRPCArgs: authArgs,
+		DryRun:      false,
+	}, &authReply)
 	if err != nil {
 		t.Errorf("Expected to pass, but failed with %v", err)
 	}
-
-	token, err = authenticateNode(creds.AccessKey, creds.SecretKey)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// Negative test case with admin rpc server setup for FS.
-	globalIsXL = false
-	fsAdminServer := adminCmd{}
-	fsArgs := LoginRPCArgs{
-		AuthToken:   token,
-		Version:     globalRPCAPIVersion,
-		RequestTime: UTCNow(),
-	}
-	fsReply := LoginRPCReply{}
-	err = fsAdminServer.Login(&fsArgs, &fsReply)
-	if err != nil {
-		t.Fatalf("Failed to login to fs admin server - %v", err)
-	}
-
-	authArgs = AuthRPCArgs{
-		AuthToken: token,
-		Version:   globalRPCAPIVersion,
-	}
-	authReply = AuthRPCReply{}
-
-	// Attempt ReInitDisks service on a FS backend.
-	err = fsAdminServer.ReInitDisks(&authArgs, &authReply)
-	if err != errUnsupportedBackend {
-		t.Errorf("Expected to fail with %v, but received %v",
-			errUnsupportedBackend, err)
-	}
 }
 
 // TestGetConfig - Test for GetConfig admin RPC.