Mirror of https://github.com/minio/minio.git (synced 2024-12-23 21:55:53 -05:00)

rename last remaining Zone->Pool

commit 1debd722b5
parent e7f6051f19
@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
 	// Initialize boot time
 	globalBootTime = UTCNow()
 
-	globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
+	globalEndpoints = mustGetPoolEndpoints(erasureDirs...)
 
 	newAllSubsystems()
 
@@ -97,7 +97,7 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints := mustGetZoneEndpoints(erasureDirs...)
+	endpoints := mustGetPoolEndpoints(erasureDirs...)
 	globalPolicySys = NewPolicySys()
 	objLayer, err := newErasureServerPools(ctx, endpoints)
 	if err != nil {
@@ -123,7 +123,7 @@ wait:
 		// Reset to next interval.
 		diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
 
-		var erasureSetInZoneDisksToHeal []map[int][]StorageAPI
+		var erasureSetInPoolDisksToHeal []map[int][]StorageAPI
 
 		healDisks := globalBackgroundHealState.getHealLocalDisks()
 		if len(healDisks) > 0 {
@@ -136,9 +136,9 @@ wait:
 			logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
 				len(healDisks)))
 
-			erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
+			erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
 			for i := range z.serverPools {
-				erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
+				erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
 			}
 		}
 
@@ -154,7 +154,7 @@ wait:
 				continue
 			}
 
-			poolIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
+			poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
 			if poolIdx < 0 {
 				continue
 			}
@@ -169,7 +169,7 @@ wait:
 				continue
 			}
 
-			erasureSetInZoneDisksToHeal[poolIdx][setIndex] = append(erasureSetInZoneDisksToHeal[poolIdx][setIndex], disk)
+			erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
 		}
 
 		buckets, _ := z.ListBuckets(ctx)
@@ -179,7 +179,7 @@ wait:
 			return buckets[i].Created.After(buckets[j].Created)
 		})
 
-		for i, setMap := range erasureSetInZoneDisksToHeal {
+		for i, setMap := range erasureSetInPoolDisksToHeal {
 			for setIndex, disks := range setMap {
 				for _, disk := range disks {
 					logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
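For reference, the renamed erasureSetInPoolDisksToHeal is a slice with one entry per pool, each mapping an erasure-set index to the drives queued for healing. Below is a minimal, self-contained sketch of how that shape is built and walked; StorageAPI is stubbed as a string here, while the real interface lives in MinIO's cmd package.

```go
package main

import "fmt"

// StorageAPI is stubbed as a string for illustration; in MinIO it is an interface.
type StorageAPI = string

func main() {
	const numPools = 2

	// One map per pool: erasure-set index -> drives queued for healing.
	disksToHeal := make([]map[int][]StorageAPI, numPools)
	for i := range disksToHeal {
		disksToHeal[i] = map[int][]StorageAPI{}
	}

	// Queue a drive on pool 0 / set 3 and another on pool 1 / set 0.
	disksToHeal[0][3] = append(disksToHeal[0][3], "/mnt/pool1/disk4")
	disksToHeal[1][0] = append(disksToHeal[1][0], "/mnt/pool2/disk1")

	// Walk the structure the same way the heal loop does: pool, then set, then drive.
	for poolIdx, setMap := range disksToHeal {
		for setIdx, disks := range setMap {
			for _, disk := range disks {
				fmt.Printf("healing %s (pool %d, set %d)\n", disk, poolIdx+1, setIdx)
			}
		}
	}
}
```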
@@ -353,7 +353,7 @@ func createServerEndpoints(serverAddr string, args ...string) (
 		if err != nil {
 			return nil, -1, err
 		}
-		endpointServerPools = append(endpointServerPools, ZoneEndpoints{
+		endpointServerPools = append(endpointServerPools, PoolEndpoints{
 			SetCount:     len(setArgs),
 			DrivesPerSet: len(setArgs[0]),
 			Endpoints:    endpointList,
@@ -373,7 +373,7 @@ func createServerEndpoints(serverAddr string, args ...string) (
 		if err != nil {
 			return nil, -1, err
 		}
-		if err = endpointServerPools.Add(ZoneEndpoints{
+		if err = endpointServerPools.Add(PoolEndpoints{
 			SetCount:     len(setArgs),
 			DrivesPerSet: len(setArgs[0]),
 			Endpoints:    endpointList,
@@ -196,20 +196,20 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 	}, nil
 }
 
-// ZoneEndpoints represent endpoints in a given pool
+// PoolEndpoints represent endpoints in a given pool
 // along with its setCount and setDriveCount.
-type ZoneEndpoints struct {
+type PoolEndpoints struct {
 	SetCount     int
 	DrivesPerSet int
 	Endpoints    Endpoints
 }
 
 // EndpointServerPools - list of list of endpoints
-type EndpointServerPools []ZoneEndpoints
+type EndpointServerPools []PoolEndpoints
 
-// GetLocalZoneIdx returns the pool which endpoint belongs to locally.
+// GetLocalPoolIdx returns the pool which endpoint belongs to locally.
 // if ep is remote this code will return -1 poolIndex
-func (l EndpointServerPools) GetLocalZoneIdx(ep Endpoint) int {
+func (l EndpointServerPools) GetLocalPoolIdx(ep Endpoint) int {
 	for i, zep := range l {
 		for _, cep := range zep.Endpoints {
 			if cep.IsLocal && ep.IsLocal {
@@ -223,7 +223,7 @@ func (l EndpointServerPools) GetLocalZoneIdx(ep Endpoint) int {
 }
 
 // Add add pool endpoints
-func (l *EndpointServerPools) Add(zeps ZoneEndpoints) error {
+func (l *EndpointServerPools) Add(zeps PoolEndpoints) error {
 	existSet := set.NewStringSet()
 	for _, zep := range *l {
 		for _, ep := range zep.Endpoints {
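The rename above only changes names; the shape stays the same: each pool carries its set count, drives per set, and endpoints, and EndpointServerPools is simply the list of pools. Here is a minimal stand-alone sketch using simplified stand-in types (the real Endpoint carries URL and host details, and the real GetLocalPoolIdx matches on locality rather than on path):

```go
package main

import "fmt"

// Simplified stand-ins for the renamed cmd-package types; field names follow
// the diff above, but Endpoint is reduced to a path plus an IsLocal flag.
type Endpoint struct {
	Path    string
	IsLocal bool
}

type PoolEndpoints struct {
	SetCount     int
	DrivesPerSet int
	Endpoints    []Endpoint
}

type EndpointServerPools []PoolEndpoints

// getLocalPoolIdx mirrors the idea behind GetLocalPoolIdx: report the index of
// the pool holding a matching local endpoint, or -1 when the endpoint is remote.
func (l EndpointServerPools) getLocalPoolIdx(ep Endpoint) int {
	for i, pool := range l {
		for _, cep := range pool.Endpoints {
			if cep.IsLocal && ep.IsLocal && cep.Path == ep.Path {
				return i
			}
		}
	}
	return -1
}

func main() {
	pools := EndpointServerPools{
		{SetCount: 1, DrivesPerSet: 4, Endpoints: []Endpoint{{Path: "/mnt/pool1/disk1", IsLocal: true}}},
		{SetCount: 1, DrivesPerSet: 4, Endpoints: []Endpoint{{Path: "/mnt/pool2/disk1", IsLocal: true}}},
	}
	fmt.Println(pools.getLocalPoolIdx(Endpoint{Path: "/mnt/pool2/disk1", IsLocal: true}))   // 1
	fmt.Println(pools.getLocalPoolIdx(Endpoint{Path: "remote-host/disk1", IsLocal: false})) // -1
}
```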
@@ -357,7 +357,7 @@ func TestGetLocalPeer(t *testing.T) {
 	}
 
 	for i, testCase := range testCases {
-		zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
+		zendpoints := mustGetPoolEndpoints(testCase.endpointArgs...)
 		if !zendpoints[0].Endpoints[0].IsLocal {
 			if err := zendpoints[0].Endpoints.UpdateIsLocal(false); err != nil {
 				t.Fatalf("error: expected = <nil>, got = %v", err)
@@ -390,7 +390,7 @@ func TestGetRemotePeers(t *testing.T) {
 	}
 
 	for _, testCase := range testCases {
-		zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
+		zendpoints := mustGetPoolEndpoints(testCase.endpointArgs...)
 		if !zendpoints[0].Endpoints[0].IsLocal {
 			if err := zendpoints[0].Endpoints.UpdateIsLocal(false); err != nil {
 				t.Errorf("error: expected = <nil>, got = %v", err)
@@ -160,7 +160,7 @@ func TestHealObjectCorrupted(t *testing.T) {
 	defer removeRoots(fsDirs)
 
 	// Everything is fine, should return nil
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -310,7 +310,7 @@ func TestHealObjectErasure(t *testing.T) {
 	defer removeRoots(fsDirs)
 
 	// Everything is fine, should return nil
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -400,7 +400,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
 	defer removeRoots(fsDirs)
 
 	// Everything is fine, should return nil
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -142,7 +142,7 @@ func TestShuffleDisks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		removeRoots(disks)
 		t.Fatal(err)
@@ -190,7 +190,7 @@ func TestEvalDisks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		removeRoots(disks)
 		t.Fatal(err)
@@ -47,7 +47,7 @@ type erasureServerPools struct {
 	shutdown context.CancelFunc
 }
 
-func (z *erasureServerPools) SingleZone() bool {
+func (z *erasureServerPools) SinglePool() bool {
 	return len(z.serverPools) == 1
 }
 
@@ -187,9 +187,9 @@ func (p serverPoolsAvailableSpace) TotalAvailable() uint64 {
 	return total
 }
 
-// getAvailableZoneIdx will return an index that can hold size bytes.
+// getAvailablePoolIdx will return an index that can hold size bytes.
 // -1 is returned if no serverPools have available space for the size given.
-func (z *erasureServerPools) getAvailableZoneIdx(ctx context.Context, size int64) int {
+func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, size int64) int {
 	serverPools := z.getServerPoolsAvailableSpace(ctx, size)
 	total := serverPools.TotalAvailable()
 	if total == 0 {
@@ -260,10 +260,10 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, s
 	return serverPools
 }
 
-// getZoneIdx returns the found previous object and its corresponding pool idx,
+// getPoolIdx returns the found previous object and its corresponding pool idx,
 // if none are found falls back to most available space pool.
-func (z *erasureServerPools) getZoneIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) {
-	if z.SingleZone() {
+func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) {
+	if z.SinglePool() {
 		return 0, nil
 	}
 	for i, pool := range z.serverPools {
@@ -289,7 +289,7 @@ func (z *erasureServerPools) getZoneIdx(ctx context.Context, bucket, object stri
 	}
 
 	// We multiply the size by 2 to account for erasure coding.
-	idx = z.getAvailableZoneIdx(ctx, size*2)
+	idx = z.getAvailablePoolIdx(ctx, size*2)
 	if idx < 0 {
 		return -1, toObjectErr(errDiskFull)
 	}
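Taken together, SinglePool, getPoolIdx, and getAvailablePoolIdx implement write placement: a single pool is always index 0, an object that already exists keeps its pool, and otherwise the request falls back to a pool with enough free space, asking for twice the object size to leave headroom for erasure-coding overhead. The sketch below models only that fallback, using a greedy most-free-space choice; the existing-object lookup and the exact selection strategy of the real code are not reproduced here.

```go
package main

import (
	"errors"
	"fmt"
)

// poolSpace is a stand-in for serverPoolsAvailableSpace: free bytes per pool index.
type poolSpace []uint64

// pickPoolIdx sketches the selection order used above: a single pool is always
// index 0; otherwise pick a pool with the most available space, requesting
// size*2 to account for parity data written by erasure coding.
func pickPoolIdx(space poolSpace, size int64) (int, error) {
	if len(space) == 1 {
		return 0, nil
	}
	need := uint64(size) * 2 // account for erasure-coding overhead
	best, bestFree := -1, uint64(0)
	for i, free := range space {
		if free >= need && free > bestFree {
			best, bestFree = i, free
		}
	}
	if best < 0 {
		return -1, errors.New("no pool has enough space") // analogous to errDiskFull
	}
	return best, nil
}

func main() {
	idx, err := pickPoolIdx(poolSpace{10 << 30, 200 << 30, 50 << 30}, 1<<30)
	fmt.Println(idx, err) // 1 <nil>
}
```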
@@ -601,11 +601,11 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec
 
 	object = encodeDirObject(object)
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
 	}
 
-	idx, err := z.getZoneIdx(ctx, bucket, object, opts, data.Size())
+	idx, err := z.getPoolIdx(ctx, bucket, object, opts, data.Size())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
@@ -621,7 +621,7 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob
 
 	object = encodeDirObject(object)
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObject(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -658,7 +658,7 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
 	}
 	defer multiDeleteLock.Unlock()
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts)
 	}
 
@@ -680,7 +680,7 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
 
 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
 
-	poolIdx, err := z.getZoneIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
+	poolIdx, err := z.getPoolIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
 	if err != nil {
 		return objInfo, err
 	}
@@ -833,7 +833,7 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
 		return ListMultipartsInfo{}, err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	}
 
@@ -859,12 +859,12 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 		return "", err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
 	}
 
 	// We don't know the exact size, so we ask for at least 1GiB file.
-	idx, err := z.getZoneIdx(ctx, bucket, object, opts, 1<<30)
+	idx, err := z.getPoolIdx(ctx, bucket, object, opts, 1<<30)
 	if err != nil {
 		return "", err
 	}
@@ -888,7 +888,7 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
 		return PartInfo{}, err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 	}
 
@@ -918,7 +918,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
 		return MultipartInfo{}, err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -948,7 +948,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object
 		return ListPartsInfo{}, err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -975,7 +975,7 @@ func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, o
 		return err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
 	}
 
@@ -1004,7 +1004,7 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
 		return objInfo, err
 	}
 
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}
 
@@ -1031,7 +1031,7 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
 
 // GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
 func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		bucketInfo, err = z.serverPools[0].GetBucketInfo(ctx, bucket)
 		if err != nil {
 			return bucketInfo, err
@@ -1089,7 +1089,7 @@ func (z *erasureServerPools) IsTaggingSupported() bool {
 // even if one of the serverPools fail to delete buckets, we proceed to
 // undo a successful operation.
 func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteBucket(ctx, bucket, forceDelete)
 	}
 	g := errgroup.WithNErrs(len(z.serverPools))
@@ -1165,7 +1165,7 @@ func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools
 // sort here just for simplification. As per design it is assumed
 // that all buckets are present on all serverPools.
 func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		buckets, err = z.serverPools[0].ListBuckets(ctx)
 	} else {
 		for _, pool := range z.serverPools {
@@ -1428,7 +1428,7 @@ func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, e
 	return &BackendMetrics{}, NotImplemented{}
 }
 
-func (z *erasureServerPools) getZoneAndSet(id string) (int, int, error) {
+func (z *erasureServerPools) getPoolAndSet(id string) (int, int, error) {
 	for poolIdx := range z.serverPools {
 		format := z.serverPools[poolIdx].format
 		for setIdx, set := range format.Erasure.Sets {
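The renamed getPoolAndSet resolves a disk UUID to its (pool, set) coordinates by scanning each pool's format metadata. Here is a small stand-alone sketch with a trimmed-down stand-in for format.Erasure.Sets:

```go
package main

import (
	"errors"
	"fmt"
)

// poolFormat is a trimmed-down stand-in for a pool's format metadata: the disk
// UUIDs that make up each erasure set.
type poolFormat struct {
	Sets [][]string
}

// getPoolAndSet sketches the renamed lookup: scan every pool's sets for the
// disk ID and return (poolIdx, setIdx), or an error when the ID is unknown.
func getPoolAndSet(pools []poolFormat, id string) (int, int, error) {
	for poolIdx, format := range pools {
		for setIdx, set := range format.Sets {
			for _, diskID := range set {
				if diskID == id {
					return poolIdx, setIdx, nil
				}
			}
		}
	}
	return -1, -1, errors.New("disk id not found in any pool")
}

func main() {
	pools := []poolFormat{
		{Sets: [][]string{{"uuid-a", "uuid-b"}}},
		{Sets: [][]string{{"uuid-c"}, {"uuid-d", "uuid-e"}}},
	}
	fmt.Println(getPoolAndSet(pools, "uuid-d")) // 1 1 <nil>
}
```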
@@ -1453,7 +1453,7 @@ type HealthOptions struct {
 type HealthResult struct {
 	Healthy       bool
 	HealingDrives int
-	ZoneID, SetID int
+	PoolID, SetID int
 	WriteQuorum   int
 }
 
@@ -1474,7 +1474,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
 
 	for _, localDiskIDs := range diskIDs {
 		for _, id := range localDiskIDs {
-			poolIdx, setIdx, err := z.getZoneAndSet(id)
+			poolIdx, setIdx, err := z.getPoolAndSet(id)
 			if err != nil {
 				logger.LogIf(ctx, err)
 				continue
@@ -1519,7 +1519,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
 			return HealthResult{
 				Healthy:       false,
 				HealingDrives: len(aggHealStateResult.HealDisks),
-				ZoneID:        poolIdx,
+				PoolID:        poolIdx,
 				SetID:         setIdx,
 				WriteQuorum:   writeQuorum,
 			}
@@ -1546,7 +1546,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
 // PutObjectTags - replace or add tags to an existing object
 func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts)
 	}
 
@@ -1576,7 +1576,7 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s
 // DeleteObjectTags - delete object tags from an existing object
 func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -1605,7 +1605,7 @@ func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, objec
 // GetObjectTags - get object tags from an existing object
 func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -437,7 +437,7 @@ func (b *bucketMetacache) deleteAll() {
 	ctx := context.Background()
 	ez, ok := newObjectLayerFn().(*erasureServerPools)
 	if !ok {
-		logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be *erasureZones"))
+		logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be *erasurePools"))
 		return
 	}
 
@@ -34,7 +34,7 @@ func TestNewObjectLayer(t *testing.T) {
 	}
 	defer removeRoots(disks)
 
-	obj, err := newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	obj, err := newObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		t.Fatal("Unexpected object layer initialization error", err)
 	}
@@ -53,7 +53,7 @@ func TestNewObjectLayer(t *testing.T) {
 	}
 	defer removeRoots(disks)
 
-	obj, err = newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	obj, err = newObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		t.Fatal("Unexpected object layer initialization error", err)
 	}
@@ -446,7 +446,7 @@ func newStorageRESTHTTPServerClient(t *testing.T) (*httptest.Server, *storageRES
 		t.Fatalf("UpdateIsLocal failed %v", err)
 	}
 
-	registerStorageRESTHandlers(router, []ZoneEndpoints{{
+	registerStorageRESTHandlers(router, []PoolEndpoints{{
 		Endpoints: Endpoints{endpoint},
 	}})
 
@@ -205,7 +205,7 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err
 	if err != nil {
 		return nil, nil, err
 	}
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		removeRoots(fsDirs)
 		return nil, nil, err
@@ -331,7 +331,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	credentials := globalActiveCred
 
 	testServer.Obj = objLayer
-	testServer.Disks = mustGetZoneEndpoints(disks...)
+	testServer.Disks = mustGetPoolEndpoints(disks...)
 	testServer.AccessKey = credentials.AccessKey
 	testServer.SecretKey = credentials.SecretKey
 
@@ -2000,7 +2000,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 	if err != nil {
 		t.Fatalf("Initialization of disks for Erasure setup: %s", err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(erasureDisks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(erasureDisks...))
 	if err != nil {
 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
 	}
@@ -2250,7 +2250,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) {
 	return certOut.Bytes(), keyOut.Bytes(), nil
 }
 
-func mustGetZoneEndpoints(args ...string) EndpointServerPools {
+func mustGetPoolEndpoints(args ...string) EndpointServerPools {
 	endpoints := mustGetNewEndpoints(args...)
 	drivesPerSet := len(args)
 	setCount := 1
@@ -2258,7 +2258,7 @@ func mustGetZoneEndpoints(args ...string) EndpointServerPools {
 		drivesPerSet = 16
 		setCount = len(args) / 16
 	}
-	return []ZoneEndpoints{{
+	return []PoolEndpoints{{
 		SetCount:     setCount,
 		DrivesPerSet: drivesPerSet,
 		Endpoints:    endpoints,
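The test helper mustGetPoolEndpoints derives a trivial layout from its drive arguments: a single set containing all drives by default, or sets of 16 when enough drives are passed. A sketch of that bookkeeping follows; the exact threshold check is elided in the hunk above, so the condition below is an assumption.

```go
package main

import "fmt"

// poolLayout mirrors what mustGetPoolEndpoints derives from its drive arguments:
// one set holding every drive by default, switching to sets of 16 when enough
// drives are supplied.
func poolLayout(nDrives int) (setCount, drivesPerSet int) {
	drivesPerSet = nDrives
	setCount = 1
	if nDrives >= 16 && nDrives%16 == 0 { // assumed condition; not shown in the diff
		drivesPerSet = 16
		setCount = nDrives / 16
	}
	return setCount, drivesPerSet
}

func main() {
	fmt.Println(poolLayout(4))  // 1 4
	fmt.Println(poolLayout(32)) // 2 16
}
```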