mirror of https://github.com/minio/minio.git (synced 2024-12-24 06:05:55 -05:00)

rename last remaining Zone->Pool

parent e7f6051f19
commit 1debd722b5
@@ -66,7 +66,7 @@ func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, erro
 	// Initialize boot time
 	globalBootTime = UTCNow()

-	globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
+	globalEndpoints = mustGetPoolEndpoints(erasureDirs...)

 	newAllSubsystems()

@@ -97,7 +97,7 @@ func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error)
 	if err != nil {
 		return nil, nil, err
 	}
-	endpoints := mustGetZoneEndpoints(erasureDirs...)
+	endpoints := mustGetPoolEndpoints(erasureDirs...)
 	globalPolicySys = NewPolicySys()
 	objLayer, err := newErasureServerPools(ctx, endpoints)
 	if err != nil {
@@ -123,7 +123,7 @@ wait:
 			// Reset to next interval.
 			diskCheckTimer.Reset(defaultMonitorNewDiskInterval)

-			var erasureSetInZoneDisksToHeal []map[int][]StorageAPI
+			var erasureSetInPoolDisksToHeal []map[int][]StorageAPI

 			healDisks := globalBackgroundHealState.getHealLocalDisks()
 			if len(healDisks) > 0 {
@@ -136,9 +136,9 @@ wait:
 				logger.Info(fmt.Sprintf("Found drives to heal %d, proceeding to heal content...",
 					len(healDisks)))

-				erasureSetInZoneDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
+				erasureSetInPoolDisksToHeal = make([]map[int][]StorageAPI, len(z.serverPools))
 				for i := range z.serverPools {
-					erasureSetInZoneDisksToHeal[i] = map[int][]StorageAPI{}
+					erasureSetInPoolDisksToHeal[i] = map[int][]StorageAPI{}
 				}
 			}

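For readers tracking the data shape: the renamed erasureSetInPoolDisksToHeal is a slice indexed by pool, and each element maps an erasure-set index to the drives queued for healing in that set. A minimal sketch, with poolCount, diskA, and diskB as hypothetical stand-ins:

	// One heal-queue map per pool; queue two drives under erasure set 2 of pool 0.
	toHeal := make([]map[int][]StorageAPI, poolCount)
	for i := range toHeal {
		toHeal[i] = map[int][]StorageAPI{}
	}
	toHeal[0][2] = append(toHeal[0][2], diskA, diskB)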
@@ -154,7 +154,7 @@ wait:
 					continue
 				}

-				poolIdx := globalEndpoints.GetLocalZoneIdx(disk.Endpoint())
+				poolIdx := globalEndpoints.GetLocalPoolIdx(disk.Endpoint())
 				if poolIdx < 0 {
 					continue
 				}
@@ -169,7 +169,7 @@ wait:
 					continue
 				}

-				erasureSetInZoneDisksToHeal[poolIdx][setIndex] = append(erasureSetInZoneDisksToHeal[poolIdx][setIndex], disk)
+				erasureSetInPoolDisksToHeal[poolIdx][setIndex] = append(erasureSetInPoolDisksToHeal[poolIdx][setIndex], disk)
 			}

 			buckets, _ := z.ListBuckets(ctx)
@@ -179,7 +179,7 @@ wait:
 				return buckets[i].Created.After(buckets[j].Created)
 			})

-			for i, setMap := range erasureSetInZoneDisksToHeal {
+			for i, setMap := range erasureSetInPoolDisksToHeal {
 				for setIndex, disks := range setMap {
 					for _, disk := range disks {
 						logger.Info("Healing disk '%s' on %s pool", disk, humanize.Ordinal(i+1))
@@ -353,7 +353,7 @@ func createServerEndpoints(serverAddr string, args ...string) (
 		if err != nil {
 			return nil, -1, err
 		}
-		endpointServerPools = append(endpointServerPools, ZoneEndpoints{
+		endpointServerPools = append(endpointServerPools, PoolEndpoints{
 			SetCount:     len(setArgs),
 			DrivesPerSet: len(setArgs[0]),
 			Endpoints:    endpointList,
@@ -373,7 +373,7 @@ func createServerEndpoints(serverAddr string, args ...string) (
 		if err != nil {
 			return nil, -1, err
 		}
-		if err = endpointServerPools.Add(ZoneEndpoints{
+		if err = endpointServerPools.Add(PoolEndpoints{
 			SetCount:     len(setArgs),
 			DrivesPerSet: len(setArgs[0]),
 			Endpoints:    endpointList,
@@ -196,20 +196,20 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
 	}, nil
 }

-// ZoneEndpoints represent endpoints in a given pool
+// PoolEndpoints represent endpoints in a given pool
 // along with its setCount and setDriveCount.
-type ZoneEndpoints struct {
+type PoolEndpoints struct {
 	SetCount     int
 	DrivesPerSet int
 	Endpoints    Endpoints
 }

 // EndpointServerPools - list of list of endpoints
-type EndpointServerPools []ZoneEndpoints
+type EndpointServerPools []PoolEndpoints

-// GetLocalZoneIdx returns the pool which endpoint belongs to locally.
+// GetLocalPoolIdx returns the pool which endpoint belongs to locally.
 // if ep is remote this code will return -1 poolIndex
-func (l EndpointServerPools) GetLocalZoneIdx(ep Endpoint) int {
+func (l EndpointServerPools) GetLocalPoolIdx(ep Endpoint) int {
 	for i, zep := range l {
 		for _, cep := range zep.Endpoints {
 			if cep.IsLocal && ep.IsLocal {
@@ -223,7 +223,7 @@ func (l EndpointServerPools) GetLocalZoneIdx(ep Endpoint) int {
 }

 // Add add pool endpoints
-func (l *EndpointServerPools) Add(zeps ZoneEndpoints) error {
+func (l *EndpointServerPools) Add(zeps PoolEndpoints) error {
 	existSet := set.NewStringSet()
 	for _, zep := range *l {
 		for _, ep := range zep.Endpoints {
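For orientation, a short sketch of how the renamed pieces fit together: build Endpoints, wrap them in a PoolEndpoints, register it via Add, and query locality with GetLocalPoolIdx. The drive paths are invented; mustGetNewEndpoints is the test helper that appears further down in this diff:

	endpoints := mustGetNewEndpoints("/mnt/d1", "/mnt/d2", "/mnt/d3", "/mnt/d4")
	var pools EndpointServerPools
	if err := pools.Add(PoolEndpoints{
		SetCount:     1,
		DrivesPerSet: 4,
		Endpoints:    endpoints,
	}); err != nil {
		log.Fatalln(err) // assumes the standard library log package
	}
	// -1 means the endpoint is remote; otherwise the owning pool's index.
	fmt.Println(pools.GetLocalPoolIdx(endpoints[0]))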
@@ -357,7 +357,7 @@ func TestGetLocalPeer(t *testing.T) {
 	}

 	for i, testCase := range testCases {
-		zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
+		zendpoints := mustGetPoolEndpoints(testCase.endpointArgs...)
 		if !zendpoints[0].Endpoints[0].IsLocal {
 			if err := zendpoints[0].Endpoints.UpdateIsLocal(false); err != nil {
 				t.Fatalf("error: expected = <nil>, got = %v", err)
@@ -390,7 +390,7 @@ func TestGetRemotePeers(t *testing.T) {
 	}

 	for _, testCase := range testCases {
-		zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
+		zendpoints := mustGetPoolEndpoints(testCase.endpointArgs...)
 		if !zendpoints[0].Endpoints[0].IsLocal {
 			if err := zendpoints[0].Endpoints.UpdateIsLocal(false); err != nil {
 				t.Errorf("error: expected = <nil>, got = %v", err)
@@ -160,7 +160,7 @@ func TestHealObjectCorrupted(t *testing.T) {
 	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -310,7 +310,7 @@ func TestHealObjectErasure(t *testing.T) {
 	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -400,7 +400,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) {
 	defer removeRoots(fsDirs)

 	// Everything is fine, should return nil
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -142,7 +142,7 @@ func TestShuffleDisks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		removeRoots(disks)
 		t.Fatal(err)
@@ -190,7 +190,7 @@ func TestEvalDisks(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		removeRoots(disks)
 		t.Fatal(err)
@@ -47,7 +47,7 @@ type erasureServerPools struct {
 	shutdown context.CancelFunc
 }

-func (z *erasureServerPools) SingleZone() bool {
+func (z *erasureServerPools) SinglePool() bool {
 	return len(z.serverPools) == 1
 }

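SinglePool backs a dispatch pattern repeated across the renamed call sites below: take a fast path straight to serverPools[0] when there is one pool, otherwise fan out over all pools. A condensed sketch of that shape, with doSomething as a placeholder rather than a real method:

	func (z *erasureServerPools) doSomething(ctx context.Context, bucket, object string) error {
		if z.SinglePool() {
			// One pool: operate on it directly, no search required.
			return z.serverPools[0].doSomething(ctx, bucket, object)
		}
		// Several pools: try each until one succeeds for the object.
		for _, pool := range z.serverPools {
			if err := pool.doSomething(ctx, bucket, object); err == nil {
				return nil
			}
		}
		return errFileNotFound // placeholder for per-operation error handling
	}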
@@ -187,9 +187,9 @@ func (p serverPoolsAvailableSpace) TotalAvailable() uint64 {
 	return total
 }

-// getAvailableZoneIdx will return an index that can hold size bytes.
+// getAvailablePoolIdx will return an index that can hold size bytes.
 // -1 is returned if no serverPools have available space for the size given.
-func (z *erasureServerPools) getAvailableZoneIdx(ctx context.Context, size int64) int {
+func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, size int64) int {
 	serverPools := z.getServerPoolsAvailableSpace(ctx, size)
 	total := serverPools.TotalAvailable()
 	if total == 0 {
@@ -260,10 +260,10 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, s
 	return serverPools
 }

-// getZoneIdx returns the found previous object and its corresponding pool idx,
+// getPoolIdx returns the found previous object and its corresponding pool idx,
 // if none are found falls back to most available space pool.
-func (z *erasureServerPools) getZoneIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) {
-	if z.SingleZone() {
+func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, opts ObjectOptions, size int64) (idx int, err error) {
+	if z.SinglePool() {
 		return 0, nil
 	}
 	for i, pool := range z.serverPools {
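The renamed comment says placement falls back to the pool with the most available space. As a standalone illustration of that fallback rule (invented names, not the commit's implementation):

	// poolSpace is a hypothetical stand-in for one entry of
	// serverPoolsAvailableSpace.
	type poolSpace struct {
		Index     int
		Available uint64
	}

	// pickPoolByFreeSpace returns the index of the pool with the most
	// available space that can still hold size bytes, or -1 if none can.
	func pickPoolByFreeSpace(pools []poolSpace, size uint64) int {
		best, idx := uint64(0), -1
		for _, p := range pools {
			if p.Available >= size && p.Available > best {
				best, idx = p.Available, p.Index
			}
		}
		return idx
	}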
@@ -289,7 +289,7 @@ func (z *erasureServerPools) getZoneIdx(ctx context.Context, bucket, object stri
 	}

 	// We multiply the size by 2 to account for erasure coding.
-	idx = z.getAvailableZoneIdx(ctx, size*2)
+	idx = z.getAvailablePoolIdx(ctx, size*2)
 	if idx < 0 {
 		return -1, toObjectErr(errDiskFull)
 	}
@@ -601,11 +601,11 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec

 	object = encodeDirObject(object)

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObject(ctx, bucket, object, data, opts)
 	}

-	idx, err := z.getZoneIdx(ctx, bucket, object, opts, data.Size())
+	idx, err := z.getPoolIdx(ctx, bucket, object, opts, data.Size())
 	if err != nil {
 		return ObjectInfo{}, err
 	}
@@ -621,7 +621,7 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob

 	object = encodeDirObject(object)

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObject(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -658,7 +658,7 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o
 	}
 	defer multiDeleteLock.Unlock()

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObjects(ctx, bucket, objects, opts)
 	}

@@ -680,7 +680,7 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec

 	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

-	poolIdx, err := z.getZoneIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
+	poolIdx, err := z.getPoolIdx(ctx, dstBucket, dstObject, dstOpts, srcInfo.Size)
 	if err != nil {
 		return objInfo, err
 	}
@@ -833,7 +833,7 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
 		return ListMultipartsInfo{}, err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
 	}

@@ -859,12 +859,12 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj
 		return "", err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts)
 	}

 	// We don't know the exact size, so we ask for at least 1GiB file.
-	idx, err := z.getZoneIdx(ctx, bucket, object, opts, 1<<30)
+	idx, err := z.getPoolIdx(ctx, bucket, object, opts, 1<<30)
 	if err != nil {
 		return "", err
 	}
@@ -888,7 +888,7 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object,
 		return PartInfo{}, err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
 	}

@@ -918,7 +918,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec
 		return MultipartInfo{}, err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -948,7 +948,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object
 		return ListPartsInfo{}, err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -975,7 +975,7 @@ func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, o
 		return err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
 	}

@@ -1004,7 +1004,7 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket
 		return objInfo, err
 	}

-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
 	}

@@ -1031,7 +1031,7 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket

 // GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
 func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		bucketInfo, err = z.serverPools[0].GetBucketInfo(ctx, bucket)
 		if err != nil {
 			return bucketInfo, err
@@ -1089,7 +1089,7 @@ func (z *erasureServerPools) IsTaggingSupported() bool {
 // even if one of the serverPools fail to delete buckets, we proceed to
 // undo a successful operation.
 func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteBucket(ctx, bucket, forceDelete)
 	}
 	g := errgroup.WithNErrs(len(z.serverPools))
@@ -1165,7 +1165,7 @@ func undoDeleteBucketServerPools(ctx context.Context, bucket string, serverPools
 // sort here just for simplification. As per design it is assumed
 // that all buckets are present on all serverPools.
 func (z *erasureServerPools) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
-	if z.SingleZone() {
+	if z.SinglePool() {
 		buckets, err = z.serverPools[0].ListBuckets(ctx)
 	} else {
 		for _, pool := range z.serverPools {
@@ -1428,7 +1428,7 @@ func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, e
 	return &BackendMetrics{}, NotImplemented{}
 }

-func (z *erasureServerPools) getZoneAndSet(id string) (int, int, error) {
+func (z *erasureServerPools) getPoolAndSet(id string) (int, int, error) {
 	for poolIdx := range z.serverPools {
 		format := z.serverPools[poolIdx].format
 		for setIdx, set := range format.Erasure.Sets {
@@ -1453,7 +1453,7 @@ type HealthOptions struct {
 type HealthResult struct {
 	Healthy       bool
 	HealingDrives int
-	ZoneID, SetID int
+	PoolID, SetID int
 	WriteQuorum   int
 }

@@ -1474,7 +1474,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea

 	for _, localDiskIDs := range diskIDs {
 		for _, id := range localDiskIDs {
-			poolIdx, setIdx, err := z.getZoneAndSet(id)
+			poolIdx, setIdx, err := z.getPoolAndSet(id)
 			if err != nil {
 				logger.LogIf(ctx, err)
 				continue
@@ -1519,7 +1519,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
 			return HealthResult{
 				Healthy:       false,
 				HealingDrives: len(aggHealStateResult.HealDisks),
-				ZoneID:        poolIdx,
+				PoolID:        poolIdx,
 				SetID:         setIdx,
 				WriteQuorum:   writeQuorum,
 			}
@@ -1546,7 +1546,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
 // PutObjectTags - replace or add tags to an existing object
 func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts)
 	}

@@ -1576,7 +1576,7 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s
 // DeleteObjectTags - delete object tags from an existing object
 func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -1605,7 +1605,7 @@ func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, objec
 // GetObjectTags - get object tags from an existing object
 func (z *erasureServerPools) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
 	object = encodeDirObject(object)
-	if z.SingleZone() {
+	if z.SinglePool() {
 		return z.serverPools[0].GetObjectTags(ctx, bucket, object, opts)
 	}
 	for _, pool := range z.serverPools {
@@ -437,7 +437,7 @@ func (b *bucketMetacache) deleteAll() {
 	ctx := context.Background()
 	ez, ok := newObjectLayerFn().(*erasureServerPools)
 	if !ok {
-		logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be *erasureZones"))
+		logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be *erasurePools"))
 		return
 	}

@@ -34,7 +34,7 @@ func TestNewObjectLayer(t *testing.T) {
 	}
 	defer removeRoots(disks)

-	obj, err := newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	obj, err := newObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		t.Fatal("Unexpected object layer initialization error", err)
 	}
@@ -53,7 +53,7 @@ func TestNewObjectLayer(t *testing.T) {
 	}
 	defer removeRoots(disks)

-	obj, err = newObjectLayer(ctx, mustGetZoneEndpoints(disks...))
+	obj, err = newObjectLayer(ctx, mustGetPoolEndpoints(disks...))
 	if err != nil {
 		t.Fatal("Unexpected object layer initialization error", err)
 	}
@@ -446,7 +446,7 @@ func newStorageRESTHTTPServerClient(t *testing.T) (*httptest.Server, *storageRES
 		t.Fatalf("UpdateIsLocal failed %v", err)
 	}

-	registerStorageRESTHandlers(router, []ZoneEndpoints{{
+	registerStorageRESTHandlers(router, []PoolEndpoints{{
 		Endpoints: Endpoints{endpoint},
 	}})

@@ -205,7 +205,7 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err
 	if err != nil {
 		return nil, nil, err
 	}
-	obj, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(fsDirs...))
+	obj, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(fsDirs...))
 	if err != nil {
 		removeRoots(fsDirs)
 		return nil, nil, err
@@ -331,7 +331,7 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer {
 	credentials := globalActiveCred

 	testServer.Obj = objLayer
-	testServer.Disks = mustGetZoneEndpoints(disks...)
+	testServer.Disks = mustGetPoolEndpoints(disks...)
 	testServer.AccessKey = credentials.AccessKey
 	testServer.SecretKey = credentials.SecretKey

@@ -2000,7 +2000,7 @@ func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType)
 	if err != nil {
 		t.Fatalf("Initialization of disks for Erasure setup: %s", err)
 	}
-	objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(erasureDisks...))
+	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(erasureDisks...))
 	if err != nil {
 		t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err)
 	}
@@ -2250,7 +2250,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) {
 	return certOut.Bytes(), keyOut.Bytes(), nil
 }

-func mustGetZoneEndpoints(args ...string) EndpointServerPools {
+func mustGetPoolEndpoints(args ...string) EndpointServerPools {
 	endpoints := mustGetNewEndpoints(args...)
 	drivesPerSet := len(args)
 	setCount := 1
@@ -2258,7 +2258,7 @@ func mustGetZoneEndpoints(args ...string) EndpointServerPools {
 		drivesPerSet = 16
 		setCount = len(args) / 16
 	}
-	return []ZoneEndpoints{{
+	return []PoolEndpoints{{
 		SetCount:     setCount,
 		DrivesPerSet: drivesPerSet,
 		Endpoints:    endpoints,
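Usage stays the same apart from the name: tests stand up an object layer over temporary drives through this helper. A typical call, assuming a test context with ctx and t in scope and getRandomDisks as the drive-provisioning helper used elsewhere in the suite:

	disks, err := getRandomDisks(16)
	if err != nil {
		t.Fatal(err)
	}
	defer removeRoots(disks)
	objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(disks...))
	if err != nil {
		t.Fatal(err)
	}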