mirror of https://github.com/minio/minio.git
converge listBuckets() as a peer call (#16346)
This commit is contained in:
parent 14d29b77ae
commit a15a2556c3

@@ -235,6 +235,10 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets
 			// we ignore disk not found errors
 			return nil
 		}
+		if storageDisks[index].Healing() != nil {
+			// we ignore disks under healing
+			return nil
+		}
 		volsInfo, err := storageDisks[index].ListVols(ctx)
 		if err != nil {
 			return err

@@ -1721,17 +1721,7 @@ func (z *erasureServerPools) purgeDelete(ctx context.Context, bucket, prefix str
 // sort here just for simplification. As per design it is assumed
 // that all buckets are present on all serverPools.
 func (z *erasureServerPools) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
-	for idx, pool := range z.serverPools {
-		if z.IsSuspended(idx) {
-			continue
-		}
-		buckets, err = pool.ListBuckets(ctx, opts)
-		if err != nil {
-			logger.LogIf(ctx, err)
-			continue
-		}
-		break
-	}
+	buckets, err = z.s3Peer.ListBuckets(ctx, opts)
 	if err != nil {
 		return nil, err
 	}

@@ -26,7 +26,6 @@ import (
 	"math/rand"
 	"net/http"
 	"reflect"
-	"sort"
 	"strings"
 	"sync"
 	"time"

@@ -747,56 +746,6 @@ func (s *erasureSets) IsTaggingSupported() bool {
 	return true
 }
 
-// List all buckets from one of the set, we are not doing merge
-// sort here just for simplification. As per design it is assumed
-// that all buckets are present on all sets.
-func (s *erasureSets) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
-	var listBuckets []BucketInfo
-	healBuckets := map[string]VolInfo{}
-	for _, set := range s.sets {
-		// lists all unique buckets across drives.
-		if err := listAllBuckets(ctx, set.getDisks(), healBuckets, s.defaultParityCount); err != nil {
-			return nil, err
-		}
-	}
-
-	// include deleted buckets in listBuckets output
-	deletedBuckets := map[string]VolInfo{}
-
-	if opts.Deleted {
-		for _, set := range s.sets {
-			// lists all deleted buckets across drives.
-			if err := listDeletedBuckets(ctx, set.getDisks(), deletedBuckets, s.defaultParityCount); err != nil {
-				return nil, err
-			}
-		}
-	}
-	for _, v := range healBuckets {
-		bi := BucketInfo{
-			Name:    v.Name,
-			Created: v.Created,
-		}
-		if vi, ok := deletedBuckets[v.Name]; ok {
-			bi.Deleted = vi.Created
-		}
-		listBuckets = append(listBuckets, bi)
-	}
-	for _, v := range deletedBuckets {
-		if _, ok := healBuckets[v.Name]; !ok {
-			listBuckets = append(listBuckets, BucketInfo{
-				Name:    v.Name,
-				Deleted: v.Created,
-			})
-		}
-	}
-
-	sort.Slice(listBuckets, func(i, j int) bool {
-		return listBuckets[i].Name < listBuckets[j].Name
-	})
-
-	return listBuckets, nil
-}
-
 // listDeletedBuckets lists deleted buckets from all disks.
 func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets map[string]VolInfo, readQuorum int) error {
 	g := errgroup.WithNErrs(len(storageDisks))

@@ -80,6 +80,51 @@ func NewS3PeerSys(endpoints EndpointServerPools) *S3PeerSys {
 	}
 }
 
+// ListBuckets lists buckets across all servers and returns a possible consistent view
+func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
+	g := errgroup.WithNErrs(len(sys.peerClients))
+
+	localBuckets, err := listBucketsLocal(ctx, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	nodeBuckets := make([][]BucketInfo, len(sys.peerClients)+1)
+	errs := []error{nil}
+	nodeBuckets[0] = localBuckets
+
+	for idx, client := range sys.peerClients {
+		idx := idx
+		client := client
+		g.Go(func() error {
+			if client == nil {
+				return errPeerOffline
+			}
+			localBuckets, err := client.ListBuckets(ctx, opts)
+			if err != nil {
+				return err
+			}
+			nodeBuckets[idx+1] = localBuckets
+			return nil
+		}, idx)
+	}
+
+	errs = append(errs, g.Wait()...)
+
+	quorum := (len(sys.allPeerClients) / 2)
+	if err = reduceReadQuorumErrs(ctx, errs, bucketOpIgnoredErrs, quorum); err != nil {
+		return nil, err
+	}
+
+	for idx, buckets := range nodeBuckets {
+		if errs[idx] == nil {
+			return buckets, nil
+		}
+	}
+
+	return []BucketInfo{}, nil
+}
+
 // GetBucketInfo returns bucket stat info about bucket on disk across all peers
 func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (binfo BucketInfo, err error) {
 	g := errgroup.WithNErrs(len(sys.peerClients))

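For context, the new S3PeerSys.ListBuckets above asks every peer (plus the local node) for its bucket list in parallel, tolerates a minority of failed peers, and returns the first reply that came back cleanly. The stand-alone sketch below illustrates that fan-out shape with plain goroutines; peerLister, bucketInfo, and the strict-majority rule are simplified stand-ins for MinIO's peerS3Client, BucketInfo, errgroup, and reduceReadQuorumErrs, not the actual implementation.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// bucketInfo is a simplified stand-in for MinIO's BucketInfo.
type bucketInfo struct{ Name string }

// peerLister is a stand-in for a per-peer ListBuckets call.
type peerLister func() ([]bucketInfo, error)

// listBucketsConverged fans the listing out to every peer in parallel and
// returns the first successful reply, provided a strict majority of peers
// answered without error (a simplified read-quorum rule).
func listBucketsConverged(peers []peerLister) ([]bucketInfo, error) {
	results := make([][]bucketInfo, len(peers))
	errs := make([]error, len(peers))

	var wg sync.WaitGroup
	for i, p := range peers {
		wg.Add(1)
		go func(i int, p peerLister) {
			defer wg.Done()
			results[i], errs[i] = p()
		}(i, p)
	}
	wg.Wait()

	ok := 0
	for _, err := range errs {
		if err == nil {
			ok++
		}
	}
	if ok <= len(peers)/2 {
		return nil, errors.New("listBuckets: no read quorum")
	}
	// Return the first listing that succeeded.
	for i, err := range errs {
		if err == nil {
			return results[i], nil
		}
	}
	return nil, errors.New("listBuckets: no successful peer")
}

func main() {
	peers := []peerLister{
		func() ([]bucketInfo, error) { return []bucketInfo{{"alpha"}, {"beta"}}, nil },
		func() ([]bucketInfo, error) { return nil, errors.New("peer offline") },
		func() ([]bucketInfo, error) { return []bucketInfo{{"alpha"}, {"beta"}}, nil },
	}
	buckets, err := listBucketsConverged(peers)
	fmt.Println(buckets, err)
}
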
@@ -127,6 +172,21 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc
 	return bucketInfo, nil
 }
 
+func (client *peerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
+	v := url.Values{}
+	v.Set(peerS3BucketDeleted, strconv.FormatBool(opts.Deleted))
+
+	respBody, err := client.call(peerS3MethodListBuckets, v, nil, -1)
+	if err != nil {
+		return nil, err
+	}
+	defer xhttp.DrainBody(respBody)
+
+	var buckets []BucketInfo
+	err = gob.NewDecoder(respBody).Decode(&buckets)
+	return buckets, err
+}
+
 // GetBucketInfo returns bucket stat info from a peer
 func (client *peerS3Client) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
 	v := url.Values{}

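The peer client above sends the Deleted flag as a form value and decodes the reply with encoding/gob, mirroring the handler added further down that gob-encodes the bucket slice into the response body. A minimal round-trip of that wire format using only the standard library, with a simplified bucketInfo stand-in for MinIO's BucketInfo, might look like this:

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
	"time"
)

// bucketInfo is a simplified stand-in for MinIO's BucketInfo.
type bucketInfo struct {
	Name    string
	Created time.Time
}

func main() {
	in := []bucketInfo{
		{Name: "alpha", Created: time.Now().UTC()},
		{Name: "beta", Created: time.Now().UTC()},
	}

	// Server side: encode the bucket list into the response body.
	var body bytes.Buffer
	if err := gob.NewEncoder(&body).Encode(in); err != nil {
		panic(err)
	}

	// Client side: decode the body back into a slice of buckets.
	var out []bucketInfo
	if err := gob.NewDecoder(&body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out)
}
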
@@ -22,6 +22,7 @@ import (
 	"encoding/gob"
 	"errors"
 	"net/http"
+	"sort"
 
 	"github.com/gorilla/mux"
 	"github.com/minio/minio/internal/logger"

@@ -41,6 +42,7 @@ const (
 	peerS3MethodMakeBucket    = "/make-bucket"
 	peerS3MethodGetBucketInfo = "/get-bucket-info"
 	peerS3MethodDeleteBucket  = "/delete-bucket"
+	peerS3MethodListBuckets   = "/list-buckets"
 )
 
 const (

@@ -77,6 +79,54 @@ func (s *peerS3Server) HealthHandler(w http.ResponseWriter, r *http.Request) {
 	s.IsValid(w, r)
 }
 
+func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) {
+	quorum := (len(globalLocalDrives) / 2)
+
+	buckets = make([]BucketInfo, 0, 32)
+	healBuckets := map[string]VolInfo{}
+
+	// lists all unique buckets across drives.
+	if err := listAllBuckets(ctx, globalLocalDrives, healBuckets, quorum); err != nil {
+		return nil, err
+	}
+
+	// include deleted buckets in listBuckets output
+	deletedBuckets := map[string]VolInfo{}
+
+	if opts.Deleted {
+		// lists all deleted buckets across drives.
+		if err := listDeletedBuckets(ctx, globalLocalDrives, deletedBuckets, quorum); err != nil {
+			return nil, err
+		}
+	}
+
+	for _, v := range healBuckets {
+		bi := BucketInfo{
+			Name:    v.Name,
+			Created: v.Created,
+		}
+		if vi, ok := deletedBuckets[v.Name]; ok {
+			bi.Deleted = vi.Created
+		}
+		buckets = append(buckets, bi)
+	}
+
+	for _, v := range deletedBuckets {
+		if _, ok := healBuckets[v.Name]; !ok {
+			buckets = append(buckets, BucketInfo{
+				Name:    v.Name,
+				Deleted: v.Created,
+			})
+		}
+	}
+
+	sort.Slice(buckets, func(i, j int) bool {
+		return buckets[i].Name < buckets[j].Name
+	})
+
+	return buckets, nil
+}
+
 func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) {
 	g := errgroup.WithNErrs(len(globalLocalDrives)).WithConcurrency(32)
 	bucketsInfo := make([]BucketInfo, len(globalLocalDrives))

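listBucketsLocal above folds the deleted-bucket map into the live listing, so callers see deletion markers alongside live buckets, and then sorts the result by name. That merge is small enough to show in isolation; the sketch below uses throwaway volInfo/bucketInfo types rather than MinIO's VolInfo/BucketInfo, and a mergeBuckets helper that exists only for this illustration.

package main

import (
	"fmt"
	"sort"
	"time"
)

// Simplified stand-ins for MinIO's VolInfo and BucketInfo.
type volInfo struct {
	Name    string
	Created time.Time
}

type bucketInfo struct {
	Name    string
	Created time.Time
	Deleted time.Time
}

// mergeBuckets combines live and deleted bucket maps into one sorted listing.
func mergeBuckets(live, deleted map[string]volInfo) []bucketInfo {
	buckets := make([]bucketInfo, 0, len(live)+len(deleted))

	// Live buckets first; attach a deletion timestamp if one exists.
	for _, v := range live {
		bi := bucketInfo{Name: v.Name, Created: v.Created}
		if d, ok := deleted[v.Name]; ok {
			bi.Deleted = d.Created
		}
		buckets = append(buckets, bi)
	}

	// Buckets that only exist as deletion markers.
	for _, v := range deleted {
		if _, ok := live[v.Name]; !ok {
			buckets = append(buckets, bucketInfo{Name: v.Name, Deleted: v.Created})
		}
	}

	sort.Slice(buckets, func(i, j int) bool { return buckets[i].Name < buckets[j].Name })
	return buckets
}

func main() {
	now := time.Now().UTC()
	live := map[string]volInfo{"photos": {Name: "photos", Created: now}}
	deleted := map[string]volInfo{"logs": {Name: "logs", Created: now}}
	fmt.Println(mergeBuckets(live, deleted))
}
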
@@ -184,6 +234,24 @@ func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions)
 	return reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, (len(globalLocalDrives)/2)+1)
 }
 
+func (s *peerS3Server) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		return
+	}
+
+	bucketDeleted := r.Form.Get(peerS3BucketDeleted) == "true"
+
+	buckets, err := listBucketsLocal(r.Context(), BucketOptions{
+		Deleted: bucketDeleted,
+	})
+	if err != nil {
+		s.writeErrorResponse(w, err)
+		return
+	}
+
+	logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(buckets))
+}
+
 // GetBucketInfoHandler implements peer BuckeInfo call, returns bucket create date.
 func (s *peerS3Server) GetBucketInfoHandler(w http.ResponseWriter, r *http.Request) {
 	if !s.IsValid(w, r) {

@@ -253,4 +321,5 @@ func registerPeerS3Handlers(router *mux.Router) {
 	subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodMakeBucket).HandlerFunc(httpTraceHdrs(server.MakeBucketHandler))
 	subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler))
 	subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodGetBucketInfo).HandlerFunc(httpTraceHdrs(server.GetBucketInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodListBuckets).HandlerFunc(httpTraceHdrs(server.ListBucketsHandler))
 }

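The route registration follows the existing peer-S3 pattern: one versioned subrouter, one POST path per peer method. A stand-alone example of the same gorilla/mux wiring, with a hypothetical prefix, path, and handler rather than MinIO's own, could look like this:

package main

import (
	"encoding/gob"
	"net/http"

	"github.com/gorilla/mux"
)

// listBucketsHandler replies with a gob-encoded (empty) bucket list,
// in the same spirit as the peer handler above.
func listBucketsHandler(w http.ResponseWriter, r *http.Request) {
	_ = gob.NewEncoder(w).Encode([]string{})
}

func main() {
	router := mux.NewRouter()

	// Versioned subrouter; the prefix here is illustrative only.
	subrouter := router.PathPrefix("/minio/peer-s3/v1").Subrouter()
	subrouter.Methods(http.MethodPost).Path("/list-buckets").HandlerFunc(listBucketsHandler)

	_ = http.ListenAndServe(":8080", router)
}
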