/*
 * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"hash/crc32"
	"io"
	"net/http"
	"sort"
	"sync"
	"time"

	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bpool"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/policy"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// setsStorageAPI is an encapsulating type for [][]StorageAPI, providing Close() over all disks.
type setsStorageAPI [][]StorageAPI

func (s setsStorageAPI) Close() error {
	for i := 0; i < len(s); i++ {
		for _, disk := range s[i] {
			if disk == nil {
				continue
			}
			disk.Close()
		}
	}
	return nil
}

// xlSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
// current design.
type xlSets struct {
	sets []*xlObjects

	// Reference format.
	format *formatXLV3

	// xlDisks mutex to lock xlDisks.
	xlDisksMu sync.RWMutex

	// Re-ordered list of disks per set.
	xlDisks setsStorageAPI

	// List of endpoints provided on the command line.
	endpoints EndpointList

	// Total number of sets and the number of disks per set.
	setCount, drivesPerSet int

	// Done channel to control monitoring loop.
	disksConnectDoneCh chan struct{}

	// Distribution algorithm of choice.
	distributionAlgo string

	// Merge tree walk
	pool *MergeWalkPool
}

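// As an illustration of the layout above: 16 endpoints with drivesPerSet = 4
// yield setCount = 4 independent erasure sets; every object is hashed to
// exactly one set and erasure coded across only that set's drives.
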
// isConnected - checks if the endpoint is connected or not.
func (s *xlSets) isConnected(endpoint Endpoint) bool {
	s.xlDisksMu.RLock()
	defer s.xlDisksMu.RUnlock()

	for i := 0; i < s.setCount; i++ {
		for j := 0; j < s.drivesPerSet; j++ {
			if s.xlDisks[i][j] == nil {
				continue
			}
			var endpointStr string
			if endpoint.IsLocal {
				endpointStr = endpoint.Path
			} else {
				endpointStr = endpoint.String()
			}
			if s.xlDisks[i][j].String() != endpointStr {
				continue
			}
			return s.xlDisks[i][j].IsOnline()
		}
	}
	return false
}

// Initializes a new StorageAPI from the endpoint argument, returns
// StorageAPI and also `format` which exists on the disk.
func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) {
	disk, err := newStorageAPI(endpoint)
	if err != nil {
		return nil, nil, err
	}

	format, err := loadFormatXL(disk)
	if err != nil {
		// Close the internal connection to avoid connection leaks.
		disk.Close()
		return nil, nil, err
	}

	return disk, format, nil
}

// findDiskIndex - returns the i,j'th position of the input `format` against the reference
// format, after successful validation.
func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) {
	if err := formatXLV3Check(refFormat, format); err != nil {
		return 0, 0, err
	}

	if format.XL.This == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", format.XL.This)
	}

	for i := 0; i < len(refFormat.XL.Sets); i++ {
		for j := 0; j < len(refFormat.XL.Sets[0]); j++ {
			if refFormat.XL.Sets[i][j] == format.XL.This {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", format.XL.This)
}

// Re-initializes all disks based on the reference format; this function is
// only used by HealFormat and ReloadFormat calls.
func (s *xlSets) reInitDisks(refFormat *formatXLV3, storageDisks []StorageAPI, formats []*formatXLV3) [][]StorageAPI {
	xlDisks := make([][]StorageAPI, s.setCount)
	for i := 0; i < len(refFormat.XL.Sets); i++ {
		xlDisks[i] = make([]StorageAPI, s.drivesPerSet)
	}
	for k := range storageDisks {
		if storageDisks[k] == nil || formats[k] == nil {
			continue
		}
		i, j, err := findDiskIndex(refFormat, formats[k])
		if err != nil {
			// Tag the log with the disk being examined (k); findDiskIndex
			// returns -1 on failure, so its result cannot be used as an index.
			reqInfo := (&logger.ReqInfo{}).AppendTags("storageDisk", storageDisks[k].String())
			ctx := logger.SetReqInfo(context.Background(), reqInfo)
			logger.LogIf(ctx, err)
			continue
		}
		xlDisks[i][j] = storageDisks[k]
	}
	return xlDisks
}

// connectDisksWithQuorum is the same as connectDisks but waits
// for a quorum number of formatted disks to be online across
// the given sets.
func (s *xlSets) connectDisksWithQuorum() {
	var onlineDisks int
	for onlineDisks < len(s.endpoints)/2 {
		for _, endpoint := range s.endpoints {
			if s.isConnected(endpoint) {
				continue
			}
			disk, format, err := connectEndpoint(endpoint)
			if err != nil {
				printEndpointError(endpoint, err)
				continue
			}
			i, j, err := findDiskIndex(s.format, format)
			if err != nil {
				// Close the internal connection to avoid connection leaks.
				disk.Close()
				printEndpointError(endpoint, err)
				continue
			}
			s.xlDisks[i][j] = disk
			onlineDisks++
		}
		// Sleep for a while - so that we don't go into
		// 100% CPU when half the disks are online.
		time.Sleep(500 * time.Millisecond)
	}
}

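// Note: the loop above only blocks until roughly half of all endpoints are
// connected with a matching `format.json`; the remaining disks are picked up
// later by connectDisks via the background monitor.
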
// connectDisks - attempts to connect all the endpoints, loads their format
// and re-arranges the disks into their proper positions.
func (s *xlSets) connectDisks() {
	for _, endpoint := range s.endpoints {
		if s.isConnected(endpoint) {
			continue
		}
		disk, format, err := connectEndpoint(endpoint)
		if err != nil {
			printEndpointError(endpoint, err)
			continue
		}
		i, j, err := findDiskIndex(s.format, format)
		if err != nil {
			// Close the internal connection to avoid connection leaks.
			disk.Close()
			printEndpointError(endpoint, err)
			continue
		}
		s.xlDisksMu.Lock()
		s.xlDisks[i][j] = disk
		s.xlDisksMu.Unlock()
	}
}

// monitorAndConnectEndpoints is a monitoring loop that keeps track of disconnected
// endpoints, reconnecting them and placing them back into their right position in
// the set topology. This monitoring happens at the given monitoring interval.
func (s *xlSets) monitorAndConnectEndpoints(monitorInterval time.Duration) {
	ticker := time.NewTicker(monitorInterval)
	// Stop the timer.
	defer ticker.Stop()

	for {
		select {
		case <-GlobalServiceDoneCh:
			return
		case <-s.disksConnectDoneCh:
			return
		case <-ticker.C:
			s.connectDisks()
		}
	}
}

// GetDisks returns a closure for a given set, which provides list of disks per set.
func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
	return func() []StorageAPI {
		s.xlDisksMu.Lock()
		defer s.xlDisksMu.Unlock()
		disks := make([]StorageAPI, s.drivesPerSet)
		copy(disks, s.xlDisks[setIndex])
		return disks
	}
}

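// The closure returned by GetDisks copies the set's disk slice while holding
// xlDisksMu, so xlObjects can iterate over a stable snapshot even while the
// monitor goroutine swaps reconnected disks into s.xlDisks concurrently.
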
const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs.

// Initialize new set of erasure coded sets.
func newXLSets(endpoints EndpointList, format *formatXLV3, setCount int, drivesPerSet int) (ObjectLayer, error) {

	// Initialize the XL sets instance.
	s := &xlSets{
		sets:               make([]*xlObjects, setCount),
		xlDisks:            make([][]StorageAPI, setCount),
		endpoints:          endpoints,
		setCount:           setCount,
		drivesPerSet:       drivesPerSet,
		format:             format,
		disksConnectDoneCh: make(chan struct{}),
		distributionAlgo:   format.XL.DistributionAlgo,
		pool:               NewMergeWalkPool(globalMergeLookupTimeout),
	}

	mutex := newNSLock(globalIsDistXL)

	// Initialize byte pool once for all sets, bpool size is set to
	// setCount * drivesPerSet with each buffer sized up to blockSizeV1.
	bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2)

	for i := 0; i < len(format.XL.Sets); i++ {
		s.xlDisks[i] = make([]StorageAPI, drivesPerSet)

		// Initialize xl objects for a given set.
		s.sets[i] = &xlObjects{
			getDisks: s.GetDisks(i),
			nsMutex:  mutex,
			bp:       bp,
		}
		go s.sets[i].cleanupStaleMultipartUploads(context.Background(), GlobalMultipartCleanupInterval, GlobalMultipartExpiry, GlobalServiceDoneCh)
	}

	// Connect disks right away, but wait until we have `format.json` quorum.
	s.connectDisksWithQuorum()

	// Start the disk monitoring and connect routine.
	go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)

	return s, nil
}

// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo {
	var storageInfo StorageInfo
	storageInfo.Backend.Type = BackendErasure
	for _, set := range s.sets {
		lstorageInfo := set.StorageInfo(ctx)
		storageInfo.Used = storageInfo.Used + lstorageInfo.Used
		storageInfo.Total = storageInfo.Total + lstorageInfo.Total
		storageInfo.Available = storageInfo.Available + lstorageInfo.Available
		storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks + lstorageInfo.Backend.OnlineDisks
		storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks + lstorageInfo.Backend.OfflineDisks
	}

	scData, scParity := getRedundancyCount(standardStorageClass, s.drivesPerSet)
	storageInfo.Backend.StandardSCData = scData
	storageInfo.Backend.StandardSCParity = scParity

	rrSCData, rrSCparity := getRedundancyCount(reducedRedundancyStorageClass, s.drivesPerSet)
	storageInfo.Backend.RRSCData = rrSCData
	storageInfo.Backend.RRSCParity = rrSCparity

	storageInfo.Backend.Sets = make([][]madmin.DriveInfo, s.setCount)
	for i := range storageInfo.Backend.Sets {
		storageInfo.Backend.Sets[i] = make([]madmin.DriveInfo, s.drivesPerSet)
	}

	storageDisks, err := initStorageDisks(s.endpoints)
	if err != nil {
		return storageInfo
	}
	defer closeStorageDisks(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks)

	drivesInfo := formatsToDrivesInfo(s.endpoints, formats, sErrs)
	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		// Ignore errors here, since this call cannot do anything at
		// this point; too many disks are down already.
		return storageInfo
	}

	// fill all the available/online endpoints
	for _, drive := range drivesInfo {
		if drive.UUID == "" {
			continue
		}
		for i := range refFormat.XL.Sets {
			for j, driveUUID := range refFormat.XL.Sets[i] {
				if driveUUID == drive.UUID {
					storageInfo.Backend.Sets[i][j] = drive
				}
			}
		}
	}

	// fill all the offline, missing endpoints as well.
	for _, drive := range drivesInfo {
		if drive.UUID == "" {
			for i := range storageInfo.Backend.Sets {
				for j := range storageInfo.Backend.Sets[i] {
					if storageInfo.Backend.Sets[i][j].Endpoint == drive.Endpoint {
						continue
					}
					if storageInfo.Backend.Sets[i][j].Endpoint == "" {
						storageInfo.Backend.Sets[i][j] = drive
						break
					}
				}
			}
		}
	}

	return storageInfo
}

// Shutdown shuts down all erasure coded sets in parallel and
// returns the first error encountered, if any.
func (s *xlSets) Shutdown(ctx context.Context) error {
	g := errgroup.WithNErrs(len(s.sets))

	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].Shutdown(ctx)
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}

	return nil
}

// MakeBucketWithLocation - creates a new bucket across all sets simultaneously;
// even if one of the sets fails to create the bucket, we proceed to undo the
// successful operations.
func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Create buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].MakeBucketWithLocation(ctx, bucket, location)
		}, index)
	}

	errs := g.Wait()
	// Upon even a single write quorum error we undo all previously created buckets.
	for _, err := range errs {
		if err != nil {
			if _, ok := err.(InsufficientWriteQuorum); ok {
				undoMakeBucketSets(bucket, s.sets, errs)
			}
			return err
		}
	}

	// Success.
	return nil
}

// This function is used to undo a successful MakeBucket operation.
func undoMakeBucketSets(bucket string, sets []*xlObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous make bucket entry on all underlying sets.
	for index := range sets {
		index := index
		if errs[index] == nil {
			g.Go(func() error {
				return sets[index].DeleteBucket(context.Background(), bucket)
			}, index)
		}
	}

	// Wait for all delete bucket operations to finish.
	g.Wait()
}

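// The undo above matters because the design assumes every bucket exists on
// every set (see ListBuckets, which queries only a single set); a bucket left
// behind on some sets after a write-quorum failure would surface
// inconsistently across listings.
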
// hashes the key returning an integer based on the input algorithm.
// This function currently supports
// - CRCMOD
// - all new algos.
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

func hashKey(algo string, key string, cardinality int) int {
	switch algo {
	case formatXLVersionV2DistributionAlgo:
		return crcHashMod(key, cardinality)
	default:
		// Unknown algorithm returns -1, also if cardinality is lesser than 0.
		return -1
	}
}

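// Worked example of the CRCMOD distribution: with 4 sets, an object named
// "photos/2019/a.jpg" always maps to set
// crc32.ChecksumIEEE("photos/2019/a.jpg") % 4, so any node can locate the
// object's set without a central placement index.
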
// Always returns the same erasure coded set for a given input.
func (s *xlSets) getHashedSetIndex(input string) int {
	return hashKey(s.distributionAlgo, input, len(s.sets))
}

// Always returns the same erasure coded set for a given input.
func (s *xlSets) getHashedSet(input string) (set *xlObjects) {
	return s.sets[s.getHashedSetIndex(input)]
}

// GetBucketInfo - returns bucket info from one of the erasure coded sets.
func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	return s.getHashedSet(bucket).GetBucketInfo(ctx, bucket)
}

// ListObjectsV2 lists all objects in a bucket filtered by prefix.
func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return result, err
	}

	listObjectsV2Info := ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return listObjectsV2Info, err
}

// SetBucketPolicy persists the new policy on the bucket.
func (s *xlSets) SetBucketPolicy(ctx context.Context, bucket string, policy *policy.Policy) error {
	return savePolicyConfig(ctx, s, bucket, policy)
}

// GetBucketPolicy will return the policy on a bucket.
func (s *xlSets) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
	return getPolicyConfig(s, bucket)
}

// DeleteBucketPolicy deletes all policies on a bucket.
func (s *xlSets) DeleteBucketPolicy(ctx context.Context, bucket string) error {
	return removePolicyConfig(ctx, s, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (s *xlSets) IsNotificationSupported() bool {
	return s.getHashedSet("").IsNotificationSupported()
}

// IsListenBucketSupported returns whether listen bucket notification is applicable for this layer.
func (s *xlSets) IsListenBucketSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *xlSets) IsEncryptionSupported() bool {
	return s.getHashedSet("").IsEncryptionSupported()
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (s *xlSets) IsCompressionSupported() bool {
	return s.getHashedSet("").IsCompressionSupported()
}

// DeleteBucket - deletes a bucket on all sets simultaneously;
// even if one of the sets fails to delete the bucket, we proceed to
// undo the successful operations.
func (s *xlSets) DeleteBucket(ctx context.Context, bucket string) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Delete buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].DeleteBucket(ctx, bucket)
		}, index)
	}

	errs := g.Wait()
	// For any write quorum failure, we undo all the delete buckets operation
	// by creating all the buckets again.
	for _, err := range errs {
		if err != nil {
			if _, ok := err.(InsufficientWriteQuorum); ok {
				undoDeleteBucketSets(bucket, s.sets, errs)
			}
			return err
		}
	}

	// Delete all bucket metadata.
	deleteBucketMetadata(ctx, bucket, s)

	// Success.
	return nil
}

// This function is used to undo a successful DeleteBucket operation.
func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous delete bucket on all underlying sets.
	for index := range sets {
		index := index
		if errs[index] == nil {
			g.Go(func() error {
				return sets[index].MakeBucketWithLocation(context.Background(), bucket, "")
			}, index)
		}
	}

	g.Wait()
}

// List all buckets from one of the sets; we are not doing a merge
// sort here, just for simplification. As per the design it is assumed
// that all buckets are present on all sets.
func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	// Always lists from the same set signified by the empty string.
	return s.getHashedSet("").ListBuckets(ctx)
}

// --- Object Operations ---

// GetObjectNInfo - returns object info and locked object ReadCloser
func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}

// GetObject - reads an object from the hashedSet based on the object name.
func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// PutObject - writes an object to hashedSet based on the object name.
func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, opts)
}

// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) {
	return s.getHashedSet(object).DeleteObject(ctx, bucket, object)
}

// DeleteObjects - bulk delete of objects.
// Bulk delete is only possible within one set. For that purpose
// objects are grouped by set first, and then bulk delete is invoked
// for each set; the error response of each delete is returned.
func (s *xlSets) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {

	type delObj struct {
		// Set index associated to this object
		setIndex int
		// Original index from the list of arguments
		// where this object is passed
		origIndex int
		// Object name
		name string
	}

	// Transform []delObj to the list of object names
	toNames := func(delObjs []delObj) []string {
		names := make([]string, len(delObjs))
		for i, obj := range delObjs {
			names[i] = obj.name
		}
		return names
	}

	// The result of delete operation on all passed objects
	var delErrs = make([]error, len(objects))

	// A map between a set and its associated objects
	var objSetMap = make(map[int][]delObj)

	// Group objects by set index
	for i, object := range objects {
		index := s.getHashedSetIndex(object)
		objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, name: object})
	}

	// Invoke bulk delete on objects per set and save
	// the result of the delete operation
	for _, objsGroup := range objSetMap {
		errs, err := s.getHashedSet(objsGroup[0].name).DeleteObjects(ctx, bucket, toNames(objsGroup))
		if err != nil {
			return nil, err
		}
		for i, obj := range objsGroup {
			delErrs[obj.origIndex] = errs[i]
		}
	}

	return delErrs, nil
}

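// DeleteObjects preserves the caller's ordering: delErrs[i] always reports the
// outcome for objects[i], even though the deletes themselves are issued per
// set via the objSetMap grouping above.
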
// CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcSet := s.getHashedSet(srcObject)
	destSet := s.getHashedSet(destObject)

	// Check if this request is only metadata update.
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(destBucket, destObject))
	if cpSrcDstSame && srcInfo.metadataOnly {
		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
	}

	if !cpSrcDstSame {
		objectDWLock := destSet.nsMutex.NewNSLock(destBucket, destObject)
		if err := objectDWLock.GetLock(globalObjectTimeout); err != nil {
			return objInfo, err
		}
		defer objectDWLock.Unlock()
	}
	putOpts := ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}
	return destSet.putObject(ctx, destBucket, destObject, srcInfo.PutObjReader, putOpts)
}

// Returns a function "listDir" of type ListDirFunc.
// disks - used for doing disk.ListDir(); each set passes its own set of disks.
func listDirSetsFactory(ctx context.Context, sets ...*xlObjects) ListDirFunc {
	listDirInternal := func(bucket, prefixDir, prefixEntry string, disks []StorageAPI) (mergedEntries []string) {
		var diskEntries = make([][]string, len(disks))
		var wg sync.WaitGroup
		for index, disk := range disks {
			if disk == nil {
				continue
			}
			wg.Add(1)
			go func(index int, disk StorageAPI) {
				defer wg.Done()
				diskEntries[index], _ = disk.ListDir(bucket, prefixDir, -1, xlMetaJSONFile)
			}(index, disk)
		}

		wg.Wait()

		// Find elements in entries which are not in mergedEntries
		for _, entries := range diskEntries {
			var newEntries []string

			for _, entry := range entries {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort it.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}

		return mergedEntries
	}

	// listDir - lists all the entries at a given prefix and given entry in the prefix.
	listDir := func(bucket, prefixDir, prefixEntry string) (mergedEntries []string) {
		for _, set := range sets {
			var newEntries []string
			// Find elements in entries which are not in mergedEntries
			for _, entry := range listDirInternal(bucket, prefixDir, prefixEntry, set.getLoadBalancedDisks()) {
				idx := sort.SearchStrings(mergedEntries, entry)
				// if entry is already present in mergedEntries don't add.
				if idx < len(mergedEntries) && mergedEntries[idx] == entry {
					continue
				}
				newEntries = append(newEntries, entry)
			}

			if len(newEntries) > 0 {
				// Merge the entries and sort it.
				mergedEntries = append(mergedEntries, newEntries...)
				sort.Strings(mergedEntries)
			}
		}
		return filterMatchingPrefix(mergedEntries, prefixEntry)
	}
	return listDir
}

// FileInfoCh - file info channel
type FileInfoCh struct {
	Ch    chan FileInfo
	Prev  FileInfo
	Valid bool
}

// Pop - pops a cached entry if any, otherwise reads from the channel.
func (f *FileInfoCh) Pop() (fi FileInfo, ok bool) {
	if f.Valid {
		f.Valid = false
		return f.Prev, true
	}
	// No cached entries found, read from the channel.
	f.Prev, ok = <-f.Ch
	return f.Prev, ok
}

// Push - cache an entry, for Pop() later.
func (f *FileInfoCh) Push(fi FileInfo) {
	f.Prev = fi
	f.Valid = true
}

// Calculate least entry across multiple FileInfo channels, additionally
// returns a boolean to indicate if the caller needs to call again.
func leastEntry(entriesCh []FileInfoCh, readQuorum int) (FileInfo, bool) {
	var entriesValid = make([]bool, len(entriesCh))
	var entries = make([]FileInfo, len(entriesCh))
	for i := range entriesCh {
		entries[i], entriesValid[i] = entriesCh[i].Pop()
	}

	var isTruncated = false
	for _, valid := range entriesValid {
		if !valid {
			continue
		}
		isTruncated = true
		break
	}

	var lentry FileInfo
	var found bool
	for i, valid := range entriesValid {
		if !valid {
			continue
		}
		if !found {
			lentry = entries[i]
			found = true
			continue
		}
		if entries[i].Name < lentry.Name {
			lentry = entries[i]
		}
	}

	// We haven't been able to find any least entry;
	// this would mean that we don't have any valid entries.
	if !found {
		return lentry, isTruncated
	}

	leastEntryCount := 0
	for i, valid := range entriesValid {
		if !valid {
			continue
		}

		// Entries are duplicated across disks,
		// we should simply skip such entries.
		if lentry.Name == entries[i].Name && lentry.ModTime.Equal(entries[i].ModTime) {
			leastEntryCount++
			continue
		}

		// Push all entries which are lexically higher
		// and will be returned later in Pop()
		entriesCh[i].Push(entries[i])
	}

	if readQuorum < 0 {
		return lentry, isTruncated
	}

	quorum := lentry.Quorum
	if quorum == 0 {
		quorum = readQuorum
	}

	if leastEntryCount >= quorum {
		return lentry, isTruncated
	}

	return leastEntry(entriesCh, readQuorum)
}

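// leastEntry treats each FileInfoCh as the head of a lexically sorted stream:
// it pops one entry from every stream, keeps the smallest name, and pushes the
// rest back for the next call. An entry seen on fewer than the required quorum
// of streams is assumed to be a partially written object and is skipped,
// unless readQuorum is negative, which the heal path uses to surface exactly
// those entries.
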
// mergeEntriesCh - merges FileInfo channels into entries, up to maxKeys.
func mergeEntriesCh(entriesCh []FileInfoCh, maxKeys int, readQuorum int) (entries FilesInfo) {
	var i = 0
	for {
		fi, valid := leastEntry(entriesCh, readQuorum)
		if !valid {
			break
		}
		if i == maxKeys {
			entries.IsTruncated = true
			// Re-insert the last entry so it can be
			// listed in the next listing iteration.
			for j := range entriesCh {
				if !entriesCh[j].Valid {
					entriesCh[j].Push(fi)
				}
			}
			break
		}
		entries.Files = append(entries.Files, fi)
		i++
	}
	return entries
}

// Starts a walk channel across all disks and returns a slice.
func (s *xlSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh chan struct{}) []FileInfoCh {
	var entryChs []FileInfoCh
	for _, set := range s.sets {
		for _, disk := range set.getDisks() {
			if disk == nil {
				// Disk can be offline
				continue
			}
			entryCh, err := disk.Walk(bucket, prefix, marker, recursive, xlMetaJSONFile, readMetadata, endWalkCh)
			if err != nil {
				// Disk walk returned error, ignore it.
				continue
			}
			entryChs = append(entryChs, FileInfoCh{
				Ch: entryCh,
			})
		}
	}
	return entryChs
}

// listObjects - implements listing of objects across disks, each disk is independently
// walked and merged at this layer. The resulting value through the merge process sends
// the data in lexically sorted order.
func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, heal bool) (loi ListObjectsInfo, err error) {
	if err = checkListObjsArgs(ctx, bucket, prefix, marker, delimiter, s); err != nil {
		return loi, err
	}

	// If marker is set, validate the pre-condition.
	if marker != "" {
		// Marker not common with prefix is not implemented. Send an empty response.
		if !hasPrefix(marker, prefix) {
			return loi, nil
		}
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to s3 spec we stop at the 'delimiter'
	// along with the prefix. On a flat namespace with 'prefix'
	// as '/' we don't have any entries, since all the keys are
	// of form 'keyName/...'
	if delimiter == slashSeparator && prefix == slashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	// Default is recursive, if delimiter is set then list non recursive.
	recursive := true
	if delimiter == slashSeparator {
		recursive = false
	}

	entryChs, endWalkCh := s.pool.Release(listParams{bucket, recursive, marker, prefix, heal})
	if entryChs == nil {
		endWalkCh = make(chan struct{})
		entryChs = s.startMergeWalks(context.Background(), bucket, prefix, marker, recursive, endWalkCh)
	}

	readQuorum := s.drivesPerSet / 2
	if heal {
		readQuorum = -1
	}

	entries := mergeEntriesCh(entryChs, maxKeys, readQuorum)
	if len(entries.Files) == 0 {
		return loi, nil
	}

	loi.IsTruncated = entries.IsTruncated
	if loi.IsTruncated {
		loi.NextMarker = entries.Files[len(entries.Files)-1].Name
	}

	for _, entry := range entries.Files {
		var objInfo ObjectInfo
		if hasSuffix(entry.Name, slashSeparator) {
			if !recursive {
				loi.Prefixes = append(loi.Prefixes, entry.Name)
				continue
			}
			objInfo = ObjectInfo{
				Bucket: bucket,
				Name:   entry.Name,
				IsDir:  true,
			}
		} else {
			objInfo = ObjectInfo{
				IsDir:           false,
				Bucket:          bucket,
				Name:            entry.Name,
				ModTime:         entry.ModTime,
				Size:            entry.Size,
				ContentType:     entry.Metadata["content-type"],
				ContentEncoding: entry.Metadata["content-encoding"],
			}

			// Extract etag from metadata.
			objInfo.ETag = extractETag(entry.Metadata)

			// All the parts per object.
			objInfo.Parts = entry.Parts

			// etag/md5Sum has already been extracted. We need to
			// remove it to avoid it appearing as part of
			// response headers. e.g., X-Minio-* or X-Amz-*.
			objInfo.UserDefined = cleanMetadata(entry.Metadata)

			// Update storage class
			if sc, ok := entry.Metadata[amzStorageClass]; ok {
				objInfo.StorageClass = sc
			} else {
				objInfo.StorageClass = globalMinioDefaultStorageClass
			}
		}
		loi.Objects = append(loi.Objects, objInfo)
	}
	if loi.IsTruncated {
		s.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix, heal}, entryChs, endWalkCh)
	}
	return loi, nil
}

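// Pagination in listObjects relies on the MergeWalkPool: a truncated response
// parks its walker channels with s.pool.Set keyed by listParams, and the next
// continuation call retrieves them via s.pool.Release, resuming the existing
// disk walks instead of starting new ones from scratch.
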
// ListObjects - implements listing of objects across disks, each disk is independently
// walked and merged at this layer. The resulting value through the merge process sends
// the data in lexically sorted order.
func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys, false)
}

func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// In list multipart uploads we are going to treat input prefix as the object,
	// this means that we are not supporting directory navigation.
	return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, opts)
}

// Copies a part of an object from source hashedSet to destination hashedSet.
func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
	destSet := s.getHashedSet(destObject)

	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}

/*

All disks online
-----------------
- All Unformatted - format all and return success.
- Some Unformatted - format all and return success.
- Any JBOD inconsistent - return failure
- Some are corrupt (missing format.json) - return failure
- Any unrecognized disks - return failure

Some disks are offline and we have quorum.
-----------------
- Some unformatted - format all and return success,
  treat disks offline as corrupted.
- Any JBOD inconsistent - return failure
- Some are corrupt (missing format.json)
- Any unrecognized disks - return failure

No read quorum
-----------------
failure for all cases.

// Pseudo code for managing `format.json`.

// Generic checks.
if (no quorum) return error
if (any disk is corrupt) return error // Always error
if (jbod inconsistent) return error // Always error.
if (disks not recognized) // Always error.

// Specific checks.
if (all disks online)
  if (all disks return format.json)
    if (jbod consistent)
      if (all disks recognized)
        return
  else
    if (all disks return format.json not found)
      return error
    else (some disks return format.json not found)
      (heal format)
      return
    fi
  fi
else
  if (some disks return format.json not found)
    // Offline disks are marked as dead.
    (heal format) // Offline disks should be marked as dead.
    return success
  fi
fi
*/

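// In code, the checks sketched above map roughly onto getFormatXLInQuorum
// (read quorum), formatXLV3Check via findDiskIndex (consistency and
// recognition of each disk) and HealFormat below (healing of unformatted
// disks).
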
func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) {
	// Existing formats are available (i.e. ok), so save it in
	// result, also populate disks to be healed.
	for i, format := range formats {
		drive := endpoints.GetString(i)
		switch {
		case format != nil:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     format.XL.This,
				Endpoint: drive,
				State:    madmin.DriveStateOk,
			})
		case sErrs[i] == errUnformattedDisk:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateMissing,
			})
		case sErrs[i] == errCorruptedFormat:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateCorrupt,
			})
		default:
			beforeDrives = append(beforeDrives, madmin.DriveInfo{
				UUID:     "",
				Endpoint: drive,
				State:    madmin.DriveStateOffline,
			})
		}
	}

	return beforeDrives
}

// Reloads the format from the disk, usually called by a remote peer notifier while
// healing in a distributed setup.
func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
	// Acquire lock on format.json
	formatLock := s.getHashedSet(formatConfigFile).nsMutex.NewNSLock(minioMetaBucket, formatConfigFile)
	if err = formatLock.GetRLock(globalHealingTimeout); err != nil {
		return err
	}
	defer formatLock.RUnlock()

	storageDisks, err := initStorageDisks(s.endpoints)
	if err != nil {
		return err
	}
	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks)
	if err = checkFormatXLValues(formats); err != nil {
		return err
	}

	for index, sErr := range sErrs {
		if sErr != nil {
			// Look for acceptable heal errors, for any other
			// errors we should simply quit and return.
			if _, ok := formatHealErrors[sErr]; !ok {
				return fmt.Errorf("Disk %s: %s", s.endpoints[index], sErr)
			}
		}
	}

	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		return err
	}

	// kill the monitoring loop such that we stop writing
	// to indicate that we will re-initialize everything
	// with new format.
	s.disksConnectDoneCh <- struct{}{}

	// Replace the new format.
	s.format = refFormat

	s.xlDisksMu.Lock()
	{
		// Close all existing disks.
		s.xlDisks.Close()

		// Re initialize disks, after saving the new reference format.
		s.xlDisks = s.reInitDisks(refFormat, storageDisks, formats)
	}
	s.xlDisksMu.Unlock()

	// Restart monitoring loop to monitor reformatted disks again.
	go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)

	return nil
}

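// ReloadFormat and HealFormat share the same swap sequence: signal
// disksConnectDoneCh to stop the connect monitor, take xlDisksMu, Close() the
// old disks and install the re-initialized ones, then restart
// monitorAndConnectEndpoints against the new reference format.
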
// If it is a single node XL and all disks are root disks, it is most likely a test setup, else it is a production setup.
// On a test setup we allow creation of format.json on root disks to help with dev/testing.
func isTestSetup(infos []DiskInfo, errs []error) bool {
	rootDiskCount := 0
	for i := range errs {
		if errs[i] != nil {
			// On error it is safer to assume that this is not a test setup.
			return false
		}
		if infos[i].RootDisk {
			rootDiskCount++
		}
	}
	// It is a test setup if all disks are root disks.
	return rootDiskCount == len(infos)
}

func getAllDiskInfos(storageDisks []StorageAPI) ([]DiskInfo, []error) {
	infos := make([]DiskInfo, len(storageDisks))
	errs := make([]error, len(storageDisks))
	var wg sync.WaitGroup
	for i := range storageDisks {
		if storageDisks[i] == nil {
			errs[i] = errDiskNotFound
			continue
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			infos[i], errs[i] = storageDisks[i].DiskInfo()
		}(i)
	}
	wg.Wait()
	return infos, errs
}

// Mark root disks as down so as not to heal them.
func markRootDisksAsDown(storageDisks []StorageAPI) {
	infos, errs := getAllDiskInfos(storageDisks)
	if isTestSetup(infos, errs) {
		// Allow healing of disks for test setups to help with testing.
		return
	}
	for i := range storageDisks {
		if errs[i] != nil {
			storageDisks[i] = nil
			continue
		}
		if infos[i].RootDisk {
			// We should not heal on a root disk, i.e. in a situation where the minio-administrator has unmounted a
			// defective drive we should not heal a path on the root disk.
			storageDisks[i] = nil
		}
	}
}

2018-04-30 23:37:39 -04:00
|
|
|
// HealFormat - heals missing `format.json` on fresh unformatted disks.
|
|
|
|
// TODO: In future support corrupted disks missing format.json but has erasure
|
|
|
|
// coded data in it.
|
2018-04-09 13:25:41 -04:00
|
|
|
func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
|
2018-02-15 20:45:57 -05:00
|
|
|
// Acquire lock on format.json
|
|
|
|
formatLock := s.getHashedSet(formatConfigFile).nsMutex.NewNSLock(minioMetaBucket, formatConfigFile)
|
2018-04-09 13:25:41 -04:00
|
|
|
if err = formatLock.GetLock(globalHealingTimeout); err != nil {
|
2018-02-15 20:45:57 -05:00
|
|
|
return madmin.HealResultItem{}, err
|
|
|
|
}
|
|
|
|
defer formatLock.Unlock()
|
|
|
|
|
2018-04-04 00:58:48 -04:00
|
|
|
storageDisks, err := initStorageDisks(s.endpoints)
|
|
|
|
if err != nil {
|
|
|
|
return madmin.HealResultItem{}, err
|
|
|
|
}
|
2018-04-09 13:25:41 -04:00
|
|
|
|
|
|
|
defer func(storageDisks []StorageAPI) {
|
|
|
|
if err != nil {
|
|
|
|
closeStorageDisks(storageDisks)
|
|
|
|
}
|
|
|
|
}(storageDisks)
|
2018-04-04 00:58:48 -04:00
|
|
|
|
2019-02-06 14:44:19 -05:00
|
|
|
markRootDisksAsDown(storageDisks)
|
2019-01-23 18:29:29 -05:00
|
|
|
|
2018-04-04 00:58:48 -04:00
|
|
|
formats, sErrs := loadFormatXLAll(storageDisks)
|
|
|
|
if err = checkFormatXLValues(formats); err != nil {
|
2018-02-15 20:45:57 -05:00
|
|
|
return madmin.HealResultItem{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Prepare heal-result
|
2018-04-09 13:25:41 -04:00
|
|
|
res = madmin.HealResultItem{
|
2018-02-15 20:45:57 -05:00
|
|
|
Type: madmin.HealItemMetadata,
|
|
|
|
Detail: "disk-format",
|
|
|
|
DiskCount: s.setCount * s.drivesPerSet,
|
|
|
|
SetCount: s.setCount,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch all the drive info status.
|
|
|
|
beforeDrives := formatsToDrivesInfo(s.endpoints, formats, sErrs)
|
|
|
|
|
|
|
|
res.After.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
|
|
|
|
res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
|
|
|
|
// Copy "after" drive state too from before.
|
|
|
|
for k, v := range beforeDrives {
|
2019-02-13 07:59:36 -05:00
|
|
|
res.Before.Drives[k] = madmin.HealDriveInfo(v)
|
|
|
|
res.After.Drives[k] = madmin.HealDriveInfo(v)
|
2018-02-15 20:45:57 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
for index, sErr := range sErrs {
|
|
|
|
if sErr != nil {
|
|
|
|
// Look for acceptable heal errors, for any other
|
|
|
|
// errors we should simply quit and return.
|
|
|
|
if _, ok := formatHealErrors[sErr]; !ok {
|
|
|
|
return res, fmt.Errorf("Disk %s: %s", s.endpoints[index], sErr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-30 23:37:39 -04:00
|
|
|
if !hasAnyErrorsUnformatted(sErrs) {
|
|
|
|
// No unformatted disks found disks are either offline
|
|
|
|
// or online, no healing is required.
|
|
|
|
return res, errNoHealRequired
|
|
|
|
}
|
|
|
|
|
2018-02-15 20:45:57 -05:00
|
|
|
// All disks are unformatted, return quorum error.
|
|
|
|
if shouldInitXLDisks(sErrs) {
|
|
|
|
return res, errXLReadQuorum
|
|
|
|
}
|
|
|
|
|
|
|
|
refFormat, err := getFormatXLInQuorum(formats)
|
|
|
|
if err != nil {
|
|
|
|
return res, err
|
|
|
|
}

	// Mark all UUIDs which might be offline; use the list
	// of formats to mark them appropriately.
	markUUIDsOffline(refFormat, formats)

	// Initialize a new set of set formats which will be written to disk.
	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.drivesPerSet, formats, sErrs)

	// Look for all offline/unformatted disks in our reference format,
	// such that we can fill them up with new UUIDs. This looping also
	// ensures that the replaced disks are allocated evenly across all
	// sets, making sure that the redundancy is not lost.
	for i := range refFormat.XL.Sets {
		for j := range refFormat.XL.Sets[i] {
			if refFormat.XL.Sets[i][j] == offlineDiskUUID {
				for l := range newFormatSets[i] {
					if newFormatSets[i][l] == nil {
						continue
					}
					if newFormatSets[i][l].XL.This == "" {
						newFormatSets[i][l].XL.This = mustGetUUID()
						refFormat.XL.Sets[i][j] = newFormatSets[i][l].XL.This
						for m, v := range res.After.Drives {
							if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) {
								res.After.Drives[m].UUID = newFormatSets[i][l].XL.This
								res.After.Drives[m].State = madmin.DriveStateOk
							}
						}
						break
					}
				}
			}
		}
	}
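
	// For example (illustrative only): if refFormat marks set 0, slot 2 as
	// offlineDiskUUID and a freshly formatted disk exists in set 0, that
	// disk receives a new UUID, the UUID is written into
	// refFormat.XL.Sets[0][2], and the matching entry in res.After.Drives
	// flips to DriveStateOk.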

	if !dryRun {
		var tmpNewFormats = make([]*formatXLV3, s.setCount*s.drivesPerSet)
		for i := range newFormatSets {
			for j := range newFormatSets[i] {
				if newFormatSets[i][j] == nil {
					continue
				}
				tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j]
				tmpNewFormats[i*s.drivesPerSet+j].XL.Sets = refFormat.XL.Sets
			}
		}
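
		// The nested loop above flattens the per-set formats into a
		// single slice indexed by i*drivesPerSet+j; e.g. with 16 drives
		// per set, set 1 drive 3 lands at index 19 (illustrative
		// numbers only).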

		// Initialize the meta volume; if the volume already exists it is
		// ignored, and any disks which are not found are ignored as well.
		if err = initFormatXLMetaVolume(storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
		}

		// Save formats `format.json` across all disks.
		if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, err
		}

		// Kill the monitoring loop so that we stop writing, indicating
		// that we will re-initialize everything with the new format.
		s.disksConnectDoneCh <- struct{}{}

		// Replace with new reference format.
		s.format = refFormat

		s.xlDisksMu.Lock()
		{
			// Disconnect/relinquish all existing disks.
			s.xlDisks.Close()

			// Re-initialize disks after saving the new reference format.
			s.xlDisks = s.reInitDisks(refFormat, storageDisks, tmpNewFormats)
		}
		s.xlDisksMu.Unlock()

		// Restart our monitoring loop to start monitoring newly formatted disks.
		go s.monitorAndConnectEndpoints(defaultMonitorConnectEndpointInterval)
	}

	return res, nil
}

// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) {
	bucketLock := globalNSMutex.NewNSLock(bucket, "")
	if err := bucketLock.GetLock(globalHealingTimeout); err != nil {
		return result, err
	}
	defer bucketLock.Unlock()

	// Initialize heal result info
	result = madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: s.setCount * s.drivesPerSet,
		SetCount:  s.setCount,
	}

	for _, s := range s.sets {
		var healResult madmin.HealResultItem
		healResult, err = s.HealBucket(ctx, bucket, dryRun, remove)
		if err != nil {
			return result, err
		}
		result.Before.Drives = append(result.Before.Drives, healResult.Before.Drives...)
		result.After.Drives = append(result.After.Drives, healResult.After.Drives...)
	}
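
	// NOTE: the loop variable above shadows the receiver s for its scope,
	// so s.HealBucket inside it is the per-set heal. Drive reports from
	// every set are concatenated; the loop below then marks any endpoint
	// that no set reported as offline.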

	for _, endpoint := range s.endpoints {
		var foundBefore bool
		for _, v := range result.Before.Drives {
			if endpoint.IsLocal {
				if v.Endpoint == endpoint.Path {
					foundBefore = true
				}
			} else {
				if v.Endpoint == endpoint.String() {
					foundBefore = true
				}
			}
		}
		if !foundBefore {
			result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: endpoint.String(),
				State:    madmin.DriveStateOffline,
			})
		}
		var foundAfter bool
		for _, v := range result.After.Drives {
			if endpoint.IsLocal {
				if v.Endpoint == endpoint.Path {
					foundAfter = true
				}
			} else {
				if v.Endpoint == endpoint.String() {
					foundAfter = true
				}
			}
		}
		if !foundAfter {
			result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: endpoint.String(),
				State:    madmin.DriveStateOffline,
			})
		}
	}

	// Check if we had quorum to write; if not, return an appropriate error.
	_, afterDriveOnline := result.GetOnlineCounts()
	if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 {
		return result, toObjectErr(errXLWriteQuorum, bucket)
	}
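
	// Worked example (illustrative numbers only): with 4 sets of 16 drives
	// (64 total), at least 64/2+1 = 33 drives must be online after healing
	// for the bucket heal to be considered successful.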

	return result, nil
}

// HealObject - heals inconsistent object on a hashedSet based on object name.
func (s *xlSets) HealObject(ctx context.Context, bucket, object string, dryRun, remove bool, scanMode madmin.HealScanMode) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(ctx, bucket, object, dryRun, remove, scanMode)
}
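
// A minimal usage sketch (hypothetical caller code, not part of this file),
// assuming madmin.HealNormalScan as the scan mode and illustrative
// bucket/object names:
//
//	if _, err := sets.HealBucket(ctx, "mybucket", false, false); err == nil {
//		_, _ = sets.HealObject(ctx, "mybucket", "photos/1.jpg", false, false, madmin.HealNormalScan)
//	}
//
// getHashedSet routes the object to a single erasure set based on the
// configured distribution algorithm, so only that set is healed.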

// Lists all buckets which need healing.
func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
	listBuckets := []BucketInfo{}
	var healBuckets = map[string]BucketInfo{}
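	// Keying by bucket name de-duplicates buckets that are present on more
	// than one erasure set.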
	for _, set := range s.sets {
		buckets, _, err := listAllBuckets(set.getDisks())
		if err != nil {
			return nil, err
		}
		for _, currBucket := range buckets {
			healBuckets[currBucket.Name] = BucketInfo(currBucket)
		}
	}
	for _, bucketInfo := range healBuckets {
		listBuckets = append(listBuckets, bucketInfo)
	}
	return listBuckets, nil
}

// HealObjects - heals all objects recursively at a specified prefix; any
// dangling objects are deleted automatically as well.
func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, healObjectFn func(string, string) error) (err error) {
	recursive := true

	endWalkCh := make(chan struct{})
	listDir := listDirSetsFactory(ctx, s.sets...)
	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, endWalkCh)
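	// listDirSetsFactory merges directory listings from every erasure set,
	// so the walk results consumed below cover the combined namespace of
	// the whole deployment (an inference from the factory call above).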
	for {
		walkResult, ok := <-walkResultCh
		if !ok {
			break
		}
		if err := healObjectFn(bucket, walkResult.entry); err != nil {
			return toObjectErr(err, bucket, walkResult.entry)
		}
		if walkResult.end {
			break
		}
	}

	return nil
}
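
// ListObjectsHeal - lists objects that need healing; it delegates to
// listObjects with its final flag set to true (presumably the heal-listing
// mode, given this method's name).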
func (s *xlSets) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys, true)
}