/*
 * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"fmt"
	"hash/crc32"
	"io"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/minio/minio-go/v6/pkg/tags"
	"github.com/minio/minio/cmd/config/storageclass"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bpool"
	"github.com/minio/minio/pkg/dsync"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/sync/errgroup"
)

// setsDsyncLockers is an encapsulated type for Close()
type setsDsyncLockers [][]dsync.NetLocker

// Information of a new disk connection
type diskConnectInfo struct {
	setIndex int
}

// xlSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
// current design.
type xlSets struct {
	GatewayUnsupported

	sets []*xlObjects

	// Reference format.
	format *formatXLV3

	// xlDisks mutex to lock xlDisks.
	xlDisksMu sync.RWMutex

	// Re-ordered list of disks per set.
	xlDisks [][]StorageAPI

	// Distributed locker clients.
	xlLockers setsDsyncLockers

	// List of endpoints provided on the command line.
	endpoints Endpoints

	// String version of all the endpoints, an optimization
	// to avoid url.String() conversion taking CPU on
	// large disk setups.
	endpointStrings []string

	// Total number of sets and the number of disks per set.
	setCount, drivesPerSet int

	disksConnectEvent chan diskConnectInfo

	// Done channel to control monitoring loop.
	disksConnectDoneCh chan struct{}

	// Distribution algorithm of choice.
	distributionAlgo string

	disksStorageInfoCache timedValue

	// Merge tree walk
	pool       *MergeWalkPool
	poolSplunk *MergeWalkPool

	mrfMU      sync.Mutex
	mrfUploads map[string]int
}

func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool {
	disk := diskMap[endpoint]
	if disk == nil {
		return false
	}
	return disk.IsOnline()
}

func (s *xlSets) getDiskMap() map[string]StorageAPI {
	diskMap := make(map[string]StorageAPI)

	s.xlDisksMu.RLock()
	defer s.xlDisksMu.RUnlock()

	for i := 0; i < s.setCount; i++ {
		for j := 0; j < s.drivesPerSet; j++ {
			disk := s.xlDisks[i][j]
			if disk == nil {
				continue
			}
			if !disk.IsOnline() {
				continue
			}
			diskMap[disk.String()] = disk
		}
	}
	return diskMap
}

// Initializes a new StorageAPI from the endpoint argument, and returns the
// StorageAPI along with the `format` which exists on the disk.
func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatXLV3, error) {
	disk, err := newStorageAPI(endpoint)
	if err != nil {
		return nil, nil, err
	}

	format, err := loadFormatXL(disk)
	if err != nil {
		// Close the internal connection to avoid connection leaks.
		disk.Close()
		return nil, nil, err
	}

	return disk, format, nil
}

// findDiskIndexByDiskID - returns the i,j'th position of the input `diskID` against the reference
// format, after successful validation.
//  - i'th position is the set index
//  - j'th position is the disk index in the current set
func findDiskIndexByDiskID(refFormat *formatXLV3, diskID string) (int, int, error) {
	if diskID == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", diskID)
	}
	for i := 0; i < len(refFormat.XL.Sets); i++ {
		for j := 0; j < len(refFormat.XL.Sets[0]); j++ {
			if refFormat.XL.Sets[i][j] == diskID {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", diskID)
}

// findDiskIndex - returns the i,j'th position of the input `format` against the reference
// format, after successful validation.
//  - i'th position is the set index
//  - j'th position is the disk index in the current set
func findDiskIndex(refFormat, format *formatXLV3) (int, int, error) {
	if err := formatXLV3Check(refFormat, format); err != nil {
		return 0, 0, err
	}

	if format.XL.This == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", format.XL.This)
	}

	for i := 0; i < len(refFormat.XL.Sets); i++ {
		for j := 0; j < len(refFormat.XL.Sets[0]); j++ {
			if refFormat.XL.Sets[i][j] == format.XL.This {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", format.XL.This)
}

// connectDisks - attempts to connect all the endpoints, loads the format
// and re-arranges the disks into their proper positions.
func (s *xlSets) connectDisks() {
	var wg sync.WaitGroup
	diskMap := s.getDiskMap()
	for i, endpoint := range s.endpoints {
		if isEndpointConnected(diskMap, s.endpointStrings[i]) {
			continue
		}
		wg.Add(1)
		go func(endpoint Endpoint) {
			defer wg.Done()
			disk, format, err := connectEndpoint(endpoint)
			if err != nil {
				printEndpointError(endpoint, err)
				return
			}
			setIndex, diskIndex, err := findDiskIndex(s.format, format)
			if err != nil {
				// Close the internal connection to avoid connection leaks.
				disk.Close()
				printEndpointError(endpoint, err)
				return
			}
			disk.SetDiskID(format.XL.This)
			s.xlDisksMu.Lock()
			if s.xlDisks[setIndex][diskIndex] != nil {
				s.xlDisks[setIndex][diskIndex].Close()
			}
			s.xlDisks[setIndex][diskIndex] = disk
			s.xlDisksMu.Unlock()
			go func(setIndex int) {
				// Send a new disk connect event with a timeout
				select {
				case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
				case <-time.After(100 * time.Millisecond):
				}
			}(setIndex)
		}(endpoint)
	}
	wg.Wait()
}

// monitorAndConnectEndpoints is a monitoring loop that keeps track of disconnected
// endpoints, reconnecting them and placing them back into the right position in
// the set topology. Monitoring happens at the given monitoring interval.
func (s *xlSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-s.disksConnectDoneCh:
			return
		case <-time.After(monitorInterval):
			s.connectDisks()
		}
	}
}

func (s *xlSets) GetLockers(setIndex int) func() []dsync.NetLocker {
	return func() []dsync.NetLocker {
		lockers := make([]dsync.NetLocker, s.drivesPerSet)
		copy(lockers, s.xlLockers[setIndex])
		return lockers
	}
}

// GetDisks returns a closure for a given set, which provides list of disks per set.
func (s *xlSets) GetDisks(setIndex int) func() []StorageAPI {
	return func() []StorageAPI {
		s.xlDisksMu.RLock()
		defer s.xlDisksMu.RUnlock()
		disks := make([]StorageAPI, s.drivesPerSet)
		copy(disks, s.xlDisks[setIndex])
		return disks
	}
}

const defaultMonitorConnectEndpointInterval = time.Second * 10 // Set to 10 secs.
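
// Illustrative sketch (not part of the original code): GetDisks hands out a
// closure rather than the slice itself, so callers always work on a copy
// taken under xlDisksMu even while connectDisks() swaps entries in the
// background. A hypothetical caller counting online disks of one set:
func exampleSnapshotOnlineDisks(s *xlSets, setIndex int) int {
	getDisks := s.GetDisks(setIndex) // closure bound to one erasure set
	online := 0
	for _, disk := range getDisks() { // snapshot copied under the read lock
		if disk != nil && disk.IsOnline() {
			online++
		}
	}
	return online
}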

// Initialize new set of erasure coded sets.
func newXLSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatXLV3) (*xlSets, error) {
	endpointStrings := make([]string, len(endpoints))
	for i, endpoint := range endpoints {
		if endpoint.IsLocal {
			endpointStrings[i] = endpoint.Path
		} else {
			endpointStrings[i] = endpoint.String()
		}
	}

	setCount := len(format.XL.Sets)
	drivesPerSet := len(format.XL.Sets[0])

	// Initialize the XL sets instance.
	s := &xlSets{
		sets:               make([]*xlObjects, setCount),
		xlDisks:            make([][]StorageAPI, setCount),
		xlLockers:          make([][]dsync.NetLocker, setCount),
		endpoints:          endpoints,
		endpointStrings:    endpointStrings,
		setCount:           setCount,
		drivesPerSet:       drivesPerSet,
		format:             format,
		disksConnectEvent:  make(chan diskConnectInfo),
		disksConnectDoneCh: make(chan struct{}),
		distributionAlgo:   format.XL.DistributionAlgo,
		pool:               NewMergeWalkPool(globalMergeLookupTimeout),
		poolSplunk:         NewMergeWalkPool(globalMergeLookupTimeout),
		mrfUploads:         make(map[string]int),
	}

	mutex := newNSLock(globalIsDistXL)

	// Initialize byte pool once for all sets, bpool size is set to
	// setCount * drivesPerSet with each memory up to blockSizeV1.
	bp := bpool.NewBytePoolCap(setCount*drivesPerSet, blockSizeV1, blockSizeV1*2)

	for i := 0; i < setCount; i++ {
		s.xlDisks[i] = make([]StorageAPI, drivesPerSet)
		s.xlLockers[i] = make([]dsync.NetLocker, drivesPerSet)
	}

	for i := 0; i < setCount; i++ {
		for j := 0; j < drivesPerSet; j++ {
			// Rely on endpoints list to initialize, init lockers and available disks.
			s.xlLockers[i][j] = newLockAPI(s.endpoints[i*drivesPerSet+j])

			disk := storageDisks[i*drivesPerSet+j]
			if disk == nil {
				continue
			}
			diskID, derr := disk.GetDiskID()
			if derr != nil {
				disk.Close()
				continue
			}
			m, n, err := findDiskIndexByDiskID(format, diskID)
			if err != nil {
				disk.Close()
				continue
			}
			s.xlDisks[m][n] = disk
		}

		// Initialize xl objects for a given set.
		s.sets[i] = &xlObjects{
			getDisks:    s.GetDisks(i),
			getLockers:  s.GetLockers(i),
			nsMutex:     mutex,
			bp:          bp,
			mrfUploadCh: make(chan partialUpload, 10000),
		}

		go s.sets[i].cleanupStaleMultipartUploads(ctx,
			GlobalMultipartCleanupInterval, GlobalMultipartExpiry, ctx.Done())
	}

	// Start the disk monitoring and connect routine.
	go s.monitorAndConnectEndpoints(ctx, defaultMonitorConnectEndpointInterval)
	go s.maintainMRFList()
	go s.healMRFRoutine()

	return s, nil
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (s *xlSets) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
	if len(objects) == 1 {
		return s.getHashedSet(objects[0]).NewNSLock(ctx, bucket, objects...)
	}
	return s.getHashedSet("").NewNSLock(ctx, bucket, objects...)
}

// StorageUsageInfo - combines output of StorageInfo across all erasure coded object sets.
// This only returns disk usage info for Zones to perform placement decisions; this call
// is not implemented in the Object interface and is not meant to be used by other object
// layer implementations.
func (s *xlSets) StorageUsageInfo(ctx context.Context) StorageInfo {
	storageUsageInfo := func() StorageInfo {
		var storageInfo StorageInfo
		storageInfos := make([]StorageInfo, len(s.sets))
		storageInfo.Backend.Type = BackendErasure

		g := errgroup.WithNErrs(len(s.sets))
		for index := range s.sets {
			index := index
			g.Go(func() error {
				// ignoring errors on purpose
				storageInfos[index], _ = s.sets[index].StorageInfo(ctx, false)
				return nil
			}, index)
		}

		// Wait for the go routines.
		g.Wait()

		for _, lstorageInfo := range storageInfos {
			storageInfo.Used = append(storageInfo.Used, lstorageInfo.Used...)
			storageInfo.Total = append(storageInfo.Total, lstorageInfo.Total...)
			storageInfo.Available = append(storageInfo.Available, lstorageInfo.Available...)
			storageInfo.MountPaths = append(storageInfo.MountPaths, lstorageInfo.MountPaths...)
			storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
			storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
		}

		return storageInfo
	}

	s.disksStorageInfoCache.Once.Do(func() {
		s.disksStorageInfoCache.TTL = time.Second
		s.disksStorageInfoCache.Update = func() (interface{}, error) {
			return storageUsageInfo(), nil
		}
	})

	v, _ := s.disksStorageInfoCache.Get()
	return v.(StorageInfo)
}
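
// Illustrative sketch (not part of the original code): the timedValue used
// above is a lazily-initialized read-through cache - Once installs the Update
// function, and Get() returns the last computed value until the TTL expires.
// The same pattern applied to a hypothetical, cheap-to-demonstrate value:
var exampleSetCountCache timedValue

func exampleCachedSetCount(s *xlSets) int {
	exampleSetCountCache.Once.Do(func() {
		exampleSetCountCache.TTL = time.Second
		exampleSetCountCache.Update = func() (interface{}, error) {
			// Stand-in for an expensive computation; recomputed at most
			// once per TTL no matter how often the caller asks.
			return len(s.sets), nil
		}
	})
	v, _ := exampleSetCountCache.Get()
	return v.(int)
}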

// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *xlSets) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
	var storageInfo StorageInfo

	storageInfos := make([]StorageInfo, len(s.sets))
	storageInfoErrs := make([][]error, len(s.sets))
	storageInfo.Backend.Type = BackendErasure

	g := errgroup.WithNErrs(len(s.sets))
	for index := range s.sets {
		index := index
		g.Go(func() error {
			storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx, local)
			return nil
		}, index)
	}

	// Wait for the go routines.
	g.Wait()

	for _, lstorageInfo := range storageInfos {
		storageInfo.Used = append(storageInfo.Used, lstorageInfo.Used...)
		storageInfo.Total = append(storageInfo.Total, lstorageInfo.Total...)
		storageInfo.Available = append(storageInfo.Available, lstorageInfo.Available...)
		storageInfo.MountPaths = append(storageInfo.MountPaths, lstorageInfo.MountPaths...)
		storageInfo.Backend.OnlineDisks = storageInfo.Backend.OnlineDisks.Merge(lstorageInfo.Backend.OnlineDisks)
		storageInfo.Backend.OfflineDisks = storageInfo.Backend.OfflineDisks.Merge(lstorageInfo.Backend.OfflineDisks)
	}

	scParity := globalStorageClass.GetParityForSC(storageclass.STANDARD)
	if scParity == 0 {
		scParity = s.drivesPerSet / 2
	}
	storageInfo.Backend.StandardSCData = s.drivesPerSet - scParity
	storageInfo.Backend.StandardSCParity = scParity

	rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
	storageInfo.Backend.RRSCData = s.drivesPerSet - rrSCParity
	storageInfo.Backend.RRSCParity = rrSCParity

	storageInfo.Backend.Sets = make([][]madmin.DriveInfo, s.setCount)
	for i := range storageInfo.Backend.Sets {
		storageInfo.Backend.Sets[i] = make([]madmin.DriveInfo, s.drivesPerSet)
	}

	if local {
		// If local is true, we are not interested in the drive UUID info;
		// this path is primarily used by Prometheus.
		return storageInfo, nil
	}

	for i, set := range s.sets {
		storageDisks := set.getDisks()
		for j, storageErr := range storageInfoErrs[i] {
			if storageDisks[j] == OfflineDisk {
				storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
					State:    madmin.DriveStateOffline,
					Endpoint: s.endpointStrings[i*s.drivesPerSet+j],
				}
				continue
			}
			var diskID string
			if storageErr == nil {
				// No errors returned by storage, look for its DiskID()
				diskID, storageErr = storageDisks[j].GetDiskID()
			}
			if storageErr == nil {
				storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
					State:    madmin.DriveStateOk,
					Endpoint: storageDisks[j].String(),
					UUID:     diskID,
				}
				continue
			}
			if storageErr == errUnformattedDisk {
				storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
					State:    madmin.DriveStateUnformatted,
					Endpoint: storageDisks[j].String(),
					UUID:     "",
				}
			} else {
				storageInfo.Backend.Sets[i][j] = madmin.DriveInfo{
					State:    madmin.DriveStateCorrupt,
					Endpoint: storageDisks[j].String(),
					UUID:     "",
				}
			}
		}
	}

	var errs []error
	for i := range s.sets {
		errs = append(errs, storageInfoErrs[i]...)
	}

	return storageInfo, errs
}

func (s *xlSets) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
	// Use the zone-level implementation instead.
	return NotImplemented{}
}

// Shutdown shuts down all erasure coded sets in parallel
// and returns the first encountered error.
func (s *xlSets) Shutdown(ctx context.Context) error {
	g := errgroup.WithNErrs(len(s.sets))

	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].Shutdown(ctx)
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}

	return nil
}

// MakeBucketWithLocation - creates a new bucket across all sets simultaneously,
// then returns the first encountered error.
func (s *xlSets) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Create buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].MakeBucketWithLocation(ctx, bucket, location, lockEnabled)
		}, index)
	}

	errs := g.Wait()

	// Return the first encountered error
	for _, err := range errs {
		if err != nil {
			return err
		}
	}

	// Success.
	return nil
}

// hashes the key returning an integer based on the input algorithm.
// This function currently supports
// - CRCMOD
// - all new algos.
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

func hashKey(algo string, key string, cardinality int) int {
	switch algo {
	case formatXLVersionV2DistributionAlgo:
		return crcHashMod(key, cardinality)
	default:
		// Unknown algorithm returns -1, also if cardinality is less than or equal to 0.
		return -1
	}
}

// Always returns the same erasure coded set index for a given input.
func (s *xlSets) getHashedSetIndex(input string) int {
	return hashKey(s.distributionAlgo, input, len(s.sets))
}

// Always returns the same erasure coded set for a given input.
func (s *xlSets) getHashedSet(input string) (set *xlObjects) {
	return s.sets[s.getHashedSetIndex(input)]
}
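
// Illustrative sketch (not part of the original code): with the CRCMOD
// distribution algorithm an object name deterministically maps to one erasure
// set, so every server computes the same placement without coordination. The
// object names and set count below are hypothetical.
func examplePlacement() {
	const setCount = 4 // hypothetical deployment with 4 erasure sets
	for _, object := range []string{"photos/2020/a.jpg", "photos/2020/b.jpg", "backup.tar"} {
		// Same CRC32-IEEE modulo computation used by crcHashMod above.
		fmt.Printf("%s -> set %d\n", object, crcHashMod(object, setCount))
	}
}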

// GetBucketInfo - returns bucket info from one of the erasure coded sets.
func (s *xlSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	return s.getHashedSet("").GetBucketInfo(ctx, bucket)
}

// ListObjectsV2 lists all objects in bucket filtered by prefix
func (s *xlSets) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
	marker := continuationToken
	if marker == "" {
		marker = startAfter
	}

	loi, err := s.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
	if err != nil {
		return result, err
	}

	listObjectsV2Info := ListObjectsV2Info{
		IsTruncated:           loi.IsTruncated,
		ContinuationToken:     continuationToken,
		NextContinuationToken: loi.NextMarker,
		Objects:               loi.Objects,
		Prefixes:              loi.Prefixes,
	}
	return listObjectsV2Info, err
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (s *xlSets) IsNotificationSupported() bool {
	return s.getHashedSet("").IsNotificationSupported()
}

// IsListenBucketSupported returns whether listen bucket notification is applicable for this layer.
func (s *xlSets) IsListenBucketSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *xlSets) IsEncryptionSupported() bool {
	return s.getHashedSet("").IsEncryptionSupported()
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (s *xlSets) IsCompressionSupported() bool {
	return s.getHashedSet("").IsCompressionSupported()
}

func (s *xlSets) IsTaggingSupported() bool {
	return true
}

// DeleteBucket - deletes a bucket on all sets simultaneously,
// even if one of the sets fails to delete the bucket, we proceed to
// undo a successful operation.
func (s *xlSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Delete buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].DeleteBucket(ctx, bucket, forceDelete)
		}, index)
	}

	errs := g.Wait()
	// For any failure, we attempt to undo the delete bucket operations
	// by creating the bucket again on all sets which successfully deleted it.
	for _, err := range errs {
		if err != nil {
			undoDeleteBucketSets(bucket, s.sets, errs)
			return err
		}
	}

	// Delete all bucket metadata.
	deleteBucketMetadata(ctx, s, bucket)

	// Success.
	return nil
}

// This function is used to undo a successful DeleteBucket operation.
func undoDeleteBucketSets(bucket string, sets []*xlObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous delete bucket on all underlying sets.
	for index := range sets {
		index := index
		g.Go(func() error {
			if errs[index] == nil {
				return sets[index].MakeBucketWithLocation(GlobalContext, bucket, "", false)
			}
			return nil
		}, index)
	}

	g.Wait()
}

// List all buckets from one of the sets, we are not doing merge
// sort here just for simplification. As per design it is assumed
// that all buckets are present on all sets.
func (s *xlSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	// Always lists from the same set signified by the empty string.
	return s.getHashedSet("").ListBuckets(ctx)
}

// --- Object Operations ---

// GetObjectNInfo - returns object info and locked object ReadCloser
func (s *xlSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	return s.getHashedSet(object).GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}

// GetObject - reads an object from the hashedSet based on the object name.
func (s *xlSets) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) error {
	return s.getHashedSet(object).GetObject(ctx, bucket, object, startOffset, length, writer, etag, opts)
}

// PutObject - writes an object to hashedSet based on the object name.
func (s *xlSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).PutObject(ctx, bucket, object, data, opts)
}

// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *xlSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).GetObjectInfo(ctx, bucket, object, opts)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *xlSets) DeleteObject(ctx context.Context, bucket string, object string) (err error) {
	return s.getHashedSet(object).DeleteObject(ctx, bucket, object)
}

// DeleteObjects - bulk delete of objects
// Bulk delete is only possible within one set. For that purpose
// objects are grouped by set first, and then bulk delete is invoked
// for each set; the error response of each delete will be returned.
func (s *xlSets) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
	type delObj struct {
		// Set index associated to this object
		setIndex int
		// Original index from the list of arguments
		// where this object is passed
		origIndex int
		// Object name
		name string
	}

	// Transform []delObj to the list of object names
	toNames := func(delObjs []delObj) []string {
		names := make([]string, len(delObjs))
		for i, obj := range delObjs {
			names[i] = obj.name
		}
		return names
	}

	// The result of delete operation on all passed objects
	var delErrs = make([]error, len(objects))

	// A map between a set and its associated objects
	var objSetMap = make(map[int][]delObj)

	// Group objects by set index
	for i, object := range objects {
		index := s.getHashedSetIndex(object)
		objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, name: object})
	}

	// Invoke bulk delete on objects per set and save
	// the result of the delete operation
	for _, objsGroup := range objSetMap {
		errs, err := s.getHashedSet(objsGroup[0].name).DeleteObjects(ctx, bucket, toNames(objsGroup))
		if err != nil {
			return nil, err
		}
		for i, obj := range objsGroup {
			delErrs[obj.origIndex] = errs[i]
		}
	}

	return delErrs, nil
}
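
// Illustrative sketch (not part of the original code): bulk deletes follow the
// same placement rule as single-object calls, so a caller can predict which
// set will serve each name by hashing it first. A hypothetical helper that
// performs only the grouping step used by DeleteObjects above:
func exampleGroupObjectsBySet(s *xlSets, objects []string) map[int][]string {
	groups := make(map[int][]string)
	for _, object := range objects {
		idx := s.getHashedSetIndex(object) // deterministic set index per name
		groups[idx] = append(groups[idx], object)
	}
	return groups
}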

// CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
func (s *xlSets) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcSet := s.getHashedSet(srcObject)
	destSet := s.getHashedSet(destObject)

	// Check if this request is only metadata update.
	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(destBucket, destObject))
	if cpSrcDstSame && srcInfo.metadataOnly {
		return srcSet.CopyObject(ctx, srcBucket, srcObject, destBucket, destObject, srcInfo, srcOpts, dstOpts)
	}

	putOpts := ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}
	return destSet.putObject(ctx, destBucket, destObject, srcInfo.PutObjReader, putOpts)
}

// FileInfoCh - file info channel
type FileInfoCh struct {
	Ch    chan FileInfo
	Prev  FileInfo
	Valid bool
}

// Pop - pops a cached entry if any, or from the cached channel.
func (f *FileInfoCh) Pop() (fi FileInfo, ok bool) {
	if f.Valid {
		f.Valid = false
		return f.Prev, true
	} // No cached entries found, read from channel
	f.Prev, ok = <-f.Ch
	return f.Prev, ok
}

// Push - cache an entry, for Pop() later.
func (f *FileInfoCh) Push(fi FileInfo) {
	f.Prev = fi
	f.Valid = true
}

// Calculates the least entry across multiple FileInfo channels, and
// returns the least common entry and the total number of times
// we found this entry. Additionally also returns a boolean
// to indicate if the caller needs to call this function
// again to list the next entry. It is the caller's responsibility,
// if the caller wishes to list N entries, to call leastEntry
// N times until this boolean is 'false'.
func leastEntry(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) (FileInfo, int, bool) {
	for i := range entryChs {
		entries[i], entriesValid[i] = entryChs[i].Pop()
	}

	var isTruncated = false
	for _, valid := range entriesValid {
		if !valid {
			continue
		}
		isTruncated = true
		break
	}

	var lentry FileInfo
	var found bool
	for i, valid := range entriesValid {
		if !valid {
			continue
		}
		if !found {
			lentry = entries[i]
			found = true
			continue
		}
		if entries[i].Name < lentry.Name {
			lentry = entries[i]
		}
	}

	// We haven't been able to find any least entry,
	// this would mean that we don't have a valid entry.
	if !found {
		return lentry, 0, isTruncated
	}

	leastEntryCount := 0
	for i, valid := range entriesValid {
		if !valid {
			continue
		}

		// Entries are duplicated across disks,
		// we should simply skip such entries.
		if lentry.Name == entries[i].Name && lentry.ModTime.Equal(entries[i].ModTime) {
			leastEntryCount++
			continue
		}

		// Push all entries which are lexically higher
		// and will be returned later in Pop()
		entryChs[i].Push(entries[i])
	}

	return lentry, leastEntryCount, isTruncated
}
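
// Illustrative sketch (not part of the original code): leastEntry performs one
// step of a k-way merge over sorted streams - it pops the head of every
// channel, keeps the lexically smallest name, and pushes the rest back. A
// hypothetical driver that drains a set of channels into a single sorted,
// de-duplicated list (ignoring the quorum count) would look like this:
func exampleDrainSorted(entryChs []FileInfoCh) []FileInfo {
	entries := make([]FileInfo, len(entryChs))
	entriesValid := make([]bool, len(entryChs))
	var merged []FileInfo
	for {
		fi, _, more := leastEntry(entryChs, entries, entriesValid)
		if !more {
			break // every channel has been drained
		}
		merged = append(merged, fi)
	}
	return merged
}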

// mergeEntriesCh - merges FileInfo channels to entries up to maxKeys.
func mergeEntriesCh(entryChs []FileInfoCh, maxKeys int, ndisks int) (entries FilesInfo) {
	var i = 0
	entriesInfos := make([]FileInfo, len(entryChs))
	entriesValid := make([]bool, len(entryChs))
	for {
		fi, quorumCount, valid := leastEntry(entryChs, entriesInfos, entriesValid)
		if !valid {
			// We have reached EOF across all entryChs, break the loop.
			break
		}

		if quorumCount < ndisks-1 {
			// Skip entries which are not found on up to ndisks.
			continue
		}

		entries.Files = append(entries.Files, fi)
		i++
		if i == maxKeys {
			entries.IsTruncated = isTruncated(entryChs, entriesInfos, entriesValid)
			break
		}
	}
	return entries
}

func isTruncated(entryChs []FileInfoCh, entries []FileInfo, entriesValid []bool) bool {
	for i := range entryChs {
		entries[i], entriesValid[i] = entryChs[i].Pop()
	}

	var isTruncated = false
	for _, valid := range entriesValid {
		if !valid {
			continue
		}
		isTruncated = true
		break
	}
	for i := range entryChs {
		if entriesValid[i] {
			entryChs[i].Push(entries[i])
		}
	}
	return isTruncated
}

func (s *xlSets) startMergeWalks(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}) []FileInfoCh {
	return s.startMergeWalksN(ctx, bucket, prefix, marker, recursive, endWalkCh, -1)
}

// Starts a walk channel across all disks and returns a slice of
// FileInfo channels which can be read from.
func (s *xlSets) startMergeWalksN(ctx context.Context, bucket, prefix, marker string, recursive bool, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
	var entryChs []FileInfoCh
	var success int
	for _, set := range s.sets {
		// Reset for the next erasure set.
		success = ndisks
		for _, disk := range set.getLoadBalancedDisks() {
			if disk == nil {
				// Disk can be offline
				continue
			}
			entryCh, err := disk.Walk(bucket, prefix, marker, recursive, xlMetaJSONFile, readMetadata, endWalkCh)
			if err != nil {
				// Disk walk returned error, ignore it.
				continue
			}
			entryChs = append(entryChs, FileInfoCh{
				Ch: entryCh,
			})
			success--
			if success == 0 {
				break
			}
		}
	}
	return entryChs
}

// Starts a walk channel across all disks and returns a slice of
// FileInfo channels which can be read from.
func (s *xlSets) startSplunkMergeWalksN(ctx context.Context, bucket, prefix, marker string, endWalkCh <-chan struct{}, ndisks int) []FileInfoCh {
	var entryChs []FileInfoCh
	var success int
	for _, set := range s.sets {
		// Reset for the next erasure set.
		success = ndisks
		for _, disk := range set.getLoadBalancedDisks() {
			if disk == nil {
				// Disk can be offline
				continue
			}
			entryCh, err := disk.WalkSplunk(bucket, prefix, marker, endWalkCh)
			if err != nil {
				// Disk walk returned error, ignore it.
				continue
			}
			entryChs = append(entryChs, FileInfoCh{
				Ch: entryCh,
			})
			success--
			if success == 0 {
				break
			}
		}
	}
	return entryChs
}

func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)

	const ndisks = 3
	entryChs := s.startMergeWalksN(GlobalContext, bucket, prefix, "", true, endWalkCh, ndisks)

	var objInfos []ObjectInfo
	var eof bool
	var prevPrefix string

	entriesValid := make([]bool, len(entryChs))
	entries := make([]FileInfo, len(entryChs))
	for {
		if len(objInfos) == maxKeys {
			break
		}

		result, quorumCount, ok := leastEntry(entryChs, entries, entriesValid)
		if !ok {
			eof = true
			break
		}

		if quorumCount < ndisks-1 {
			// Skip entries which are not found on up to ndisks.
			continue
		}

		var objInfo ObjectInfo

		index := strings.Index(strings.TrimPrefix(result.Name, prefix), delimiter)
		if index == -1 {
			objInfo = ObjectInfo{
				IsDir:           false,
				Bucket:          bucket,
				Name:            result.Name,
				ModTime:         result.ModTime,
				Size:            result.Size,
				ContentType:     result.Metadata["content-type"],
				ContentEncoding: result.Metadata["content-encoding"],
			}

			// Extract etag from metadata.
			objInfo.ETag = extractETag(result.Metadata)

			// All the parts per object.
			objInfo.Parts = result.Parts

			// etag/md5Sum has already been extracted. We need to
			// remove it to avoid it from appearing as part of
			// response headers. e.g, X-Minio-* or X-Amz-*.
			objInfo.UserDefined = cleanMetadata(result.Metadata)

			// Update storage class
			if sc, ok := result.Metadata[xhttp.AmzStorageClass]; ok {
				objInfo.StorageClass = sc
			} else {
				objInfo.StorageClass = globalMinioDefaultStorageClass
			}
		} else {
			index = len(prefix) + index + len(delimiter)
			currPrefix := result.Name[:index]
			if currPrefix == prevPrefix {
				continue
			}
			prevPrefix = currPrefix

			objInfo = ObjectInfo{
				Bucket: bucket,
				Name:   currPrefix,
				IsDir:  true,
			}
		}

		if objInfo.Name <= marker {
			continue
		}

		objInfos = append(objInfos, objInfo)
	}

	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
		if objInfo.IsDir {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
		result.Objects = append(result.Objects, objInfo)
	}

	if !eof {
		result.IsTruncated = true
		if len(objInfos) > 0 {
			result.NextMarker = objInfos[len(objInfos)-1].Name
		}
	}

	return result, nil
}

// listObjects - implements listing of objects across disks, each disk is independently
// walked and merged at this layer. Resulting value through the merge process sends
// the data in lexically sorted order.
// If partialQuorumOnly is set, only objects that do not have full quorum are returned.
func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	if err = checkListObjsArgs(ctx, bucket, prefix, marker, s); err != nil {
		return loi, err
	}

	// Marker is set validate pre-condition.
	if marker != "" {
		// Marker not common with prefix is not implemented. Send an empty response
		if !HasPrefix(marker, prefix) {
			return loi, nil
		}
	}

	// With max keys of zero we have reached eof, return right here.
	if maxKeys == 0 {
		return loi, nil
	}

	// For delimiter and prefix as '/' we do not list anything at all
	// since according to s3 spec we stop at the 'delimiter'
	// along with the prefix. On a flat namespace with 'prefix'
	// as '/' we don't have any entries, since all the keys are
	// of form 'keyName/...'
	if delimiter == SlashSeparator && prefix == SlashSeparator {
		return loi, nil
	}

	// Overflowing count - reset to maxObjectList.
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}

	if delimiter != SlashSeparator && delimiter != "" {
		// "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter.
		return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys)
	}

	// Default is recursive, if delimiter is set then list non recursive.
	recursive := true
	if delimiter == SlashSeparator {
		recursive = false
	}

	const ndisks = 3

	entryChs, endWalkCh := s.pool.Release(listParams{bucket: bucket, recursive: recursive, marker: marker, prefix: prefix})
	if entryChs == nil {
		endWalkCh = make(chan struct{})
		// start file tree walk across at most randomly 3 disks in a set.
		entryChs = s.startMergeWalksN(GlobalContext, bucket, prefix, marker, recursive, endWalkCh, ndisks)
	}

	entries := mergeEntriesCh(entryChs, maxKeys, ndisks)
	if len(entries.Files) == 0 {
		return loi, nil
	}

	loi.IsTruncated = entries.IsTruncated
	if loi.IsTruncated {
		loi.NextMarker = entries.Files[len(entries.Files)-1].Name
	}

	for _, entry := range entries.Files {
		objInfo := entry.ToObjectInfo()
		if HasSuffix(objInfo.Name, SlashSeparator) && !recursive {
			loi.Prefixes = append(loi.Prefixes, entry.Name)
			continue
		}
		loi.Objects = append(loi.Objects, objInfo)
	}
	if loi.IsTruncated {
		s.pool.Set(listParams{bucket, recursive, loi.NextMarker, prefix}, entryChs, endWalkCh)
	}
	return loi, nil
}

// ListObjects - implements listing of objects across disks, each disk is independently
// walked and merged at this layer. Resulting value through the merge process sends
// the data in lexically sorted order.
func (s *xlSets) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
	return s.listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
}

func (s *xlSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// In list multipart uploads we are going to treat input prefix as the object,
	// this means that we are not supporting directory navigation.
	return s.getHashedSet(prefix).ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *xlSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
	return s.getHashedSet(object).NewMultipartUpload(ctx, bucket, object, opts)
}

// Copies a part of an object from source hashedSet to destination hashedSet.
func (s *xlSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
	destSet := s.getHashedSet(destObject)

	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *xlSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	return s.getHashedSet(object).PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

// GetMultipartInfo - return multipart metadata info uploaded at hashedSet.
func (s *xlSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) {
	return s.getHashedSet(object).GetMultipartInfo(ctx, bucket, object, uploadID, opts)
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *xlSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	return s.getHashedSet(object).ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *xlSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	return s.getHashedSet(object).AbortMultipartUpload(ctx, bucket, object, uploadID)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *xlSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	return s.getHashedSet(object).CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}


/*

   All disks online
   -----------------
   - All Unformatted - format all and return success.
   - Some Unformatted - format all and return success.
   - Any JBOD inconsistent - return failure.
   - Some are corrupt (missing format.json) - return failure.
   - Any unrecognized disks - return failure.

   Some disks are offline and we have quorum.
   -----------------
   - Some unformatted - format all and return success,
     treat offline disks as corrupted.
   - Any JBOD inconsistent - return failure.
   - Some are corrupt (missing format.json) - return failure.
   - Any unrecognized disks - return failure.

   No read quorum
   -----------------
   Failure for all cases.

   // Pseudo code for managing `format.json`.

   // Generic checks.
   if (no quorum) return error
   if (any disk is corrupt) return error  // Always error.
   if (jbod inconsistent) return error    // Always error.
   if (disks not recognized) return error // Always error.

   // Specific checks.
   if (all disks online)
      if (all disks return format.json)
         if (jbod consistent)
            if (all disks recognized)
               return
      else
         if (all disks return format.json not found)
            return error
         else (some disks return format.json not found)
            (heal format)
            return
         fi
      fi
   else
      if (some disks return format.json not found)
         // Offline disks are marked as dead.
         (heal format) // Offline disks should be marked as dead.
         return success
      fi
   fi
*/
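
// A minimal illustrative sketch of the generic "no quorum" gate described
// above. This is not the helper the functions below rely on (they use
// getFormatXLInQuorum); the exact threshold here is an assumption, written
// only to show the shape of the check: count the disks that reported a
// usable `format.json` and compare against half of the total.
func hasFormatReadQuorumSketch(formats []*formatXLV3) bool {
	online := 0
	for _, format := range formats {
		if format != nil {
			online++
		}
	}
	// Assumed rule for illustration: more than half of the disks must agree.
	return online > len(formats)/2
}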

// formatsToDrivesInfo - returns the status of each endpoint's drive as
// madmin.DriveInfo, derived from the loaded formats and their load errors.
func formatsToDrivesInfo(endpoints Endpoints, formats []*formatXLV3, sErrs []error) (beforeDrives []madmin.DriveInfo) {
	beforeDrives = make([]madmin.DriveInfo, len(endpoints))
	// Existing formats are available (i.e. ok), so save them in the
	// result, also populate disks to be healed.
	for i, format := range formats {
		drive := endpoints.GetString(i)
		var state = madmin.DriveStateCorrupt
		switch {
		case format != nil:
			state = madmin.DriveStateOk
		case sErrs[i] == errUnformattedDisk:
			state = madmin.DriveStateMissing
		case sErrs[i] == errDiskNotFound:
			state = madmin.DriveStateOffline
		}
		beforeDrives[i] = madmin.DriveInfo{
			UUID: func() string {
				if format != nil {
					return format.XL.This
				}
				return ""
			}(),
			Endpoint: drive,
			State:    state,
		}
	}

	return beforeDrives
}

// ReloadFormat - reloads the format from disk, usually called by a remote
// peer notifier while healing in a distributed setup.
func (s *xlSets) ReloadFormat(ctx context.Context, dryRun bool) (err error) {
	storageDisks, errs := initStorageDisksWithErrors(s.endpoints)
	for i, err := range errs {
		if err != nil && err != errDiskNotFound {
			return fmt.Errorf("Disk %s: %w", s.endpoints[i], err)
		}
	}
	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks, false)
	if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil {
		return err
	}

	for index, sErr := range sErrs {
		if sErr != nil {
			// Look for acceptable heal errors, for any other
			// errors we should simply quit and return.
			if _, ok := formatHealErrors[sErr]; !ok {
				return fmt.Errorf("Disk %s: %w", s.endpoints[index], sErr)
			}
		}
	}

	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		return err
	}

	// Stop the monitoring loop, since we are about to re-initialize
	// everything with the new format.
	s.disksConnectDoneCh <- struct{}{}

	// Replace with new reference format.
	s.format = refFormat

	// Close all existing disks and reconnect all the disks.
	s.xlDisksMu.Lock()
	for _, disk := range storageDisks {
		if disk == nil {
			continue
		}

		diskID, err := disk.GetDiskID()
		if err != nil {
			disk.Close()
			continue
		}

		m, n, err := findDiskIndexByDiskID(refFormat, diskID)
		if err != nil {
			disk.Close()
			continue
		}

		if s.xlDisks[m][n] != nil {
			s.xlDisks[m][n].Close()
		}

		s.xlDisks[m][n] = disk
	}
	s.xlDisksMu.Unlock()

	// Restart monitoring loop to monitor reformatted disks again.
	go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval)

	return nil
}

// If it is a single node XL and all disks are root disks, it is most likely a
// test setup, else it is a production setup. On a test setup we allow
// creation of format.json on root disks to help with dev/testing.
func isTestSetup(infos []DiskInfo, errs []error) bool {
	rootDiskCount := 0
	for i := range errs {
		if errs[i] != nil {
			// On error it is safer to assume that this is not a test setup.
			return false
		}
		if infos[i].RootDisk {
			rootDiskCount++
		}
	}
	// It is a test setup if all disks are root disks.
	return rootDiskCount == len(infos)
}

func getAllDiskInfos(storageDisks []StorageAPI) ([]DiskInfo, []error) {
	infos := make([]DiskInfo, len(storageDisks))
	g := errgroup.WithNErrs(len(storageDisks))
	for index := range storageDisks {
		index := index
		g.Go(func() error {
			var err error
			if storageDisks[index] != nil {
				infos[index], err = storageDisks[index].DiskInfo()
			} else {
				// Disk not found.
				err = errDiskNotFound
			}
			return err
		}, index)
	}
	return infos, g.Wait()
}

// Mark root disks as down so as not to heal them.
func markRootDisksAsDown(storageDisks []StorageAPI) {
	infos, errs := getAllDiskInfos(storageDisks)
	if isTestSetup(infos, errs) {
		// Allow healing of disks for test setups to help with testing.
		return
	}
	for i := range storageDisks {
		if errs[i] != nil {
			storageDisks[i] = nil
			continue
		}
		if infos[i].RootDisk {
			// We should not heal on a root disk, i.e. in a situation where the
			// minio administrator has unmounted a defective drive we should
			// not heal a path on the root disk.
			logger.Info("Disk `%s` is a root disk. Please ensure the disk is mounted properly, refusing to use root disk.",
				storageDisks[i].String())
			storageDisks[i] = nil
		}
	}
}

// HealFormat - heals missing `format.json` on fresh unformatted disks.
func (s *xlSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
	storageDisks, errs := initStorageDisksWithErrors(s.endpoints)
	for i, derr := range errs {
		if derr != nil && derr != errDiskNotFound {
			return madmin.HealResultItem{}, fmt.Errorf("Disk %s: %w", s.endpoints[i], derr)
		}
	}

	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	markRootDisksAsDown(storageDisks)

	formats, sErrs := loadFormatXLAll(storageDisks, true)
	if err = checkFormatXLValues(formats, s.drivesPerSet); err != nil {
		return madmin.HealResultItem{}, err
	}

	// Prepare heal-result
	res = madmin.HealResultItem{
		Type:      madmin.HealItemMetadata,
		Detail:    "disk-format",
		DiskCount: s.setCount * s.drivesPerSet,
		SetCount:  s.setCount,
	}

	// Fetch all the drive info status.
	beforeDrives := formatsToDrivesInfo(s.endpoints, formats, sErrs)

	res.After.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	// Copy "after" drive state too from before.
	for k, v := range beforeDrives {
		res.Before.Drives[k] = madmin.HealDriveInfo(v)
		res.After.Drives[k] = madmin.HealDriveInfo(v)
	}

	for index, sErr := range sErrs {
		if sErr != nil {
			// Look for acceptable heal errors, for any other
			// errors we should simply quit and return.
			if _, ok := formatHealErrors[sErr]; !ok {
				return res, fmt.Errorf("Disk %s: %w", s.endpoints[index], sErr)
			}
		}
	}

	if countErrs(sErrs, errUnformattedDisk) == 0 {
		// No unformatted disks found - disks are either offline
		// or online, no healing is required.
		return res, errNoHealRequired
	}

	refFormat, err := getFormatXLInQuorum(formats)
	if err != nil {
		return res, err
	}

	// Mark all UUIDs which might be offline, use list
	// of formats to mark them appropriately.
	markUUIDsOffline(refFormat, formats)

	// Initialize a new set of set formats which will be written to disk.
	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.drivesPerSet, formats, sErrs)

	// Look for all offline/unformatted disks in our reference format, so that
	// we can fill them up with new UUIDs. This looping also ensures that the
	// replaced disks are allocated evenly across all sets, making sure that
	// the redundancy is not lost.
	for i := range refFormat.XL.Sets {
		for j := range refFormat.XL.Sets[i] {
			if refFormat.XL.Sets[i][j] == offlineDiskUUID {
				for l := range newFormatSets[i] {
					if newFormatSets[i][l] == nil {
						continue
					}
					if newFormatSets[i][l].XL.This == "" {
						newFormatSets[i][l].XL.This = mustGetUUID()
						refFormat.XL.Sets[i][j] = newFormatSets[i][l].XL.This
						for m, v := range res.After.Drives {
							if v.Endpoint == s.endpoints.GetString(i*s.drivesPerSet+l) {
								res.After.Drives[m].UUID = newFormatSets[i][l].XL.This
								res.After.Drives[m].State = madmin.DriveStateOk
							}
						}
						break
					}
				}
			}
		}
	}

	if !dryRun {
		var tmpNewFormats = make([]*formatXLV3, s.setCount*s.drivesPerSet)
		for i := range newFormatSets {
			for j := range newFormatSets[i] {
				if newFormatSets[i][j] == nil {
					continue
				}
				tmpNewFormats[i*s.drivesPerSet+j] = newFormatSets[i][j]
				tmpNewFormats[i*s.drivesPerSet+j].XL.Sets = refFormat.XL.Sets
			}
		}

		// Save formats `format.json` across all disks.
		if err = saveFormatXLAll(ctx, storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, err
		}

		// Stop the monitoring loop, since we are about to re-initialize
		// everything with the new format.
		s.disksConnectDoneCh <- struct{}{}

		// Replace with new reference format.
		s.format = refFormat

		// Disconnect/relinquish all existing disks, lockers and reconnect the disks, lockers.
		s.xlDisksMu.Lock()
		for _, disk := range storageDisks {
			if disk == nil {
				continue
			}

			diskID, err := disk.GetDiskID()
			if err != nil {
				disk.Close()
				continue
			}

			m, n, err := findDiskIndexByDiskID(refFormat, diskID)
			if err != nil {
				disk.Close()
				continue
			}

			if s.xlDisks[m][n] != nil {
				s.xlDisks[m][n].Close()
			}

			s.xlDisks[m][n] = disk
		}
		s.xlDisksMu.Unlock()

		// Restart our monitoring loop to start monitoring newly formatted disks.
		go s.monitorAndConnectEndpoints(GlobalContext, defaultMonitorConnectEndpointInterval)
	}

	return res, nil
}
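
// A hedged sketch of how a caller might drive HealFormat, first as a dry run
// and then for real. This is a hypothetical helper for illustration only; the
// actual admin heal path that invokes HealFormat lives elsewhere in the
// server and is more involved.
func healFormatSketch(ctx context.Context, s *xlSets) (madmin.HealResultItem, error) {
	// Dry run: report which drives would change state without writing
	// any `format.json`.
	if _, err := s.HealFormat(ctx, true); err != nil && err != errNoHealRequired {
		return madmin.HealResultItem{}, err
	}
	// Real run: write healed formats to the fresh disks and reconnect them.
	res, err := s.HealFormat(ctx, false)
	if err == errNoHealRequired {
		// Nothing to do - every disk already carries a consistent format.
		return res, nil
	}
	return res, err
}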

// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *xlSets) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (result madmin.HealResultItem, err error) {
	// Initialize heal result info
	result = madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: s.setCount * s.drivesPerSet,
		SetCount:  s.setCount,
	}

	for _, s := range s.sets {
		var healResult madmin.HealResultItem
		healResult, err = s.HealBucket(ctx, bucket, dryRun, remove)
		if err != nil {
			return result, err
		}
		result.Before.Drives = append(result.Before.Drives, healResult.Before.Drives...)
		result.After.Drives = append(result.After.Drives, healResult.After.Drives...)
	}

	for i := range s.endpoints {
		var foundBefore bool
		for _, v := range result.Before.Drives {
			if s.endpointStrings[i] == v.Endpoint {
				foundBefore = true
			}
		}
		if !foundBefore {
			result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: s.endpointStrings[i],
				State:    madmin.DriveStateOffline,
			})
		}
		var foundAfter bool
		for _, v := range result.After.Drives {
			if s.endpointStrings[i] == v.Endpoint {
				foundAfter = true
			}
		}
		if !foundAfter {
			result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{
				UUID:     "",
				Endpoint: s.endpointStrings[i],
				State:    madmin.DriveStateOffline,
			})
		}
	}

	// Check if we had quorum to write, if not return an appropriate error.
	_, afterDriveOnline := result.GetOnlineCounts()
	if afterDriveOnline < ((s.setCount*s.drivesPerSet)/2)+1 {
		return result, toObjectErr(errXLWriteQuorum, bucket)
	}

	return result, nil
}
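
// Illustrative arithmetic for the write-quorum check above (numbers are an
// example, not a recommendation): with setCount = 4 and drivesPerSet = 16 the
// deployment has 64 drives, so healing the bucket must leave at least
// (64/2)+1 = 33 drives online for the result to be accepted.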

// HealObject - heals inconsistent object on a hashedSet based on object name.
func (s *xlSets) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(ctx, bucket, object, opts)
}

// ListBucketsHeal - lists all buckets which need healing.
func (s *xlSets) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
	var listBuckets []BucketInfo
	var healBuckets = make(map[string]VolInfo)
	for _, set := range s.sets {
		// lists all unique buckets across drives.
		if err := listAllBuckets(set.getDisks(), healBuckets); err != nil {
			return nil, err
		}
	}
	for _, v := range healBuckets {
		listBuckets = append(listBuckets, BucketInfo(v))
	}
	sort.Sort(byBucketName(listBuckets))
	return listBuckets, nil
}

// Walk a bucket, optionally a prefix, recursively, until we have returned
// all the contents of the bucket to the ObjectInfo channel. It is the
// caller's responsibility to allocate a receive channel for ObjectInfo; upon
// any unhandled error the walker returns that error. If context.Done() is
// received, Walk() stops the walker.
func (s *xlSets) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", s); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, ctx.Done())

	entriesValid := make([]bool, len(entryChs))
	entries := make([]FileInfo, len(entryChs))

	go func() {
		defer close(results)

		for {
			entry, quorumCount, ok := leastEntry(entryChs, entries, entriesValid)
			if !ok {
				return
			}

			if quorumCount >= s.drivesPerSet/2 {
				// Read quorum exists, proceed.
				results <- entry.ToObjectInfo()
			}
			// Entries without read quorum are skipped.
		}
	}()

	return nil
}
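
// A hedged usage sketch of Walk (hypothetical caller, not part of the
// original source): the caller allocates the results channel, hands it to
// Walk, and simply ranges over it; the walker goroutine closes the channel
// when the listing is exhausted or the context is cancelled.
func walkUsageSketch(ctx context.Context, s *xlSets, bucket, prefix string) error {
	results := make(chan ObjectInfo)
	if err := s.Walk(ctx, bucket, prefix, results); err != nil {
		return err
	}
	for oi := range results {
		// Process each listed object here; printing is just an example.
		fmt.Println(oi.Bucket, oi.Name, oi.Size)
	}
	return nil
}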

// HealObjects - heals all objects recursively at a specified prefix; any
// dangling objects are deleted automatically as well.
func (s *xlSets) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObject healObjectFn) error {
	endWalkCh := make(chan struct{})
	defer close(endWalkCh)

	entryChs := s.startMergeWalks(ctx, bucket, prefix, "", true, endWalkCh)

	entriesValid := make([]bool, len(entryChs))
	entries := make([]FileInfo, len(entryChs))
	for {
		entry, quorumCount, ok := leastEntry(entryChs, entries, entriesValid)
		if !ok {
			break
		}

		if quorumCount == s.drivesPerSet && opts.ScanMode == madmin.HealNormalScan {
			// Skip good entries.
			continue
		}

		// Wait and proceed if there are active requests
		waitForLowHTTPReq(int32(s.drivesPerSet))

		if err := healObject(bucket, entry.Name); err != nil {
			return toObjectErr(err, bucket, entry.Name)
		}
	}

	return nil
}
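
// A hedged usage sketch of HealObjects (hypothetical callback, not the
// server's background healer). The callback signature mirrors the call site
// above: it receives the bucket and object name of every entry that lacks
// full quorum and decides how to heal it, here by delegating to HealObject.
func healObjectsSketch(ctx context.Context, s *xlSets, bucket, prefix string) error {
	opts := madmin.HealOpts{ScanMode: madmin.HealNormalScan}
	return s.HealObjects(ctx, bucket, prefix, opts, func(bucket, object string) error {
		_, err := s.HealObject(ctx, bucket, object, opts)
		return err
	})
}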

// PutObjectTags - replace or add tags to an existing object
func (s *xlSets) PutObjectTags(ctx context.Context, bucket, object string, tags string) error {
	return s.getHashedSet(object).PutObjectTags(ctx, bucket, object, tags)
}

// DeleteObjectTags - delete object tags from an existing object
func (s *xlSets) DeleteObjectTags(ctx context.Context, bucket, object string) error {
	return s.getHashedSet(object).DeleteObjectTags(ctx, bucket, object)
}

// GetObjectTags - get object tags from an existing object
func (s *xlSets) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) {
	return s.getHashedSet(object).GetObjectTags(ctx, bucket, object)
}

// GetMetrics - no op
func (s *xlSets) GetMetrics(ctx context.Context) (*Metrics, error) {
	logger.LogIf(ctx, NotImplemented{})
	return &Metrics{}, NotImplemented{}
}

// maintainMRFList gathers the list of uploads that succeeded only partially
// (i.e. some drives in the set missed the write) from all underlying xl sets
// and puts them in a global map which should not have more than 10000 entries.
func (s *xlSets) maintainMRFList() {
	var agg = make(chan partialUpload, 10000)
	for i, xl := range s.sets {
		go func(c <-chan partialUpload, setIndex int) {
			for msg := range c {
				msg.failedSet = setIndex
				select {
				case agg <- msg:
				default:
					// Aggregation channel is full, drop the entry.
				}
			}
		}(xl.mrfUploadCh, i)
	}

	for fUpload := range agg {
		s.mrfMU.Lock()
		if len(s.mrfUploads) > 10000 {
			s.mrfMU.Unlock()
			continue
		}
		s.mrfUploads[pathJoin(fUpload.bucket, fUpload.object)] = fUpload.failedSet
		s.mrfMU.Unlock()
	}
}

// healMRFRoutine monitors new disk connections and sweeps the MRF list
// to find objects related to the newly connected disk that need to be healed.
func (s *xlSets) healMRFRoutine() {
	// Wait until background heal state is initialized
	var bgSeq *healSequence
	for {
		if globalBackgroundHealState == nil {
			time.Sleep(time.Second)
			continue
		}
		var ok bool
		bgSeq, ok = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
		if ok {
			break
		}
		time.Sleep(time.Second)
	}

	for e := range s.disksConnectEvent {
		// Get the list of objects related to the xl set
		// to which the connected disk belongs.
		var mrfUploads []string
		s.mrfMU.Lock()
		for k, v := range s.mrfUploads {
			if v == e.setIndex {
				mrfUploads = append(mrfUploads, k)
			}
		}
		s.mrfMU.Unlock()

		// Heal objects
		for _, u := range mrfUploads {
			// Send an object to be healed with a timeout
			select {
			case bgSeq.sourceCh <- healSource{path: u}:
			case <-time.After(100 * time.Millisecond):
			}

			s.mrfMU.Lock()
			delete(s.mrfUploads, u)
			s.mrfMU.Unlock()
		}
	}
}
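
// A hedged sketch (hypothetical wiring; the real startup path lives elsewhere
// in the server) of how the two MRF goroutines above are meant to run for the
// lifetime of the xlSets object: one aggregates partially written uploads,
// the other heals them whenever the owning disk reconnects.
func startMRFRoutinesSketch(s *xlSets) {
	go s.maintainMRFList() // collect partially written uploads from every set
	go s.healMRFRoutine()  // heal them when their disks come back online
}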