/*
 * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"hash/crc32"
	"math/rand"
	"net/http"
	"sort"
	"sync"
	"time"

	"github.com/dchest/siphash"
	"github.com/dustin/go-humanize"
	"github.com/google/uuid"
	"github.com/minio/minio-go/v7/pkg/set"
	"github.com/minio/minio-go/v7/pkg/tags"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/bpool"
	"github.com/minio/minio/pkg/console"
	"github.com/minio/minio/pkg/dsync"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/sync/errgroup"
)
// setsDsyncLockers is encapsulated type for Close()
type setsDsyncLockers [][]dsync.NetLocker

// Information of a new disk connection
type diskConnectInfo struct {
	setIndex int
}

// erasureSets implements ObjectLayer combining a static list of erasure coded
// object sets. NOTE: There is no dynamic scaling allowed or intended in
// the current design.
type erasureSets struct {
	GatewayUnsupported

	sets []*erasureObjects

	// Reference format.
	format *formatErasureV3

	// erasureDisks mutex to lock erasureDisks.
	erasureDisksMu sync.RWMutex

	// Re-ordered list of disks per set.
	erasureDisks [][]StorageAPI

	// Distributed locker clients.
	erasureLockers setsDsyncLockers

	// Distributed lock owner (constant per running instance).
	erasureLockOwner string

	// List of endpoints provided on the command line.
	endpoints Endpoints

	// String version of all the endpoints, an optimization
	// to avoid url.String() conversion taking CPU on
	// large disk setups.
	endpointStrings []string

	// Total number of sets and the number of disks per set.
	setCount, setDriveCount int
	defaultParityCount      int

	poolIndex int

	disksConnectEvent chan diskConnectInfo

	// Distribution algorithm of choice.
	distributionAlgo string
	deploymentID     [16]byte

	disksStorageInfoCache timedValue

	mrfMU         sync.Mutex
	mrfOperations map[healSource]int
}

func isEndpointConnected(diskMap map[string]StorageAPI, endpoint string) bool {
	disk := diskMap[endpoint]
	if disk == nil {
		return false
	}
	return disk.IsOnline()
}

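// getDiskMap returns a snapshot of the currently attached disks that report
// themselves online, keyed by their endpoint string.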
func (s *erasureSets) getDiskMap() map[string]StorageAPI {
	diskMap := make(map[string]StorageAPI)

	s.erasureDisksMu.RLock()
	defer s.erasureDisksMu.RUnlock()

	for i := 0; i < s.setCount; i++ {
		for j := 0; j < s.setDriveCount; j++ {
			disk := s.erasureDisks[i][j]
			if disk == OfflineDisk {
				continue
			}
			if !disk.IsOnline() {
				continue
			}
			diskMap[disk.String()] = disk
		}
	}
	return diskMap
}

// Initializes a new StorageAPI from the endpoint argument, returns
// StorageAPI and also the `format` that exists on the disk.
func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) {
	disk, err := newStorageAPIWithoutHealthCheck(endpoint)
	if err != nil {
		return nil, nil, err
	}

	format, err := loadFormatErasure(disk)
	if err != nil {
		if errors.Is(err, errUnformattedDisk) {
			info, derr := disk.DiskInfo(context.TODO())
			if derr != nil && info.RootDisk {
				return nil, nil, fmt.Errorf("Disk: %s returned %w", disk, derr) // make sure to use '%w' to wrap the error
			}
		}
		return nil, nil, fmt.Errorf("Disk: %s returned %w", disk, err) // make sure to use '%w' to wrap the error
	}

	return disk, format, nil
}

// findDiskIndexByDiskID - returns the i,j'th position of the input `diskID` against the reference
// format, after successful validation.
// - i'th position is the set index
// - j'th position is the disk index in the current set
func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int, error) {
	if diskID == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", diskID)
	}
	for i := 0; i < len(refFormat.Erasure.Sets); i++ {
		for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
			if refFormat.Erasure.Sets[i][j] == diskID {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", diskID)
}

// findDiskIndex - returns the i,j'th position of the input `format` against the reference
// format, after successful validation.
// - i'th position is the set index
// - j'th position is the disk index in the current set
func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
	if err := formatErasureV3Check(refFormat, format); err != nil {
		return 0, 0, err
	}

	if format.Erasure.This == offlineDiskUUID {
		return -1, -1, fmt.Errorf("diskID: %s is offline", format.Erasure.This)
	}

	for i := 0; i < len(refFormat.Erasure.Sets); i++ {
		for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ {
			if refFormat.Erasure.Sets[i][j] == format.Erasure.This {
				return i, j, nil
			}
		}
	}

	return -1, -1, fmt.Errorf("diskID: %s not found", format.Erasure.This)
}

// connectDisks - attempt to connect all the endpoints, loads format
// and re-arranges the disks in proper position.
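// Endpoints are attempted concurrently, one goroutine per endpoint, and the
// call returns only after all attempts have completed.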
func (s *erasureSets) connectDisks() {
	var wg sync.WaitGroup
	diskMap := s.getDiskMap()
	for _, endpoint := range s.endpoints {
		diskPath := endpoint.String()
		if endpoint.IsLocal {
			diskPath = endpoint.Path
		}
		if isEndpointConnected(diskMap, diskPath) {
			continue
		}
		wg.Add(1)
		go func(endpoint Endpoint) {
			defer wg.Done()
			disk, format, err := connectEndpoint(endpoint)
			if err != nil {
				if endpoint.IsLocal && errors.Is(err, errUnformattedDisk) {
					globalBackgroundHealState.pushHealLocalDisks(endpoint)
					logger.Info(fmt.Sprintf("Found unformatted drive %s, attempting to heal...", endpoint))
				} else {
					printEndpointError(endpoint, err, true)
				}
				return
			}
			if disk.IsLocal() && disk.Healing() != nil {
				globalBackgroundHealState.pushHealLocalDisks(disk.Endpoint())
				logger.Info(fmt.Sprintf("Found the drive %s that needs healing, attempting to heal...", disk))
			}
			s.erasureDisksMu.RLock()
			setIndex, diskIndex, err := findDiskIndex(s.format, format)
			s.erasureDisksMu.RUnlock()
			if err != nil {
				printEndpointError(endpoint, err, false)
				return
			}

			s.erasureDisksMu.Lock()
			if s.erasureDisks[setIndex][diskIndex] != nil {
				s.erasureDisks[setIndex][diskIndex].Close()
			}
			if disk.IsLocal() {
				disk.SetDiskID(format.Erasure.This)
				s.erasureDisks[setIndex][diskIndex] = disk
			} else {
				// Enable healthcheck disk for remote endpoint.
				disk, err = newStorageAPI(endpoint)
				if err != nil {
					printEndpointError(endpoint, err, false)
					return
				}
				disk.SetDiskID(format.Erasure.This)
				s.erasureDisks[setIndex][diskIndex] = disk
			}
			disk.SetDiskLoc(s.poolIndex, setIndex, diskIndex)
			s.endpointStrings[setIndex*s.setDriveCount+diskIndex] = disk.String()
			s.erasureDisksMu.Unlock()
			go func(setIndex int) {
				idler := time.NewTimer(100 * time.Millisecond)
				defer idler.Stop()

				// Send a new disk connect event with a timeout
				select {
				case s.disksConnectEvent <- diskConnectInfo{setIndex: setIndex}:
				case <-idler.C:
				}
			}(setIndex)
		}(endpoint)
	}
	wg.Wait()
}

// monitorAndConnectEndpoints is a monitoring loop that keeps track of
// disconnected endpoints, reconnecting them and placing them back into the
// right position in the set topology. Monitoring happens at the given interval.
func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInterval time.Duration) {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))

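	// Sleep for a random duration of up to one second as jitter, so that all
	// servers do not start probing their endpoints at exactly the same time.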
	time.Sleep(time.Duration(r.Float64() * float64(time.Second)))

	// Pre-emptively connect the disks if possible.
	s.connectDisks()

	monitor := time.NewTimer(monitorInterval)
	defer monitor.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-monitor.C:
			// Reset the timer once fired for required interval.
			monitor.Reset(monitorInterval)

			if serverDebugLog {
				console.Debugln("running disk monitoring")
			}

			s.connectDisks()
		}
	}
}

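// GetLockers returns a closure for a given set, which provides a copy of the
// distributed lockers for that set along with the local lock owner.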
func (s *erasureSets) GetLockers(setIndex int) func() ([]dsync.NetLocker, string) {
	return func() ([]dsync.NetLocker, string) {
		lockers := make([]dsync.NetLocker, len(s.erasureLockers[setIndex]))
		copy(lockers, s.erasureLockers[setIndex])
		return lockers, s.erasureLockOwner
	}
}

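// GetEndpoints returns a closure for a given set, which provides the current
// endpoint strings of the disks belonging to that set.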
func (s *erasureSets) GetEndpoints(setIndex int) func() []string {
	return func() []string {
		s.erasureDisksMu.RLock()
		defer s.erasureDisksMu.RUnlock()

		eps := make([]string, s.setDriveCount)
		for i := 0; i < s.setDriveCount; i++ {
			eps[i] = s.endpointStrings[setIndex*s.setDriveCount+i]
		}
		return eps
	}
}

// GetDisks returns a closure for a given set, which provides list of disks per set.
func (s *erasureSets) GetDisks(setIndex int) func() []StorageAPI {
	return func() []StorageAPI {
		s.erasureDisksMu.RLock()
		defer s.erasureDisksMu.RUnlock()
		disks := make([]StorageAPI, s.setDriveCount)
		copy(disks, s.erasureDisks[setIndex])
		return disks
	}
}

// defaultMonitorConnectEndpointInterval is the interval to monitor endpoint connections.
// Must be bigger than defaultMonitorNewDiskInterval.
const defaultMonitorConnectEndpointInterval = defaultMonitorNewDiskInterval + time.Second*5

// Initialize new set of erasure coded sets.
func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []StorageAPI, format *formatErasureV3, defaultParityCount, poolIdx int) (*erasureSets, error) {
	setCount := len(format.Erasure.Sets)
	setDriveCount := len(format.Erasure.Sets[0])

	endpointStrings := make([]string, len(endpoints))

	// Initialize the erasure sets instance.
	s := &erasureSets{
		sets:               make([]*erasureObjects, setCount),
		erasureDisks:       make([][]StorageAPI, setCount),
		erasureLockers:     make([][]dsync.NetLocker, setCount),
		erasureLockOwner:   GetLocalPeer(globalEndpoints),
		endpoints:          endpoints,
		endpointStrings:    endpointStrings,
		setCount:           setCount,
		setDriveCount:      setDriveCount,
		defaultParityCount: defaultParityCount,
		format:             format,
		disksConnectEvent:  make(chan diskConnectInfo),
		distributionAlgo:   format.Erasure.DistributionAlgo,
		deploymentID:       uuid.MustParse(format.ID),
		mrfOperations:      make(map[healSource]int),
		poolIndex:          poolIdx,
	}

	mutex := newNSLock(globalIsDistErasure)

	// Number of buffers, max 2GB
	n := (2 * humanize.GiByte) / (blockSizeV2 * 2)
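	// For example, assuming the default blockSizeV2 of 1 MiB, this works out to
	// 2 GiB / 2 MiB = 1024 pooled buffers of up to 2 MiB capacity each.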
	// Initialize byte pool once for all sets; the pool holds at most n buffers,
	// each blockSizeV2 bytes long with a capacity of blockSizeV2*2.
	bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)

	for i := 0; i < setCount; i++ {
		s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
	}

	var erasureLockers = map[string]dsync.NetLocker{}
	for _, endpoint := range endpoints {
		if _, ok := erasureLockers[endpoint.Host]; !ok {
			erasureLockers[endpoint.Host] = newLockAPI(endpoint)
		}
	}

	for i := 0; i < setCount; i++ {
		var lockerEpSet = set.NewStringSet()
		for j := 0; j < setDriveCount; j++ {
			endpoint := endpoints[i*setDriveCount+j]
			// Only add one locker per endpoint host for each erasure set.
			if locker, ok := erasureLockers[endpoint.Host]; ok && !lockerEpSet.Contains(endpoint.Host) {
				lockerEpSet.Add(endpoint.Host)
				s.erasureLockers[i] = append(s.erasureLockers[i], locker)
			}
			disk := storageDisks[i*setDriveCount+j]
			if disk == nil {
				continue
			}
			diskID, derr := disk.GetDiskID()
			if derr != nil {
				continue
			}
			m, n, err := findDiskIndexByDiskID(format, diskID)
			if err != nil {
				continue
			}
			disk.SetDiskLoc(s.poolIndex, m, n)
			s.endpointStrings[m*setDriveCount+n] = disk.String()
			s.erasureDisks[m][n] = disk
		}

		// Initialize erasure objects for a given set.
		s.sets[i] = &erasureObjects{
			setIndex:              i,
			poolIndex:             poolIdx,
			setDriveCount:         setDriveCount,
			defaultParityCount:    defaultParityCount,
			getDisks:              s.GetDisks(i),
			getLockers:            s.GetLockers(i),
			getEndpoints:          s.GetEndpoints(i),
			deletedCleanupSleeper: newDynamicSleeper(10, 10*time.Second),
			nsMutex:               mutex,
			bp:                    bp,
			mrfOpCh:               make(chan partialOperation, 10000),
		}
	}

	// Interval at which the ".trash/" folder is cleaned up on every set, with
	// sufficient sleep cycles in between individual deletes.
	const deletedObjectsCleanupInterval = 10 * time.Minute

	// start cleanup stale uploads go-routine.
	go s.cleanupStaleUploads(ctx, GlobalStaleUploadsCleanupInterval, GlobalStaleUploadsExpiry)

	// start cleanup of deleted objects.
	go s.cleanupDeletedObjects(ctx, deletedObjectsCleanupInterval)

	// Start the disk monitoring and connect routine.
	go s.monitorAndConnectEndpoints(ctx, defaultMonitorConnectEndpointInterval)
	go s.maintainMRFList()
	go s.healMRFRoutine()

	return s, nil
}

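// cleanupDeletedObjects periodically asks every erasure set to purge entries
// from its ".trash/" folder, i.e. objects that were moved aside on deletion.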
func (s *erasureSets) cleanupDeletedObjects(ctx context.Context, cleanupInterval time.Duration) {
	timer := time.NewTimer(cleanupInterval)
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			// Reset for the next interval
			timer.Reset(cleanupInterval)

			for _, set := range s.sets {
				set.cleanupDeletedObjects(ctx)
			}
		}
	}
}

func (s *erasureSets) cleanupStaleUploads(ctx context.Context, cleanupInterval, expiry time.Duration) {
	timer := time.NewTimer(cleanupInterval)
	defer timer.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			// Reset for the next interval
			timer.Reset(cleanupInterval)

			for _, set := range s.sets {
				set.cleanupStaleUploads(ctx, expiry)
			}
		}
	}
}

const objectErasureMapKey = "objectErasureMap"

type auditObjectOp struct {
	Pool  int      `json:"poolId"`
	Set   int      `json:"setId"`
	Disks []string `json:"disks"`
}

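// auditObjectErasureSet tags the current request with the pool, set and disk
// endpoints that served the given object, so audit log entries can include them.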
func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) {
	if len(logger.AuditTargets) == 0 {
		return
	}

	object = decodeDirObject(object)

	op := auditObjectOp{
		Pool:  set.poolIndex + 1,
		Set:   set.setIndex + 1,
		Disks: set.getEndpoints(),
	}

	var objectErasureSetTag map[string]auditObjectOp
	reqInfo := logger.GetReqInfo(ctx)
	for _, kv := range reqInfo.GetTags() {
		if kv.Key == objectErasureMapKey {
			objectErasureSetTag = kv.Val.(map[string]auditObjectOp)
			break
		}
	}

	if objectErasureSetTag == nil {
		objectErasureSetTag = make(map[string]auditObjectOp)
	}

	objectErasureSetTag[object] = op
	reqInfo.SetTags(objectErasureMapKey, objectErasureSetTag)
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (s *erasureSets) NewNSLock(bucket string, objects ...string) RWLocker {
	if len(objects) == 1 {
		return s.getHashedSet(objects[0]).NewNSLock(bucket, objects...)
	}
	return s.getHashedSet("").NewNSLock(bucket, objects...)
}

// SetDriveCount returns the current drives per set.
func (s *erasureSets) SetDriveCount() int {
	return s.setDriveCount
}

// ParityCount returns the default parity count used while erasure
// coding objects.
func (s *erasureSets) ParityCount() int {
	return s.defaultParityCount
}

// StorageUsageInfo - combines output of StorageInfo across all erasure coded object sets.
// This only returns disk usage info for ServerPools to perform placement decisions; this call
// is not implemented in the Object interface and is not meant to be used by other object
// layer implementations.
func (s *erasureSets) StorageUsageInfo(ctx context.Context) StorageInfo {
	storageUsageInfo := func() StorageInfo {
		var storageInfo StorageInfo
		storageInfos := make([]StorageInfo, len(s.sets))
		storageInfo.Backend.Type = madmin.Erasure

		g := errgroup.WithNErrs(len(s.sets))
		for index := range s.sets {
			index := index
			g.Go(func() error {
				// ignoring errors on purpose
				storageInfos[index], _ = s.sets[index].StorageInfo(ctx)
				return nil
			}, index)
		}

		// Wait for the go routines.
		g.Wait()

		for _, lstorageInfo := range storageInfos {
			storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
		}

		return storageInfo
	}

	s.disksStorageInfoCache.Once.Do(func() {
		s.disksStorageInfoCache.TTL = time.Second
		s.disksStorageInfoCache.Update = func() (interface{}, error) {
			return storageUsageInfo(), nil
		}
	})

	v, _ := s.disksStorageInfoCache.Get()
	return v.(StorageInfo)
}

// StorageInfo - combines output of StorageInfo across all erasure coded object sets.
func (s *erasureSets) StorageInfo(ctx context.Context) (StorageInfo, []error) {
	var storageInfo madmin.StorageInfo

	storageInfos := make([]madmin.StorageInfo, len(s.sets))
	storageInfoErrs := make([][]error, len(s.sets))

	g := errgroup.WithNErrs(len(s.sets))
	for index := range s.sets {
		index := index
		g.Go(func() error {
			storageInfos[index], storageInfoErrs[index] = s.sets[index].StorageInfo(ctx)
			return nil
		}, index)
	}

	// Wait for the go routines.
	g.Wait()

	for _, lstorageInfo := range storageInfos {
		storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
	}

	var errs []error
	for i := range s.sets {
		errs = append(errs, storageInfoErrs[i]...)
	}

	return storageInfo, errs
}

// LocalStorageInfo - combines output of LocalStorageInfo across all erasure coded object sets.
func (s *erasureSets) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
	var storageInfo StorageInfo

	storageInfos := make([]StorageInfo, len(s.sets))
	storageInfoErrs := make([][]error, len(s.sets))

	g := errgroup.WithNErrs(len(s.sets))
	for index := range s.sets {
		index := index
		g.Go(func() error {
			storageInfos[index], storageInfoErrs[index] = s.sets[index].LocalStorageInfo(ctx)
			return nil
		}, index)
	}

	// Wait for the go routines.
	g.Wait()

	for _, lstorageInfo := range storageInfos {
		storageInfo.Disks = append(storageInfo.Disks, lstorageInfo.Disks...)
	}

	var errs []error
	for i := range s.sets {
		errs = append(errs, storageInfoErrs[i]...)
	}

	return storageInfo, errs
}

// Shutdown shuts down all erasure coded sets in parallel,
// returning the first error encountered.
func (s *erasureSets) Shutdown(ctx context.Context) error {
	g := errgroup.WithNErrs(len(s.sets))

	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].Shutdown(ctx)
		}, index)
	}

	for _, err := range g.Wait() {
		if err != nil {
			return err
		}
	}
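	// Drain a pending disk connect event, if any, before closing the channel.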
	select {
	case _, ok := <-s.disksConnectEvent:
		if ok {
			close(s.disksConnectEvent)
		}
	default:
		close(s.disksConnectEvent)
	}
	return nil
}

// MakeBucketWithLocation - creates a new bucket across all sets simultaneously,
// then returns the first encountered error.
func (s *erasureSets) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Create buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].MakeBucketWithLocation(ctx, bucket, opts)
		}, index)
	}

	errs := g.Wait()

	// Return the first encountered error
	for _, err := range errs {
		if err != nil {
			return err
		}
	}

	// Success.
	return nil
}

// Hashes the key, returning an integer based on the input algorithm.
// This function currently supports:
// - CRCMOD
// - SIPMOD
func sipHashMod(key string, cardinality int, id [16]byte) int {
	if cardinality <= 0 {
		return -1
	}
	// use the faster version as per siphash docs
	// https://github.com/dchest/siphash#usage
	k0, k1 := binary.LittleEndian.Uint64(id[0:8]), binary.LittleEndian.Uint64(id[8:16])
	sum64 := siphash.Hash(k0, k1, []byte(key))
	return int(sum64 % uint64(cardinality))
}

func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

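// hashKey returns the erasure set index for the given key under the chosen
// distribution algorithm. A purely illustrative sketch of how it is used
// (the key and the 16-set layout below are hypothetical; deploymentID stands
// for the deployment's 16-byte ID, s.deploymentID in this file):
//
//	setIdx := hashKey(formatErasureVersionV3DistributionAlgoV3, "photos/2021/march/1.jpg", 16, deploymentID)
//
// The same key always maps to the same set as long as the set count and the
// deployment ID remain unchanged.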
func hashKey(algo string, key string, cardinality int, id [16]byte) int {
	switch algo {
	case formatErasureVersionV2DistributionAlgoV1:
		return crcHashMod(key, cardinality)
	case formatErasureVersionV3DistributionAlgoV2, formatErasureVersionV3DistributionAlgoV3:
		return sipHashMod(key, cardinality, id)
	default:
		// Unknown algorithm returns -1, also if cardinality is less than 0.
		return -1
	}
}

// getHashedSetIndex always returns the same erasure coded set index for a given input.
func (s *erasureSets) getHashedSetIndex(input string) int {
	return hashKey(s.distributionAlgo, input, len(s.sets), s.deploymentID)
}

// getHashedSet always returns the same erasure coded set for a given input.
func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) {
	return s.sets[s.getHashedSetIndex(input)]
}

// GetBucketInfo - returns bucket info from one of the erasure coded sets.
func (s *erasureSets) GetBucketInfo(ctx context.Context, bucket string) (bucketInfo BucketInfo, err error) {
	return s.getHashedSet("").GetBucketInfo(ctx, bucket)
}

// IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (s *erasureSets) IsNotificationSupported() bool {
	return s.getHashedSet("").IsNotificationSupported()
}

// IsListenSupported returns whether listen bucket notification is applicable for this layer.
func (s *erasureSets) IsListenSupported() bool {
	return true
}

// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (s *erasureSets) IsEncryptionSupported() bool {
	return s.getHashedSet("").IsEncryptionSupported()
}

// IsCompressionSupported returns whether compression is applicable for this layer.
func (s *erasureSets) IsCompressionSupported() bool {
	return s.getHashedSet("").IsCompressionSupported()
}

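// IsTaggingSupported returns whether object tagging is applicable for this layer.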
func (s *erasureSets) IsTaggingSupported() bool {
	return true
}

// DeleteBucket - deletes a bucket on all sets simultaneously; if any of the
// sets fails to delete the bucket, we undo the deletions that succeeded.
func (s *erasureSets) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
	g := errgroup.WithNErrs(len(s.sets))

	// Delete buckets in parallel across all sets.
	for index := range s.sets {
		index := index
		g.Go(func() error {
			return s.sets[index].DeleteBucket(ctx, bucket, forceDelete)
		}, index)
	}

	errs := g.Wait()
	// For any failure, we attempt to undo all the delete bucket operations
	// by creating the bucket again on all sets where it was successfully deleted.
	for _, err := range errs {
		if err != nil {
			undoDeleteBucketSets(ctx, bucket, s.sets, errs)
			return err
		}
	}

	// Delete all bucket metadata.
	deleteBucketMetadata(ctx, s, bucket)

	// Success.
	return nil
}

// This function is used to undo a successful DeleteBucket operation.
func undoDeleteBucketSets(ctx context.Context, bucket string, sets []*erasureObjects, errs []error) {
	g := errgroup.WithNErrs(len(sets))

	// Undo previous delete bucket on all underlying sets.
	for index := range sets {
		index := index
		g.Go(func() error {
			if errs[index] == nil {
				return sets[index].MakeBucketWithLocation(ctx, bucket, BucketOptions{})
			}
			return nil
		}, index)
	}

	g.Wait()
}

// ListBuckets lists unique buckets across all sets and returns them sorted by
// name. As per design it is assumed that all buckets are present on all sets.
func (s *erasureSets) ListBuckets(ctx context.Context) (buckets []BucketInfo, err error) {
	var listBuckets []BucketInfo
	var healBuckets = map[string]VolInfo{}
	for _, set := range s.sets {
		// lists all unique buckets across drives.
		if err := listAllBuckets(ctx, set.getDisks(), healBuckets); err != nil {
			return nil, err
		}
	}

	for _, v := range healBuckets {
		listBuckets = append(listBuckets, BucketInfo(v))
	}

	sort.Slice(listBuckets, func(i, j int) bool {
		return listBuckets[i].Name < listBuckets[j].Name
	})

	return listBuckets, nil
}

// --- Object Operations ---

// GetObjectNInfo - returns object info and locked object ReadCloser
func (s *erasureSets) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}

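// parentDirIsObject reports whether the parent prefix of an object already
// exists as an object, consulting the erasure set that the parent hashes to.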
func (s *erasureSets) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
	if parent == "." {
		return false
	}
	return s.getHashedSet(parent).parentDirIsObject(ctx, bucket, parent)
}

// PutObject - writes an object to hashedSet based on the object name.
func (s *erasureSets) PutObject(ctx context.Context, bucket string, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	opts.ParentIsObject = s.parentDirIsObject
	return set.PutObject(ctx, bucket, object, data, opts)
}

// GetObjectInfo - reads object metadata from the hashedSet based on the object name.
func (s *erasureSets) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.GetObjectInfo(ctx, bucket, object, opts)
}

// DeleteObject - deletes an object from the hashedSet based on the object name.
func (s *erasureSets) DeleteObject(ctx context.Context, bucket string, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.DeleteObject(ctx, bucket, object, opts)
}

// DeleteObjects - bulk delete of objects.
// Bulk delete is only possible within one set. For that purpose objects are
// first grouped by set, bulk delete is then invoked per set, and the error
// of each delete is reported back per object.
func (s *erasureSets) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	type delObj struct {
		// Set index associated to this object
		setIndex int
		// Original index from the list of arguments
		// where this object is passed
		origIndex int
		// object to delete
		object ObjectToDelete
	}

	// Transform []delObj to the list of object names
	toNames := func(delObjs []delObj) []ObjectToDelete {
		objs := make([]ObjectToDelete, len(delObjs))
		for i, obj := range delObjs {
			objs[i] = obj.object
		}
		return objs
	}

	// The result of delete operation on all passed objects
	var delErrs = make([]error, len(objects))

	// The result of delete objects
	var delObjects = make([]DeletedObject, len(objects))

	// A map between a set and its associated objects
	var objSetMap = make(map[int][]delObj)

	// Group objects by set index
	for i, object := range objects {
		index := s.getHashedSetIndex(object.ObjectName)
		objSetMap[index] = append(objSetMap[index], delObj{setIndex: index, origIndex: i, object: object})
	}

	// Invoke bulk delete on objects per set and save
	// the result of the delete operation
	for _, objsGroup := range objSetMap {
		set := s.getHashedSet(objsGroup[0].object.ObjectName)
		dobjects, errs := set.DeleteObjects(ctx, bucket, toNames(objsGroup), opts)
		for i, obj := range objsGroup {
			delErrs[obj.origIndex] = errs[i]
			delObjects[obj.origIndex] = dobjects[i]
			if errs[i] == nil {
				auditObjectErasureSet(ctx, obj.object.ObjectName, set)
			}
		}
	}

	return delObjects, delErrs
}

// CopyObject - copies objects from one hashedSet to another hashedSet, on server side.
func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcSet := s.getHashedSet(srcObject)
	dstSet := s.getHashedSet(dstObject)

	auditObjectErasureSet(ctx, dstObject, dstSet)

	cpSrcDstSame := srcSet == dstSet
	// Check if this request is only a metadata update.
	if cpSrcDstSame && srcInfo.metadataOnly {
		// Version ID is set for the destination and matches the source version ID;
		// perform an in-place update.
		if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID {
			return srcSet.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}
		// Destination is not versioned and source version ID is empty;
		// perform an in-place update.
		if !dstOpts.Versioned && srcOpts.VersionID == "" {
			return srcSet.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}
		// CopyObject optimization where we don't create an entire copy
		// of the content, instead we add a reference. We disallow legacy
		// objects from being self-referenced in this manner, so make sure
		// that we actually create a new dataDir for legacy objects.
		if dstOpts.Versioned && srcOpts.VersionID != dstOpts.VersionID && !srcInfo.Legacy {
			srcInfo.versionOnly = true
			return srcSet.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}
	}

	putOpts := ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
		Versioned:            dstOpts.Versioned,
		VersionID:            dstOpts.VersionID,
		MTime:                dstOpts.MTime,
	}

	return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}

func (s *erasureSets) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) {
	// In list multipart uploads we are going to treat the input prefix as the object,
	// which means that we are not supporting directory navigation.
	set := s.getHashedSet(prefix)
	auditObjectErasureSet(ctx, prefix, set)
	return set.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
}

// Initiate a new multipart upload on a hashedSet based on object name.
func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (uploadID string, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.NewMultipartUpload(ctx, bucket, object, opts)
}

// Copies a part of an object from source hashedSet to destination hashedSet.
func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
	destSet := s.getHashedSet(destObject)
	auditObjectErasureSet(ctx, destObject, destSet)
	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
}

// PutObjectPart - writes part of an object to hashedSet based on the object name.
func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
}

// GetMultipartInfo - return multipart metadata info uploaded at hashedSet.
func (s *erasureSets) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (result MultipartInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
}

// ListObjectParts - lists all uploaded parts to an object in hashedSet.
func (s *erasureSets) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
}

// Aborts an in-progress multipart operation on hashedSet based on the object name.
func (s *erasureSets) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	return set.AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
}

// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (s *erasureSets) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	set := s.getHashedSet(object)
	auditObjectErasureSet(ctx, object, set)
	opts.ParentIsObject = s.parentDirIsObject
	return set.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
}

/*

All disks online
-----------------
- All Unformatted - format all and return success.
- Some Unformatted - format all and return success.
- Any JBOD inconsistent - return failure
- Some are corrupt (missing format.json) - return failure
- Any unrecognized disks - return failure

Some disks are offline and we have quorum.
-----------------
- Some unformatted - format all and return success,
  treat disks offline as corrupted.
- Any JBOD inconsistent - return failure
- Some are corrupt (missing format.json)
- Any unrecognized disks - return failure

No read quorum
-----------------
failure for all cases.

// Pseudo code for managing `format.json`.

// Generic checks.
if (no quorum) return error
if (any disk is corrupt) return error // Always error
if (jbod inconsistent) return error // Always error.
if (disks not recognized) // Always error.

// Specific checks.
if (all disks online)
  if (all disks return format.json)
    if (jbod consistent)
      if (all disks recognized)
        return
  else
    if (all disks return format.json not found)
      return error
    else (some disks return format.json not found)
      (heal format)
      return
    fi
  fi
else
  if (some disks return format.json not found)
    // Offline disks are marked as dead.
    (heal format) // Offline disks should be marked as dead.
    return success
  fi
fi
*/

|
|
|
|
|
2020-07-13 12:51:07 -04:00
|
|
|
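
// The decision table above can be summarized as a handful of count-based
// checks. The sketch below is illustrative only and is not part of the
// server logic; the helper name and its count-based signature are
// hypothetical, chosen to make the pseudo code concrete. The real checks
// live in HealFormat and the format helpers further down.
func sketchFormatHealDecision(total, online, unformatted, corrupt, unrecognized, jbodInconsistent int) error {
	quorum := total/2 + 1
	switch {
	case online < quorum:
		// No read quorum - failure for all cases.
		return errors.New("insufficient disks online for read quorum")
	case corrupt > 0:
		// Some disks are corrupt (damaged format.json) - always an error.
		return errors.New("corrupt format.json found")
	case jbodInconsistent > 0:
		// Any JBOD inconsistency - always an error.
		return errors.New("inconsistent drive ordering")
	case unrecognized > 0:
		// Any unrecognized disks - always an error.
		return errors.New("unrecognized disks found")
	case unformatted == 0:
		// Every online disk already carries format.json - nothing to heal.
		return nil
	default:
		// Some (or all) disks are unformatted - format them and succeed.
		return nil
	}
}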

func formatsToDrivesInfo(endpoints Endpoints, formats []*formatErasureV3, sErrs []error) (beforeDrives []madmin.HealDriveInfo) {
	beforeDrives = make([]madmin.HealDriveInfo, len(endpoints))
	// Existing formats are available (i.e. ok), so save them in the
	// result; also populate the disks to be healed.
	for i, format := range formats {
		drive := endpoints.GetString(i)
		var state = madmin.DriveStateCorrupt
		switch {
		case format != nil:
			state = madmin.DriveStateOk
		case sErrs[i] == errUnformattedDisk:
			state = madmin.DriveStateMissing
		case sErrs[i] == errDiskNotFound:
			state = madmin.DriveStateOffline
		}
		beforeDrives[i] = madmin.HealDriveInfo{
			UUID: func() string {
				if format != nil {
					return format.Erasure.This
				}
				return ""
			}(),
			Endpoint: drive,
			State:    state,
		}
	}

	return beforeDrives
}

// If it is a single node Erasure and all disks are root disks, it is most
// likely a test setup, else it is a production setup.
// On a test setup we allow creation of format.json on root disks to help
// with dev/testing.
func isTestSetup(infos []DiskInfo, errs []error) bool {
	rootDiskCount := 0
	for i := range errs {
		if errs[i] == nil || errs[i] == errUnformattedDisk {
			if infos[i].RootDisk {
				rootDiskCount++
			}
		}
	}
	// It is a test setup if a quorum (majority) of the usable disks are root disks.
	return rootDiskCount >= len(infos)/2+1
}
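
// A quick worked example of the threshold above, kept as an illustrative
// sketch only (this helper is hypothetical and not used by the server):
// with 4 drives the majority is 4/2+1 = 3 root disks, with a single drive
// it is 1/2+1 = 1, so a lone root disk is always treated as a test setup.
func sketchRootDiskQuorum(totalDisks int) int {
	return totalDisks/2 + 1
}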

func getHealDiskInfos(storageDisks []StorageAPI, errs []error) ([]DiskInfo, []error) {
	infos := make([]DiskInfo, len(storageDisks))
	g := errgroup.WithNErrs(len(storageDisks))
	for index := range storageDisks {
		index := index
		g.Go(func() error {
			if errs[index] != nil && errs[index] != errUnformattedDisk {
				return errs[index]
			}
			if storageDisks[index] == nil {
				return errDiskNotFound
			}
			var err error
			infos[index], err = storageDisks[index].DiskInfo(context.TODO())
			return err
		}, index)
	}
	return infos, g.Wait()
}

// Mark root disks as down so as not to heal them.
func markRootDisksAsDown(storageDisks []StorageAPI, errs []error) {
	var infos []DiskInfo
	infos, errs = getHealDiskInfos(storageDisks, errs)
	if !isTestSetup(infos, errs) {
		for i := range storageDisks {
			if storageDisks[i] != nil && infos[i].RootDisk {
				// We should not heal on a root disk, i.e. in a situation where the
				// minio-administrator has unmounted a defective drive we should not
				// heal a path on the root disk.
				logger.Info("Disk `%s` is the same as the system root disk.\n"+
					"Disk will not be used. Please supply a separate disk and restart the server.",
					storageDisks[i].String())
				storageDisks[i] = nil
			}
		}
	}
}

// HealFormat - heals missing `format.json` on fresh unformatted disks.
func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.HealResultItem, err error) {
	storageDisks, errs := initStorageDisksWithErrorsWithoutHealthCheck(s.endpoints)
	for i, derr := range errs {
		if derr != nil && derr != errDiskNotFound {
			return madmin.HealResultItem{}, fmt.Errorf("Disk %s: %w", s.endpoints[i], derr)
		}
	}

	defer func(storageDisks []StorageAPI) {
		if err != nil {
			closeStorageDisks(storageDisks)
		}
	}(storageDisks)

	formats, sErrs := loadFormatErasureAll(storageDisks, true)
	if err = checkFormatErasureValues(formats, storageDisks, s.setDriveCount); err != nil {
		return madmin.HealResultItem{}, err
	}

	// Mark all root disks down.
	markRootDisksAsDown(storageDisks, sErrs)

	refFormat, err := getFormatErasureInQuorum(formats)
	if err != nil {
		return res, err
	}

	// Prepare heal-result.
	res = madmin.HealResultItem{
		Type:      madmin.HealItemMetadata,
		Detail:    "disk-format",
		DiskCount: s.setCount * s.setDriveCount,
		SetCount:  s.setCount,
	}

	// Fetch all the drive info status.
	beforeDrives := formatsToDrivesInfo(s.endpoints, formats, sErrs)

	res.After.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	res.Before.Drives = make([]madmin.HealDriveInfo, len(beforeDrives))
	// Copy "after" drive state too from before.
	for k, v := range beforeDrives {
		res.Before.Drives[k] = v
		res.After.Drives[k] = v
	}

	if countErrs(sErrs, errUnformattedDisk) == 0 {
		return res, errNoHealRequired
	}

	// Initialize a new set of set formats which will be written to disk.
	newFormatSets := newHealFormatSets(refFormat, s.setCount, s.setDriveCount, formats, sErrs)

	if !dryRun {
		var tmpNewFormats = make([]*formatErasureV3, s.setCount*s.setDriveCount)
		for i := range newFormatSets {
			for j := range newFormatSets[i] {
				if newFormatSets[i][j] == nil {
					continue
				}
				res.After.Drives[i*s.setDriveCount+j].UUID = newFormatSets[i][j].Erasure.This
				res.After.Drives[i*s.setDriveCount+j].State = madmin.DriveStateOk
				tmpNewFormats[i*s.setDriveCount+j] = newFormatSets[i][j]
			}
		}

		// Save new formats `format.json` on unformatted disks.
		if err = saveUnformattedFormat(ctx, storageDisks, tmpNewFormats); err != nil {
			return madmin.HealResultItem{}, err
		}

		s.erasureDisksMu.Lock()

		for index, format := range tmpNewFormats {
			if format == nil {
				continue
			}

			m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This)
			if err != nil {
				continue
			}

			if s.erasureDisks[m][n] != nil {
				s.erasureDisks[m][n].Close()
			}
			storageDisks[index].SetDiskLoc(s.poolIndex, m, n)
			s.erasureDisks[m][n] = storageDisks[index]
			s.endpointStrings[m*s.setDriveCount+n] = storageDisks[index].String()
		}

		// Replace reference format with what was loaded from disks.
		s.format = refFormat

		s.erasureDisksMu.Unlock()
	}

	return res, nil
}
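
// HealFormat above addresses drives through a flattened index, multiplying
// the set index by the drives-per-set count and adding the drive index. A
// minimal illustrative sketch of that mapping and its inverse (the helper
// names are hypothetical and not used elsewhere):
func sketchFlattenDriveIndex(setIndex, driveIndex, setDriveCount int) int {
	// Drive j of set i lands at position i*setDriveCount + j.
	return setIndex*setDriveCount + driveIndex
}

func sketchUnflattenDriveIndex(flatIndex, setDriveCount int) (setIndex, driveIndex int) {
	// Integer division recovers the set; the remainder recovers the drive.
	return flatIndex / setDriveCount, flatIndex % setDriveCount
}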

// HealBucket - heals inconsistent buckets and bucket metadata on all sets.
func (s *erasureSets) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (result madmin.HealResultItem, err error) {
	// Initialize heal result info
	result = madmin.HealResultItem{
		Type:      madmin.HealItemBucket,
		Bucket:    bucket,
		DiskCount: s.setCount * s.setDriveCount,
		SetCount:  s.setCount,
	}

	for _, set := range s.sets {
		var healResult madmin.HealResultItem
		healResult, err = set.HealBucket(ctx, bucket, opts)
		if err != nil {
			return result, toObjectErr(err, bucket)
		}
		result.Before.Drives = append(result.Before.Drives, healResult.Before.Drives...)
		result.After.Drives = append(result.After.Drives, healResult.After.Drives...)
	}

	// Check if we had quorum to write, if not return an appropriate error.
	_, afterDriveOnline := result.GetOnlineCounts()
	if afterDriveOnline < ((s.setCount*s.setDriveCount)/2)+1 {
		return result, toObjectErr(errErasureWriteQuorum, bucket)
	}

	return result, nil
}

// HealObject - heals inconsistent object on a hashedSet based on object name.
func (s *erasureSets) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
	return s.getHashedSet(object).HealObject(ctx, bucket, object, versionID, opts)
}

// PutObjectTags - replace or add tags to an existing object
func (s *erasureSets) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
	er := s.getHashedSet(object)
	return er.PutObjectTags(ctx, bucket, object, tags, opts)
}

// DeleteObjectTags - delete object tags from an existing object
func (s *erasureSets) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	er := s.getHashedSet(object)
	return er.DeleteObjectTags(ctx, bucket, object, opts)
}

// GetObjectTags - get object tags from an existing object
func (s *erasureSets) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
	er := s.getHashedSet(object)
	return er.GetObjectTags(ctx, bucket, object, opts)
}

// maintainMRFList gathers the list of successful partial uploads
// from all underlying er.sets and puts them in a global map which
// should not have more than 10000 entries.
func (s *erasureSets) maintainMRFList() {
	var agg = make(chan partialOperation, 10000)
	for i, er := range s.sets {
		go func(c <-chan partialOperation, setIndex int) {
			for msg := range c {
				msg.failedSet = setIndex
				select {
				case agg <- msg:
				default:
				}
			}
		}(er.mrfOpCh, i)
	}

	for fOp := range agg {
		s.mrfMU.Lock()
		if len(s.mrfOperations) > 10000 {
			s.mrfMU.Unlock()
			continue
		}
		s.mrfOperations[healSource{
			bucket:    fOp.bucket,
			object:    fOp.object,
			versionID: fOp.versionID,
		}] = fOp.failedSet
		s.mrfMU.Unlock()
	}
}
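
// maintainMRFList above relies on a non-blocking send: when the aggregation
// channel is full, the operation is dropped rather than stalling the per-set
// goroutine, which keeps the MRF bookkeeping strictly best-effort. A minimal
// self-contained sketch of that pattern (illustrative only, not used by the
// server):
func sketchNonBlockingSend(ch chan<- int, v int) bool {
	select {
	case ch <- v:
		// There was room in the buffer; the value was queued.
		return true
	default:
		// The buffer is full; drop the value instead of blocking the caller.
		return false
	}
}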

func toSourceChTimed(t *time.Timer, sourceCh chan healSource, u healSource) {
	t.Reset(100 * time.Millisecond)

	// No defer, as we don't know which
	// case will be selected.

	select {
	case sourceCh <- u:
	case <-t.C:
		return
	}

	// We still need to check the return value
	// of Stop, because t could have fired
	// between the send on sourceCh and this line.
	if !t.Stop() {
		<-t.C
	}
}
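
// toSourceChTimed reuses a single time.Timer across sends so that a fresh
// timer does not have to be allocated per object. The Stop/drain dance is
// what makes that reuse safe: if Stop reports false the timer already fired,
// and its channel must be drained before the next Reset. A minimal sketch of
// the same pattern with a generic payload (illustrative only, not used by
// the server; the caller is assumed to own the timer exclusively):
func sketchSendWithTimeout(t *time.Timer, ch chan<- string, msg string, d time.Duration) bool {
	t.Reset(d)
	select {
	case ch <- msg:
		// Sent before the deadline; stop the timer and drain it if it fired
		// between the send and the Stop call, so the next Reset starts clean.
		if !t.Stop() {
			<-t.C
		}
		return true
	case <-t.C:
		// Timed out; the timer channel has already been consumed, so no
		// drain is needed before the next Reset.
		return false
	}
}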

// healMRFRoutine monitors new disk connections and sweeps the MRF list
// to find objects related to the newly connected disk that need to be healed.
func (s *erasureSets) healMRFRoutine() {
	// Wait until background heal state is initialized.
	bgSeq := mustGetHealSequence(GlobalContext)

	idler := time.NewTimer(100 * time.Millisecond)
	defer idler.Stop()

	for e := range s.disksConnectEvent {
		// Get the list of objects related to the er.set
		// to which the connected disk belongs.
		var mrfOperations []healSource
		s.mrfMU.Lock()
		for k, v := range s.mrfOperations {
			if v == e.setIndex {
				mrfOperations = append(mrfOperations, k)
			}
		}
		s.mrfMU.Unlock()

		// Heal objects
		for _, u := range mrfOperations {
			// Send the object to the background healer.
			toSourceChTimed(idler, bgSeq.sourceCh, u)

			s.mrfMU.Lock()
			delete(s.mrfOperations, u)
			s.mrfMU.Unlock()
		}
	}
}