2021-04-18 15:41:13 -04:00
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
2019-11-19 20:42:27 -05:00
package cmd
import (
2021-10-01 14:50:00 -04:00
"bytes"
2019-11-19 20:42:27 -05:00
"context"
2020-09-17 00:14:35 -04:00
"errors"
2019-11-19 20:42:27 -05:00
"fmt"
"io"
"math/rand"
"net/http"
2020-12-15 20:34:54 -05:00
"sort"
2020-08-13 18:21:20 -04:00
"strconv"
2022-04-19 11:20:48 -04:00
"strings"
2019-12-12 09:02:37 -05:00
"sync"
2020-03-18 19:19:29 -04:00
"time"
2019-11-19 20:42:27 -05:00
2021-05-06 11:52:02 -04:00
"github.com/minio/madmin-go"
2020-07-14 12:38:05 -04:00
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
2022-03-22 15:39:45 -04:00
"github.com/minio/minio/internal/bucket/lifecycle"
2021-06-01 17:59:40 -04:00
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/sync/errgroup"
2021-05-28 18:17:01 -04:00
"github.com/minio/pkg/wildcard"
2019-11-19 20:42:27 -05:00
)
2020-12-01 16:50:33 -05:00
// erasureServerPools is the top-level erasure-coded object layer: an ordered
// list of erasure-set pools. Each object lives in exactly one pool.
type erasureServerPools struct {
	GatewayUnsupported

	poolMetaMutex sync.RWMutex // guards poolMeta
	poolMeta      poolMeta     // persisted pool state (e.g. decommission progress)
	serverPools   []*erasureSets

	// Shut down async operations
	shutdown context.CancelFunc

	// Active decommission canceler, one slot per pool.
	decommissionCancelers []context.CancelFunc
}
2021-01-26 23:47:42 -05:00
func ( z * erasureServerPools ) SinglePool ( ) bool {
2020-12-01 16:50:33 -05:00
return len ( z . serverPools ) == 1
2019-11-19 20:42:27 -05:00
}
2021-01-06 12:35:47 -05:00
// Initialize new pool of erasure sets.
func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServerPools) (ObjectLayer, error) {
	// Single endpoint setup: bypass the pool machinery entirely and return
	// the single-disk object layer.
	if endpointServerPools.NEndpoints() == 1 {
		ep := endpointServerPools[0]
		storageDisks, format, err := waitForFormatErasure(true, ep.Endpoints, 1, ep.SetCount, ep.DrivesPerSet, "", "")
		if err != nil {
			return nil, err
		}

		objLayer, err := newErasureSingle(ctx, storageDisks[0], format)
		if err != nil {
			return nil, err
		}

		globalLocalDrives = storageDisks
		return objLayer, nil
	}

	var (
		deploymentID       string // must be identical across all pools
		distributionAlgo   string // must be identical across all pools
		commonParityDrives int    // parity derived from the first pool's drive count
		err                error

		formats      = make([]*formatErasureV3, len(endpointServerPools))
		storageDisks = make([][]StorageAPI, len(endpointServerPools))
		z            = &erasureServerPools{
			serverPools: make([]*erasureSets, len(endpointServerPools)),
		}
	)

	var localDrives []StorageAPI

	local := endpointServerPools.FirstLocal()
	for i, ep := range endpointServerPools {
		// If storage class is not set during startup, default values are used
		// -- Default for Reduced Redundancy Storage class is, parity = 2
		// -- Default for Standard Storage class is, parity = 2 - disks 4, 5
		// -- Default for Standard Storage class is, parity = 3 - disks 6, 7
		// -- Default for Standard Storage class is, parity = 4 - disks 8 to 16
		if commonParityDrives == 0 {
			commonParityDrives = ecDrivesNoConfig(ep.DrivesPerSet)
		}

		// Every pool must support the same parity ratio; reject mixed setups.
		if err = storageclass.ValidateParity(commonParityDrives, ep.DrivesPerSet); err != nil {
			return nil, fmt.Errorf("All current serverPools should have same parity ratio - expected %d, got %d", commonParityDrives, ecDrivesNoConfig(ep.DrivesPerSet))
		}

		// Blocks until the drives of this pool are formatted/available.
		storageDisks[i], formats[i], err = waitForFormatErasure(local, ep.Endpoints, i+1,
			ep.SetCount, ep.DrivesPerSet, deploymentID, distributionAlgo)
		if err != nil {
			return nil, err
		}

		// Collect the drives local to this process for the data update tracker.
		for _, storageDisk := range storageDisks[i] {
			if storageDisk != nil && storageDisk.IsLocal() {
				localDrives = append(localDrives, storageDisk)
			}
		}

		if deploymentID == "" {
			// all pools should have same deployment ID
			deploymentID = formats[i].ID
		}

		if distributionAlgo == "" {
			distributionAlgo = formats[i].Erasure.DistributionAlgo
		}

		// Validate if users brought different DeploymentID pools.
		if deploymentID != formats[i].ID {
			return nil, fmt.Errorf("All serverPools should have same deployment ID expected %s, got %s", deploymentID, formats[i].ID)
		}

		z.serverPools[i], err = newErasureSets(ctx, ep, storageDisks[i], formats[i], commonParityDrives, i)
		if err != nil {
			return nil, err
		}
	}

	z.decommissionCancelers = make([]context.CancelFunc, len(z.serverPools))

	// Retry pool initialization with jittered backoff (up to 5s per attempt)
	// until it succeeds or hits a non-retriable error.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	for {
		err := z.Init(ctx) // Initializes all pools.
		if err != nil {
			if !configRetriableErrors(err) {
				logger.Fatal(err, "Unable to initialize backend")
			}
			retry := time.Duration(r.Float64() * float64(5*time.Second))
			logger.LogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry))
			time.Sleep(retry)
			continue
		}
		break
	}

	drives := make([]string, 0, len(localDrives))
	for _, localDrive := range localDrives {
		drives = append(drives, localDrive.Endpoint().Path)
	}
	globalLocalDrives = localDrives

	// Tie background operations to a cancelable context so Shutdown can stop them.
	ctx, z.shutdown = context.WithCancel(ctx)
	go intDataUpdateTracker.start(ctx, drives...)
	return z, nil
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) NewNSLock ( bucket string , objects ... string ) RWLocker {
return z . serverPools [ 0 ] . NewNSLock ( bucket , objects ... )
2019-11-19 20:42:27 -05:00
}
2020-12-01 15:07:39 -05:00
// GetDisksID will return disks by their ID.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) GetDisksID ( ids ... string ) [ ] StorageAPI {
2020-12-01 15:07:39 -05:00
idMap := make ( map [ string ] struct { } )
for _ , id := range ids {
idMap [ id ] = struct { } { }
}
res := make ( [ ] StorageAPI , 0 , len ( idMap ) )
2020-12-11 19:58:36 -05:00
for _ , s := range z . serverPools {
s . erasureDisksMu . RLock ( )
defer s . erasureDisksMu . RUnlock ( )
for _ , disks := range s . erasureDisks {
2020-12-01 15:07:39 -05:00
for _ , disk := range disks {
2020-12-11 19:58:36 -05:00
if disk == OfflineDisk {
continue
}
if id , _ := disk . GetDiskID ( ) ; id != "" {
if _ , ok := idMap [ id ] ; ok {
res = append ( res , disk )
}
2020-12-01 15:07:39 -05:00
}
}
}
}
return res
}
2021-07-09 14:29:16 -04:00
// GetRawData will return all files with a given raw path to the callback.
// Errors are ignored, only errors from the callback are returned.
// For now only direct file paths are supported.
func (z *erasureServerPools) GetRawData(ctx context.Context, volume, file string, fn func(r io.Reader, host string, disk string, filename string, info StatInfo) error) error {
	// Count matches across every disk; used to report errFileNotFound when
	// no disk had the requested path.
	found := 0
	for _, s := range z.serverPools {
		for _, disks := range s.erasureDisks {
			for _, disk := range disks {
				if disk == OfflineDisk {
					continue
				}
				stats, err := disk.StatInfoFile(ctx, volume, file, true)
				if err != nil {
					// Per the contract above, stat errors are ignored;
					// move on to the next disk.
					continue
				}
				for _, si := range stats {
					found++
					var r io.ReadCloser
					if !si.Dir {
						r, err = disk.ReadFileStream(ctx, volume, si.Name, 0, si.Size)
						if err != nil {
							// Read errors are ignored too; skip this entry.
							continue
						}
					} else {
						// Directories carry no content; hand the callback an empty reader.
						r = io.NopCloser(bytes.NewBuffer([]byte{}))
					}
					// Keep disk path instead of ID, to ensure that the downloaded zip file can be
					// easily automated with `minio server hostname{1...n}/disk{1...m}`.
					err = fn(r, disk.Hostname(), disk.Endpoint().Path, pathJoin(volume, si.Name), si)
					r.Close()
					if err != nil {
						// Only callback errors abort the walk.
						return err
					}
				}
			}
		}
	}
	if found == 0 {
		return errFileNotFound
	}
	return nil
}
2022-07-06 16:29:49 -04:00
// Return the count of disks in each pool
2021-01-22 15:09:24 -05:00
func ( z * erasureServerPools ) SetDriveCounts ( ) [ ] int {
setDriveCounts := make ( [ ] int , len ( z . serverPools ) )
for i := range z . serverPools {
setDriveCounts [ i ] = z . serverPools [ i ] . SetDriveCount ( )
}
return setDriveCounts
2020-08-05 16:31:12 -04:00
}
2021-01-06 12:35:47 -05:00
// serverPoolsAvailableSpace is the per-pool free-space snapshot used when
// choosing a destination pool for new objects.
type serverPoolsAvailableSpace []poolAvailableSpace

// poolAvailableSpace records how much space one pool can still accept.
type poolAvailableSpace struct {
	// Index is the pool's position in erasureServerPools.serverPools.
	Index int
	// Available is the pool's available space; the producer scales it by
	// the pool's set count to make pools comparable.
	Available uint64
	MaxUsedPct int // Used disk percentage of most filled disk, rounded down.
}
// TotalAvailable - total available space
2020-12-01 16:50:33 -05:00
func ( p serverPoolsAvailableSpace ) TotalAvailable ( ) uint64 {
2019-11-19 20:42:27 -05:00
total := uint64 ( 0 )
for _ , z := range p {
total += z . Available
}
return total
}
2022-05-25 16:20:20 -04:00
// FilterMaxUsed will filter out any pools that has used percent bigger than max,
// unless all have that, in which case all are preserved.
func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) {
	// We aren't modifying p, only entries in it, so we don't need to receive a pointer.
	if len(p) <= 1 {
		// Nothing to do.
		return
	}

	// Is at least one pool below the threshold? If not, filtering would
	// eliminate every candidate, so leave the slice untouched.
	anyBelow := false
	for _, pool := range p {
		if pool.MaxUsedPct < max {
			anyBelow = true
			break
		}
	}
	if !anyBelow {
		// All above limit. Do not modify.
		return
	}

	// Zero the available space of each pool at or above the limit so the
	// weighted placement never picks it.
	for i := range p {
		if p[i].MaxUsedPct >= max {
			p[i].Available = 0
		}
	}
}
2021-01-26 23:47:42 -05:00
// getAvailablePoolIdx will return an index that can hold size bytes.
2020-12-01 16:50:33 -05:00
// -1 is returned if no serverPools have available space for the size given.
2021-06-07 11:13:15 -04:00
func ( z * erasureServerPools ) getAvailablePoolIdx ( ctx context . Context , bucket , object string , size int64 ) int {
serverPools := z . getServerPoolsAvailableSpace ( ctx , bucket , object , size )
2022-05-25 16:20:20 -04:00
serverPools . FilterMaxUsed ( 100 - ( 100 * diskReserveFraction ) )
2020-12-01 16:50:33 -05:00
total := serverPools . TotalAvailable ( )
2019-11-19 20:42:27 -05:00
if total == 0 {
2020-06-20 09:36:44 -04:00
return - 1
2019-11-19 20:42:27 -05:00
}
// choose when we reach this many
choose := rand . Uint64 ( ) % total
atTotal := uint64 ( 0 )
2021-01-06 12:35:47 -05:00
for _ , pool := range serverPools {
atTotal += pool . Available
if atTotal > choose && pool . Available > 0 {
return pool . Index
2019-11-19 20:42:27 -05:00
}
}
// Should not happen, but print values just in case.
2020-12-01 16:50:33 -05:00
logger . LogIf ( ctx , fmt . Errorf ( "reached end of serverPools (total: %v, atTotal: %v, choose: %v)" , total , atTotal , choose ) )
2020-06-20 09:36:44 -04:00
return - 1
2019-11-19 20:42:27 -05:00
}
2021-01-06 12:35:47 -05:00
// getServerPoolsAvailableSpace will return the available space of each pool after storing the content.
// If there is not enough space the pool will return 0 bytes available.
// The size of each will be multiplied by the number of sets.
// Negative sizes are seen as 0 bytes.
func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, bucket, object string, size int64) serverPoolsAvailableSpace {
	serverPools := make(serverPoolsAvailableSpace, len(z.serverPools))

	storageInfos := make([][]*DiskInfo, len(z.serverPools))
	nSets := make([]int, len(z.serverPools))
	g := errgroup.WithNErrs(len(z.serverPools))
	for index := range z.serverPools {
		index := index
		// skip suspended pools for any new I/O.
		// Note: a suspended pool keeps its zero-valued storageInfos/nSets
		// entries, so the loop below reports it with 0 available bytes.
		if z.IsSuspended(index) {
			continue
		}
		pool := z.serverPools[index]
		nSets[index] = pool.setCount
		g.Go(func() error {
			// Get the set where it would be placed.
			storageInfos[index] = getDiskInfos(ctx, pool.getHashedSet(object).getDisks()...)
			return nil
		}, index)
	}
	// Wait for the go routines.
	g.Wait()

	for i, zinfo := range storageInfos {
		var available uint64
		// Internal (minio meta) buckets bypass the space check so system
		// writes still succeed on nearly-full pools.
		if !isMinioMetaBucketName(bucket) && !hasSpaceFor(zinfo, size) {
			serverPools[i] = poolAvailableSpace{Index: i}
			continue
		}
		var maxUsedPct int
		for _, disk := range zinfo {
			if disk == nil || disk.Total == 0 {
				// Offline or unreadable disk; contributes nothing.
				continue
			}
			available += disk.Total - disk.Used

			// set maxUsedPct to the value from the disk with the least space percentage.
			if pctUsed := int(disk.Used * 100 / disk.Total); pctUsed > maxUsedPct {
				maxUsedPct = pctUsed
			}
		}

		// Since we are comparing pools that may have a different number of sets
		// we multiply by the number of sets in the pool.
		// This will compensate for differences in set sizes
		// when choosing destination pool.
		// Different set sizes are already compensated by less disks.
		available *= uint64(nSets[i])

		serverPools[i] = poolAvailableSpace{
			Index:      i,
			Available:  available,
			MaxUsedPct: maxUsedPct,
		}
	}
	return serverPools
}
2022-08-18 19:41:59 -04:00
// PoolObjInfo represents the state of current object version per pool
type PoolObjInfo struct {
	Index   int        // pool index the lookup ran against
	ObjInfo ObjectInfo // object info returned by that pool
	Err     error      // lookup error from that pool, if any
}
2022-08-18 19:41:59 -04:00
// getPoolInfoExistingWithOpts queries every pool in parallel for the object
// and returns the entry with the most recent ModTime. Suspended
// (decommissioning) pools can be excluded via opts.SkipDecommissioned.
// Returns errFileNotFound (wrapped) when no pool holds the object.
func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (PoolObjInfo, error) {
	poolObjInfos := make([]PoolObjInfo, len(z.serverPools))
	poolOpts := make([]ObjectOptions, len(z.serverPools))
	for i := range z.serverPools {
		poolOpts[i] = opts
	}

	var wg sync.WaitGroup
	for i, pool := range z.serverPools {
		wg.Add(1)
		go func(i int, pool *erasureSets, opts ObjectOptions) {
			defer wg.Done()
			// remember the pool index, we may sort the slice original index might be lost.
			pinfo := PoolObjInfo{
				Index: i,
			}
			// do not remove this check as it can lead to inconsistencies
			// for all callers of bucket replication.
			opts.VersionID = ""
			pinfo.ObjInfo, pinfo.Err = pool.GetObjectInfo(ctx, bucket, object, opts)
			poolObjInfos[i] = pinfo
		}(i, pool, poolOpts[i])
	}
	wg.Wait()

	// Sort the objInfos such that we always serve latest
	// this is a defensive change to handle any duplicate
	// content that may have been created, we always serve
	// the latest object.
	sort.Slice(poolObjInfos, func(i, j int) bool {
		mtime1 := poolObjInfos[i].ObjInfo.ModTime
		mtime2 := poolObjInfos[j].ObjInfo.ModTime
		return mtime1.After(mtime2)
	})

	for _, pinfo := range poolObjInfos {
		// skip all objects from suspended pools if asked by the
		// caller.
		if z.IsSuspended(pinfo.Index) && opts.SkipDecommissioned {
			continue
		}
		// Any error other than "object not found" aborts the lookup.
		if pinfo.Err != nil && !isErrObjectNotFound(pinfo.Err) {
			return pinfo, pinfo.Err
		}
		if isErrObjectNotFound(pinfo.Err) {
			// No object exists or its a delete marker,
			// check objInfo to confirm.
			if pinfo.ObjInfo.DeleteMarker && pinfo.ObjInfo.Name != "" {
				return pinfo, nil
			}
			// objInfo is not valid, truly the object doesn't
			// exist proceed to next pool.
			continue
		}

		return pinfo, nil
	}

	return PoolObjInfo{}, toObjectErr(errFileNotFound, bucket, object)
}
func ( z * erasureServerPools ) getPoolIdxExistingWithOpts ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( idx int , err error ) {
pinfo , err := z . getPoolInfoExistingWithOpts ( ctx , bucket , object , opts )
if err != nil {
return - 1 , err
}
return pinfo . Index , nil
2021-03-16 14:02:20 -04:00
}
2022-01-10 12:07:49 -05:00
// getPoolIdxExistingNoLock returns the (first) found object pool index containing an object.
// If the object exists, but the latest version is a delete marker, the index with it is still returned.
// If the object does not exist ObjectNotFound error is returned.
// If any other error is found, it is returned.
// The check is skipped if there is only one pool, and 0, nil is always returned in that case.
// NOTE(review): the single-pool short-circuit described in the line above is
// not visible in this call chain (getPoolIdxExistingWithOpts /
// getPoolInfoExistingWithOpts) — confirm it still holds.
func (z *erasureServerPools) getPoolIdxExistingNoLock(ctx context.Context, bucket, object string) (idx int, err error) {
	// Lock-free lookup that also skips pools under decommission.
	return z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{
		NoLock:             true,
		SkipDecommissioned: true,
	})
}
2021-09-23 00:46:24 -04:00
// getPoolIdxNoLock returns the pool holding an existing object; when the
// object does not exist anywhere it falls back to a pool chosen by
// available space. No lock is taken during the existence check.
func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, object string, size int64) (idx int, err error) {
	idx, err = z.getPoolIdxExistingNoLock(ctx, bucket, object)
	switch {
	case err == nil:
		return idx, nil
	case isErrObjectNotFound(err):
		// Fresh object: pick a pool by available space.
		if idx = z.getAvailablePoolIdx(ctx, bucket, object, size); idx < 0 {
			return -1, toObjectErr(errDiskFull)
		}
		return idx, nil
	default:
		return idx, err
	}
}
2021-01-26 23:47:42 -05:00
// getPoolIdx returns the found previous object and its corresponding pool idx,
2022-01-10 12:07:49 -05:00
// if none are found falls back to most available space pool, this function is
// designed to be only used by PutObject, CopyObject (newObject creation) and NewMultipartUpload.
2021-02-10 14:45:02 -05:00
func ( z * erasureServerPools ) getPoolIdx ( ctx context . Context , bucket , object string , size int64 ) ( idx int , err error ) {
2022-07-16 22:35:24 -04:00
idx , err = z . getPoolIdxExistingWithOpts ( ctx , bucket , object , ObjectOptions { SkipDecommissioned : true } )
2021-05-06 13:45:33 -04:00
if err != nil && ! isErrObjectNotFound ( err ) {
return idx , err
2020-06-17 11:33:14 -04:00
}
2021-02-16 22:36:15 -05:00
2021-05-06 13:45:33 -04:00
if isErrObjectNotFound ( err ) {
2021-06-07 11:13:15 -04:00
idx = z . getAvailablePoolIdx ( ctx , bucket , object , size )
2021-05-06 13:45:33 -04:00
if idx < 0 {
return - 1 , toObjectErr ( errDiskFull )
2020-06-17 11:33:14 -04:00
}
}
2020-06-20 09:36:44 -04:00
return idx , nil
2020-06-17 11:33:14 -04:00
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) Shutdown ( ctx context . Context ) error {
2020-09-10 12:18:19 -04:00
defer z . shutdown ( )
2019-11-19 20:42:27 -05:00
2020-12-01 16:50:33 -05:00
g := errgroup . WithNErrs ( len ( z . serverPools ) )
2019-11-19 20:42:27 -05:00
2020-12-01 16:50:33 -05:00
for index := range z . serverPools {
2019-11-19 20:42:27 -05:00
index := index
g . Go ( func ( ) error {
2020-12-01 16:50:33 -05:00
return z . serverPools [ index ] . Shutdown ( ctx )
2019-11-19 20:42:27 -05:00
} , index )
}
for _ , err := range g . Wait ( ) {
if err != nil {
logger . LogIf ( ctx , err )
}
// let's the rest shutdown
}
return nil
}
2021-03-04 17:36:23 -05:00
func ( z * erasureServerPools ) BackendInfo ( ) ( b madmin . BackendInfo ) {
b . Type = madmin . Erasure
2020-12-21 12:35:19 -05:00
scParity := globalStorageClass . GetParityForSC ( storageclass . STANDARD )
2022-06-27 23:22:18 -04:00
if scParity < 0 {
2021-01-16 15:08:02 -05:00
scParity = z . serverPools [ 0 ] . defaultParityCount
2020-12-21 12:35:19 -05:00
}
rrSCParity := globalStorageClass . GetParityForSC ( storageclass . RRS )
2021-01-22 15:09:24 -05:00
// Data blocks can vary per pool, but parity is same.
2022-07-27 17:41:59 -04:00
for i , setDriveCount := range z . SetDriveCounts ( ) {
2021-01-22 15:09:24 -05:00
b . StandardSCData = append ( b . StandardSCData , setDriveCount - scParity )
b . RRSCData = append ( b . RRSCData , setDriveCount - rrSCParity )
2022-07-27 17:41:59 -04:00
b . DrivesPerSet = append ( b . DrivesPerSet , setDriveCount )
b . TotalSets = append ( b . TotalSets , z . serverPools [ i ] . setCount )
2021-01-22 15:09:24 -05:00
}
b . StandardSCParity = scParity
2020-12-21 12:35:19 -05:00
b . RRSCParity = rrSCParity
return
}
2021-03-02 20:28:04 -05:00
// LocalStorageInfo aggregates storage information for drives local to this
// node, gathered from every pool in parallel.
func (z *erasureServerPools) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
	poolCount := len(z.serverPools)
	infos := make([]StorageInfo, poolCount)
	infoErrs := make([][]error, poolCount)

	g := errgroup.WithNErrs(poolCount)
	for idx := range z.serverPools {
		idx := idx
		g.Go(func() error {
			infos[idx], infoErrs[idx] = z.serverPools[idx].LocalStorageInfo(ctx)
			return nil
		}, idx)
	}
	// Wait for the go routines.
	g.Wait()

	var merged StorageInfo
	merged.Backend = z.BackendInfo()
	for _, info := range infos {
		merged.Disks = append(merged.Disks, info.Disks...)
	}

	var errs []error
	for idx := range z.serverPools {
		errs = append(errs, infoErrs[idx]...)
	}
	return merged, errs
}
2021-01-04 12:42:09 -05:00
func ( z * erasureServerPools ) StorageInfo ( ctx context . Context ) ( StorageInfo , [ ] error ) {
2019-11-19 20:42:27 -05:00
var storageInfo StorageInfo
2020-12-01 16:50:33 -05:00
storageInfos := make ( [ ] StorageInfo , len ( z . serverPools ) )
storageInfosErrs := make ( [ ] [ ] error , len ( z . serverPools ) )
g := errgroup . WithNErrs ( len ( z . serverPools ) )
for index := range z . serverPools {
2019-11-19 20:42:27 -05:00
index := index
g . Go ( func ( ) error {
2021-01-04 12:42:09 -05:00
storageInfos [ index ] , storageInfosErrs [ index ] = z . serverPools [ index ] . StorageInfo ( ctx )
2019-11-19 20:42:27 -05:00
return nil
} , index )
}
// Wait for the go routines.
g . Wait ( )
2020-12-21 12:35:19 -05:00
storageInfo . Backend = z . BackendInfo ( )
2019-11-19 20:42:27 -05:00
for _ , lstorageInfo := range storageInfos {
2020-07-13 12:51:07 -04:00
storageInfo . Disks = append ( storageInfo . Disks , lstorageInfo . Disks ... )
2020-10-22 16:36:24 -04:00
}
2020-05-28 16:03:04 -04:00
var errs [ ] error
2020-12-01 16:50:33 -05:00
for i := range z . serverPools {
2020-05-28 16:03:04 -04:00
errs = append ( errs , storageInfosErrs [ i ] ... )
}
return storageInfo , errs
2019-11-19 20:42:27 -05:00
}
2022-04-07 11:10:40 -04:00
// NSScanner runs the namespace scanner across every erasure set of every
// pool, periodically streaming merged DataUsageInfo snapshots to updates
// until all per-set scanners finish or ctx is canceled. The first scanner
// error (or ctx.Err) is returned.
func (z *erasureServerPools) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error {
	// Updates must be closed before we return.
	defer close(updates)

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	var mu sync.Mutex // guards results, firstErr
	var results []dataUsageCache
	var firstErr error

	allBuckets, err := z.ListBuckets(ctx, BucketOptions{})
	if err != nil {
		return err
	}

	if len(allBuckets) == 0 {
		updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state
		return nil
	}

	// Scanner latest allBuckets first.
	sort.Slice(allBuckets, func(i, j int) bool {
		return allBuckets[i].Created.After(allBuckets[j].Created)
	})

	// Collect for each set in serverPools.
	// NOTE: the loop variable z shadows the receiver inside this loop.
	for _, z := range z.serverPools {
		for _, erObj := range z.sets {
			wg.Add(1)
			results = append(results, dataUsageCache{})
			go func(i int, erObj *erasureObjects) {
				updates := make(chan dataUsageCache, 1)
				defer close(updates)
				// Start update collector. Owns wg.Done: it only returns
				// once the updates channel is closed by the defer above.
				go func() {
					defer wg.Done()
					for info := range updates {
						mu.Lock()
						results[i] = info
						mu.Unlock()
					}
				}()
				// Start scanner. Blocks until done.
				err := erObj.nsScanner(ctx, allBuckets, bf, wantCycle, updates, healScanMode)
				if err != nil {
					logger.LogIf(ctx, err)
					mu.Lock()
					if firstErr == nil {
						firstErr = err
					}
					// Cancel remaining...
					cancel()
					mu.Unlock()
					return
				}
			}(len(results)-1, erObj)
		}
	}

	// Publisher goroutine: merges partial results every 30s (or on request
	// via updateCloser) and pushes a snapshot to the caller.
	updateCloser := make(chan chan struct{})
	go func() {
		updateTicker := time.NewTicker(30 * time.Second)
		defer updateTicker.Stop()
		var lastUpdate time.Time

		// We need to merge since we will get the same buckets from each pool.
		// Therefore to get the exact bucket sizes we must merge before we can convert.
		var allMerged dataUsageCache

		update := func() {
			mu.Lock()
			defer mu.Unlock()

			allMerged = dataUsageCache{Info: dataUsageCacheInfo{Name: dataUsageRoot}}
			for _, info := range results {
				if info.Info.LastUpdate.IsZero() {
					// Not filled yet.
					return
				}
				allMerged.merge(info)
			}
			// Only publish when the merged view is newer than the last push.
			if allMerged.root() != nil && allMerged.Info.LastUpdate.After(lastUpdate) {
				updates <- allMerged.dui(allMerged.Info.Name, allBuckets)
				lastUpdate = allMerged.Info.LastUpdate
			}
		}
		for {
			select {
			case <-ctx.Done():
				return
			case v := <-updateCloser:
				// Final flush requested: publish once, ack, and exit.
				update()
				close(v)
				return
			case <-updateTicker.C:
				update()
			}
		}
	}()

	wg.Wait()
	// Ask the publisher for a final flush; if ctx is already done the
	// publisher may have exited, so fall through with ctx.Err instead.
	ch := make(chan struct{})
	select {
	case updateCloser <- ch:
		<-ch
	case <-ctx.Done():
		if firstErr == nil {
			firstErr = ctx.Err()
		}
	}
	return firstErr
}
2020-12-01 16:50:33 -05:00
// MakeBucketWithLocation - creates a new bucket across all serverPools simultaneously
// even if one of the sets fail to create buckets, we proceed all the successful
// operations.
func (z *erasureServerPools) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error {
	g := errgroup.WithNErrs(len(z.serverPools))

	// Lock the bucket name before creating.
	lk := z.NewNSLock(minioMetaTmpBucket, bucket+".lck")
	lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return err
	}
	ctx = lkctx.Context()
	defer lk.Unlock(lkctx.Cancel)

	// Create buckets in parallel across all sets.
	for index := range z.serverPools {
		index := index
		g.Go(func() error {
			// Suspended (decommissioning) pools do not take new buckets.
			if z.IsSuspended(index) {
				return nil
			}
			return z.serverPools[index].MakeBucketWithLocation(ctx, bucket, opts)
		}, index)
	}

	errs := g.Wait()
	// Return the first encountered error
	for _, err := range errs {
		if err != nil {
			if _, ok := err.(BucketExists); !ok {
				// Delete created buckets, ignoring errors.
				// Best-effort rollback so pools do not diverge.
				z.DeleteBucket(context.Background(), bucket, DeleteBucketOptions{
					Force:      false,
					NoRecreate: true,
				})
			}
			return err
		}
	}

	// If it doesn't exist we get a new, so ignore errors
	meta := newBucketMetadata(bucket)
	meta.SetCreatedAt(opts.CreatedAt)
	if opts.LockEnabled {
		// Object locking implies versioning, so both configs are enabled.
		meta.VersioningConfigXML = enabledBucketVersioningConfig
		meta.ObjectLockConfigXML = enabledBucketObjectLockConfig
	}

	if opts.VersioningEnabled {
		meta.VersioningConfigXML = enabledBucketVersioningConfig
	}

	if err := meta.Save(context.Background(), z); err != nil {
		return toObjectErr(err, bucket)
	}

	globalBucketMetadataSys.Set(bucket, meta)

	// Success.
	return nil
}
2020-12-01 16:50:33 -05:00
// GetObjectNInfo returns a GetObjectReader streaming the latest version of
// the object. On multi-pool setups the pool holding the newest copy is
// located first and only that pool is asked to stream; any namespace lock
// requested via lockType is taken once here and lower layers are told to
// skip locking.
func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
	if err = checkGetObjArgs(ctx, bucket, object); err != nil {
		return nil, err
	}

	object = encodeDirObject(object)

	if z.SinglePool() {
		return z.serverPools[0].GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
	}

	// unlockOnDefer guards the deferred release: once lock ownership is
	// handed to the returned reader below, the defer must become a no-op.
	var unlockOnDefer bool
	nsUnlocker := func() {}
	defer func() {
		if unlockOnDefer {
			nsUnlocker()
		}
	}()

	// Acquire lock
	if lockType != noLock {
		lock := z.NewNSLock(bucket, object)
		switch lockType {
		case writeLock:
			lkctx, err := lock.GetLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.Unlock(lkctx.Cancel) }
		case readLock:
			lkctx, err := lock.GetRLock(ctx, globalOperationTimeout)
			if err != nil {
				return nil, err
			}
			ctx = lkctx.Context()
			nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) }
		}
		unlockOnDefer = true
	}

	// Defer the precondition check until after the latest version is known.
	checkPrecondFn := opts.CheckPrecondFn
	opts.CheckPrecondFn = nil // do not need to apply pre-conditions at lower layer.
	opts.NoLock = true        // no locks needed at lower levels for getObjectInfo()
	objInfo, zIdx, err := z.getLatestObjectInfoWithIdx(ctx, bucket, object, opts)
	if err != nil {
		if objInfo.DeleteMarker {
			// Delete-marker hits still return ObjInfo so callers can
			// surface marker metadata alongside the error.
			if opts.VersionID == "" {
				return &GetObjectReader{
					ObjInfo: objInfo,
				}, toObjectErr(errFileNotFound, bucket, object)
			}
			// Make sure to return object info to provide extra information.
			return &GetObjectReader{
				ObjInfo: objInfo,
			}, toObjectErr(errMethodNotAllowed, bucket, object)
		}
		return nil, err
	}

	// check preconditions before reading the stream.
	if checkPrecondFn != nil && checkPrecondFn(objInfo) {
		return nil, PreConditionFailed{}
	}

	lockType = noLock // do not take locks at lower levels for GetObjectNInfo()
	gr, err = z.serverPools[zIdx].GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
	if err != nil {
		return nil, err
	}

	if unlockOnDefer {
		// Transfer lock release to the reader's cleanup and disarm the
		// deferred unlock so it cannot fire twice.
		unlockOnDefer = false
		return gr.WithCleanupFuncs(nsUnlocker), nil
	}
	return gr, nil
}
2021-06-24 12:44:00 -04:00
// getLatestObjectInfoWithIdx returns the objectInfo of the latest object from multiple pools (this function
// is present in-case there were duplicate writes to both pools, this function also returns the
// additional index where the latest object exists, that is used to start the GetObject stream.
func ( z * erasureServerPools ) getLatestObjectInfoWithIdx ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( ObjectInfo , int , error ) {
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-06-14 14:00:13 -04:00
results := make ( [ ] struct {
zIdx int
oi ObjectInfo
err error
} , len ( z . serverPools ) )
2021-02-16 05:43:47 -05:00
var wg sync . WaitGroup
for i , pool := range z . serverPools {
wg . Add ( 1 )
go func ( i int , pool * erasureSets ) {
defer wg . Done ( )
2021-06-14 14:00:13 -04:00
results [ i ] . zIdx = i
results [ i ] . oi , results [ i ] . err = pool . GetObjectInfo ( ctx , bucket , object , opts )
2021-02-16 05:43:47 -05:00
} ( i , pool )
}
wg . Wait ( )
2021-06-11 02:07:16 -04:00
// Sort the objInfos such that we always serve latest
// this is a defensive change to handle any duplicate
// content that may have been created, we always serve
// the latest object.
2021-06-14 14:00:13 -04:00
sort . Slice ( results , func ( i , j int ) bool {
a , b := results [ i ] , results [ j ]
if a . oi . ModTime . Equal ( b . oi . ModTime ) {
2022-07-05 10:37:24 -04:00
// On tiebreak, select the lowest pool index.
2021-06-14 14:00:13 -04:00
return a . zIdx < b . zIdx
}
return a . oi . ModTime . After ( b . oi . ModTime )
} )
2021-06-24 12:44:00 -04:00
2021-06-14 14:00:13 -04:00
for _ , res := range results {
err := res . err
2021-02-16 05:43:47 -05:00
if err == nil {
2021-06-24 12:44:00 -04:00
return res . oi , res . zIdx , nil
2021-02-16 05:43:47 -05:00
}
if ! isErrObjectNotFound ( err ) && ! isErrVersionNotFound ( err ) {
// some errors such as MethodNotAllowed for delete marker
// should be returned upwards.
2021-06-24 12:44:00 -04:00
return res . oi , res . zIdx , err
2019-11-19 20:42:27 -05:00
}
2022-03-28 02:39:50 -04:00
// When its a delete marker and versionID is empty
// we should simply return the error right away.
if res . oi . DeleteMarker && opts . VersionID == "" {
return res . oi , res . zIdx , err
}
2019-11-19 20:42:27 -05:00
}
2021-02-16 05:43:47 -05:00
2020-09-19 11:39:41 -04:00
object = decodeDirObject ( object )
2020-07-02 19:17:27 -04:00
if opts . VersionID != "" {
2021-06-24 12:44:00 -04:00
return ObjectInfo { } , - 1 , VersionNotFound { Bucket : bucket , Object : object , VersionID : opts . VersionID }
2020-07-02 19:17:27 -04:00
}
2021-06-24 12:44:00 -04:00
return ObjectInfo { } , - 1 , ObjectNotFound { Bucket : bucket , Object : object }
}
func ( z * erasureServerPools ) GetObjectInfo ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( objInfo ObjectInfo , err error ) {
if err = checkGetObjArgs ( ctx , bucket , object ) ; err != nil {
return objInfo , err
}
object = encodeDirObject ( object )
if z . SinglePool ( ) {
return z . serverPools [ 0 ] . GetObjectInfo ( ctx , bucket , object , opts )
}
if ! opts . NoLock {
opts . NoLock = true // avoid taking locks at lower levels for multi-pool setups.
// Lock the object before reading.
lk := z . NewNSLock ( bucket , object )
lkctx , err := lk . GetRLock ( ctx , globalOperationTimeout )
if err != nil {
return ObjectInfo { } , err
}
ctx = lkctx . Context ( )
defer lk . RUnlock ( lkctx . Cancel )
}
objInfo , _ , err = z . getLatestObjectInfoWithIdx ( ctx , bucket , object , opts )
return objInfo , err
2019-11-19 20:42:27 -05:00
}
2021-01-06 12:35:47 -05:00
// PutObject - writes an object to least used erasure pool.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) PutObject ( ctx context . Context , bucket string , object string , data * PutObjReader , opts ObjectOptions ) ( ObjectInfo , error ) {
2020-10-06 15:03:57 -04:00
// Validate put object input args.
if err := checkPutObjectArgs ( ctx , bucket , object , z ) ; err != nil {
return ObjectInfo { } , err
}
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2022-05-30 13:58:37 -04:00
if ! isMinioMetaBucketName ( bucket ) && ! hasSpaceFor ( getDiskInfos ( ctx , z . serverPools [ 0 ] . getHashedSet ( object ) . getDisks ( ) ... ) , data . Size ( ) ) {
2021-06-07 11:13:15 -04:00
return ObjectInfo { } , toObjectErr ( errDiskFull )
}
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . PutObject ( ctx , bucket , object , data , opts )
2019-11-19 20:42:27 -05:00
}
2021-06-21 12:25:10 -04:00
if ! opts . NoLock {
2021-09-23 00:46:24 -04:00
ns := z . NewNSLock ( bucket , object )
2021-06-21 12:25:10 -04:00
lkctx , err := ns . GetLock ( ctx , globalOperationTimeout )
if err != nil {
return ObjectInfo { } , err
}
ctx = lkctx . Context ( )
defer ns . Unlock ( lkctx . Cancel )
opts . NoLock = true
}
2019-11-19 20:42:27 -05:00
2021-09-23 00:46:24 -04:00
idx , err := z . getPoolIdxNoLock ( ctx , bucket , object , data . Size ( ) )
2020-06-17 11:33:14 -04:00
if err != nil {
return ObjectInfo { } , err
2019-11-19 20:42:27 -05:00
}
2020-06-17 11:33:14 -04:00
2021-01-06 12:35:47 -05:00
// Overwrite the object at the right pool
2020-12-01 16:50:33 -05:00
return z . serverPools [ idx ] . PutObject ( ctx , bucket , object , data , opts )
2019-11-19 20:42:27 -05:00
}
2021-06-15 21:43:14 -04:00
func ( z * erasureServerPools ) deletePrefix ( ctx context . Context , bucket string , prefix string ) error {
2022-07-14 23:44:22 -04:00
for _ , pool := range z . serverPools {
if _ , err := pool . DeleteObject ( ctx , bucket , prefix , ObjectOptions { DeletePrefix : true } ) ; err != nil {
2021-06-15 21:43:14 -04:00
return err
}
}
return nil
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) DeleteObject ( ctx context . Context , bucket string , object string , opts ObjectOptions ) ( objInfo ObjectInfo , err error ) {
2020-10-06 15:03:57 -04:00
if err = checkDelObjArgs ( ctx , bucket , object ) ; err != nil {
return objInfo , err
}
2021-06-15 21:43:14 -04:00
if opts . DeletePrefix {
err := z . deletePrefix ( ctx , bucket , object )
return ObjectInfo { } , err
}
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2022-08-18 19:41:59 -04:00
// Acquire a write lock before deleting the object.
lk := z . NewNSLock ( bucket , object )
lkctx , err := lk . GetLock ( ctx , globalDeleteOperationTimeout )
if err != nil {
return ObjectInfo { } , err
2019-11-19 20:42:27 -05:00
}
2022-08-18 19:41:59 -04:00
ctx = lkctx . Context ( )
defer lk . Unlock ( lkctx . Cancel )
2021-02-08 21:12:28 -05:00
2022-08-18 19:41:59 -04:00
gopts := opts
gopts . NoLock = true
pinfo , err := z . getPoolInfoExistingWithOpts ( ctx , bucket , object , gopts )
2021-02-08 21:12:28 -05:00
if err != nil {
2022-08-18 19:41:59 -04:00
switch err . ( type ) {
case InsufficientReadQuorum :
return objInfo , InsufficientWriteQuorum { }
}
2021-02-08 21:12:28 -05:00
return objInfo , err
2019-11-19 20:42:27 -05:00
}
2020-10-08 15:32:32 -04:00
2022-08-18 19:41:59 -04:00
// Delete marker already present we are not going to create new delete markers.
if pinfo . ObjInfo . DeleteMarker && opts . VersionID == "" {
pinfo . ObjInfo . Name = decodeDirObject ( object )
return pinfo . ObjInfo , nil
}
objInfo , err = z . serverPools [ pinfo . Index ] . DeleteObject ( ctx , bucket , object , opts )
objInfo . Name = decodeDirObject ( object )
return objInfo , err
2019-11-19 20:42:27 -05:00
}
2020-12-01 16:50:33 -05:00
// DeleteObjects performs a bulk delete of 'objects'. Results are positional:
// dobjects[i] and derrs[i] always correspond to objects[i]. The whole batch
// is protected by a single multi-object namespace lock.
func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
	derrs := make([]error, len(objects))
	dobjects := make([]DeletedObject, len(objects))
	objSets := set.NewStringSet()
	for i := range derrs {
		objects[i].ObjectName = encodeDirObject(objects[i].ObjectName)

		derrs[i] = checkDelObjArgs(ctx, bucket, objects[i].ObjectName)
		objSets.Add(objects[i].ObjectName)
	}

	// Acquire a bulk write lock across 'objects'
	multiDeleteLock := z.NewNSLock(bucket, objSets.ToSlice()...)
	lkctx, err := multiDeleteLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		// Lock acquisition failed: report the same error for every entry.
		for i := range derrs {
			derrs[i] = err
		}
		return dobjects, derrs
	}
	ctx = lkctx.Context()
	defer multiDeleteLock.Unlock(lkctx.Cancel)

	// poolObjIdxMap groups the objects by the pool that holds them;
	// origIndexMap remembers each object's position in the caller's slice
	// so results can be written back positionally.
	poolObjIdxMap := map[int][]ObjectToDelete{}
	origIndexMap := map[int][]int{}

	// Always perform 1/10th of the number of objects per delete
	concurrent := len(objects) / 10
	if concurrent <= 10 {
		// if we cannot get 1/10th then choose the number of
		// objects as concurrent.
		concurrent = len(objects)
	}

	var mu sync.Mutex
	eg := errgroup.WithNErrs(len(objects)).WithConcurrency(concurrent)
	for j, obj := range objects {
		j := j
		obj := obj
		eg.Go(func() error {
			// Locate the pool that holds this object. NoLock because the
			// bulk lock above already covers the whole batch.
			pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{
				NoLock: true,
			})
			if err != nil {
				derrs[j] = err
				dobjects[j] = DeletedObject{
					ObjectName: obj.ObjectName,
				}
				return nil
			}

			// Delete marker already present we are not going to create new delete markers.
			if pinfo.ObjInfo.DeleteMarker && obj.VersionID == "" {
				dobjects[j] = DeletedObject{
					DeleteMarker:          pinfo.ObjInfo.DeleteMarker,
					DeleteMarkerVersionID: pinfo.ObjInfo.VersionID,
					DeleteMarkerMTime:     DeleteMarkerMTime{pinfo.ObjInfo.ModTime},
					ObjectName:            pinfo.ObjInfo.Name,
				}
				return nil
			}

			idx := pinfo.Index

			// mu guards the two shared maps written from concurrent goroutines.
			mu.Lock()
			defer mu.Unlock()
			poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj)
			origIndexMap[idx] = append(origIndexMap[idx], j)
			return nil
		}, j)
	}
	eg.Wait() // wait to check all the pools.

	if len(poolObjIdxMap) > 0 {
		// Delete concurrently in all server pools.
		var wg sync.WaitGroup
		wg.Add(len(z.serverPools))
		for idx, pool := range z.serverPools {
			go func(idx int, pool *erasureSets) {
				defer wg.Done()
				objs := poolObjIdxMap[idx]
				if len(objs) > 0 {
					orgIndexes := origIndexMap[idx]
					deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts)
					// mu also guards positional writes into derrs/dobjects here.
					mu.Lock()
					for i, derr := range errs {
						if derr != nil {
							derrs[orgIndexes[i]] = derr
						}
						deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName)
						dobjects[orgIndexes[i]] = deletedObjects[i]
					}
					mu.Unlock()
				}
			}(idx, pool)
		}
		wg.Wait()
	}

	return dobjects, derrs
}
2020-12-01 16:50:33 -05:00
// CopyObject copies an object (or updates its metadata in place). When the
// source and destination are the same object and only metadata changes, the
// owning pool services the copy directly; otherwise the source stream is
// re-written to the destination pool via PutObject.
func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
	srcObject = encodeDirObject(srcObject)
	dstObject = encodeDirObject(dstObject)

	cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))

	if !dstOpts.NoLock {
		ns := z.NewNSLock(dstBucket, dstObject)
		lkctx, err := ns.GetLock(ctx, globalOperationTimeout)
		if err != nil {
			return ObjectInfo{}, err
		}
		ctx = lkctx.Context()
		defer ns.Unlock(lkctx.Cancel)
		dstOpts.NoLock = true // lower layers must not re-lock.
	}

	poolIdx, err := z.getPoolIdxNoLock(ctx, dstBucket, dstObject, srcInfo.Size)
	if err != nil {
		return objInfo, err
	}

	if cpSrcDstSame && srcInfo.metadataOnly {
		// Version ID is set for the destination and source == destination version ID.
		if dstOpts.VersionID != "" && srcOpts.VersionID == dstOpts.VersionID {
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}

		// Destination is not versioned and source version ID is empty
		// perform an in-place update.
		if !dstOpts.Versioned && srcOpts.VersionID == "" {
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}

		// Destination is versioned, source is not destination version,
		// as a special case look for if the source object is not legacy
		// from older format, for older format we will rewrite them as
		// newer using PutObject() - this is an optimization to save space
		if dstOpts.Versioned && srcOpts.VersionID != dstOpts.VersionID && !srcInfo.Legacy {
			// CopyObject optimization where we don't create an entire copy
			// of the content, instead we add a reference.
			srcInfo.versionOnly = true
			return z.serverPools[poolIdx].CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, srcInfo, srcOpts, dstOpts)
		}
	}

	// Fallback: full rewrite of the source stream at the destination.
	putOpts := ObjectOptions{
		ServerSideEncryption: dstOpts.ServerSideEncryption,
		UserDefined:          srcInfo.UserDefined,
		Versioned:            dstOpts.Versioned,
		VersionID:            dstOpts.VersionID,
		MTime:                dstOpts.MTime,
		NoLock:               true, // lock already held above.
	}

	return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) ListObjectsV2 ( ctx context . Context , bucket , prefix , continuationToken , delimiter string , maxKeys int , fetchOwner bool , startAfter string ) ( ListObjectsV2Info , error ) {
2019-11-19 20:42:27 -05:00
marker := continuationToken
if marker == "" {
marker = startAfter
}
loi , err := z . ListObjects ( ctx , bucket , prefix , marker , delimiter , maxKeys )
if err != nil {
return ListObjectsV2Info { } , err
}
listObjectsV2Info := ListObjectsV2Info {
IsTruncated : loi . IsTruncated ,
ContinuationToken : continuationToken ,
NextContinuationToken : loi . NextMarker ,
Objects : loi . Objects ,
Prefixes : loi . Prefixes ,
}
return listObjectsV2Info , err
}
2020-12-01 16:50:33 -05:00
// ListObjectVersions lists objects together with their versions under
// prefix. Passing a versionMarker without a key marker is not supported.
func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
	loi := ListObjectVersionsInfo{}
	if marker == "" && versionMarker != "" {
		return loi, NotImplemented{}
	}

	opts := listPathOptions{
		Bucket:    bucket,
		Prefix:    prefix,
		Separator: delimiter,
		// Ask for one extra entry when resuming so truncation is detectable.
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: true,
		AskDisks:    globalAPIConfig.getListQuorum(),
		Versioned:   true,
	}

	// set bucket metadata in opts
	opts.setBucketMeta(ctx)

	merged, err := z.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		return loi, err
	}
	defer merged.truncate(0) // Release when returning

	if versionMarker == "" {
		o := listPathOptions{Marker: marker}
		// If we are not looking for a specific version skip it.
		o.parseMarker()
		merged.forwardPast(o.Marker)
	}
	objects := merged.fileInfoVersions(bucket, prefix, delimiter, versionMarker)
	// io.EOF from listPath means the listing is exhausted, hence not truncated.
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Directory entries with zero mod-time are common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
		loi.NextVersionIDMarker = last.VersionID
	}
	return loi, nil
}
2019-11-19 20:42:27 -05:00
2021-03-01 11:12:02 -05:00
// maxKeysPlusOne clamps maxKeys to the server-wide listing limit and, when
// addOne is set, reserves one extra slot (used to detect truncation when
// resuming a listing from a marker).
func maxKeysPlusOne(maxKeys int, addOne bool) int {
	if maxKeys < 0 || maxKeys > maxObjectList {
		maxKeys = maxObjectList
	}
	if !addOne {
		return maxKeys
	}
	return maxKeys + 1
}
2020-12-01 16:50:33 -05:00
// ListObjects performs a (possibly hierarchical, when delimiter is set)
// listing of objects under prefix across all pools.
func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
	var loi ListObjectsInfo

	opts := listPathOptions{
		Bucket:    bucket,
		Prefix:    prefix,
		Separator: delimiter,
		// Ask for one extra entry when resuming so truncation is detectable.
		Limit:       maxKeysPlusOne(maxKeys, marker != ""),
		Marker:      marker,
		InclDeleted: false,
		AskDisks:    globalAPIConfig.getListQuorum(),
	}
	opts.setBucketMeta(ctx)

	if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
		// Optimization for certain applications like
		// - Cohesity
		// - Actifio, Splunk etc.
		// which send ListObjects requests where the actual object
		// itself is the prefix and max-keys=1 in such scenarios
		// we can simply verify locally if such an object exists
		// to avoid the need for ListObjects().
		objInfo, err := z.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
		if err == nil {
			if opts.Lifecycle != nil {
				// Do not list entries that a lifecycle rule would have expired.
				action := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, objInfo)
				switch action {
				case lifecycle.DeleteVersionAction, lifecycle.DeleteAction:
					fallthrough
				case lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction:
					return loi, nil
				}
			}
			loi.Objects = append(loi.Objects, objInfo)
			return loi, nil
		}
		// On lookup error fall through to the regular listing path.
	}

	merged, err := z.listPath(ctx, &opts)
	if err != nil && err != io.EOF {
		if !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
		return loi, err
	}

	merged.forwardPast(opts.Marker)
	defer merged.truncate(0) // Release when returning

	// Default is recursive, if delimiter is set then list non recursive.
	objects := merged.fileInfos(bucket, prefix, delimiter)
	// io.EOF from listPath means the listing is exhausted, hence not truncated.
	loi.IsTruncated = err == nil && len(objects) > 0
	if maxKeys > 0 && len(objects) > maxKeys {
		objects = objects[:maxKeys]
		loi.IsTruncated = true
	}
	for _, obj := range objects {
		if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" {
			// Directory entries with zero mod-time are common prefixes.
			loi.Prefixes = append(loi.Prefixes, obj.Name)
		} else {
			loi.Objects = append(loi.Objects, obj)
		}
	}
	if loi.IsTruncated {
		last := objects[len(objects)-1]
		loi.NextMarker = opts.encodeMarker(last.Name)
	}
	return loi, nil
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) ListMultipartUploads ( ctx context . Context , bucket , prefix , keyMarker , uploadIDMarker , delimiter string , maxUploads int ) ( ListMultipartsInfo , error ) {
2020-05-19 16:53:54 -04:00
if err := checkListMultipartArgs ( ctx , bucket , prefix , keyMarker , uploadIDMarker , delimiter , z ) ; err != nil {
return ListMultipartsInfo { } , err
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . ListMultipartUploads ( ctx , bucket , prefix , keyMarker , uploadIDMarker , delimiter , maxUploads )
2019-11-19 20:42:27 -05:00
}
2020-05-19 16:53:54 -04:00
2022-01-02 12:15:06 -05:00
poolResult := ListMultipartsInfo { }
2021-01-06 12:35:47 -05:00
poolResult . MaxUploads = maxUploads
poolResult . KeyMarker = keyMarker
poolResult . Prefix = prefix
poolResult . Delimiter = delimiter
2022-07-14 23:44:22 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2021-01-06 12:35:47 -05:00
result , err := pool . ListMultipartUploads ( ctx , bucket , prefix , keyMarker , uploadIDMarker ,
2019-11-19 20:42:27 -05:00
delimiter , maxUploads )
if err != nil {
return result , err
}
2021-01-06 12:35:47 -05:00
poolResult . Uploads = append ( poolResult . Uploads , result . Uploads ... )
2019-11-19 20:42:27 -05:00
}
2021-01-06 12:35:47 -05:00
return poolResult , nil
2019-11-19 20:42:27 -05:00
}
// Initiate a new multipart upload on a hashedSet based on object name.
2022-08-29 19:57:16 -04:00
func ( z * erasureServerPools ) NewMultipartUpload ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( * NewMultipartUploadResult , error ) {
2020-05-19 16:53:54 -04:00
if err := checkNewMultipartArgs ( ctx , bucket , object , z ) ; err != nil {
2022-08-29 19:57:16 -04:00
return nil , err
2020-05-19 16:53:54 -04:00
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2022-05-30 13:58:37 -04:00
if ! isMinioMetaBucketName ( bucket ) && ! hasSpaceFor ( getDiskInfos ( ctx , z . serverPools [ 0 ] . getHashedSet ( object ) . getDisks ( ) ... ) , - 1 ) {
2022-08-29 19:57:16 -04:00
return nil , toObjectErr ( errDiskFull )
2021-06-07 11:13:15 -04:00
}
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . NewMultipartUpload ( ctx , bucket , object , opts )
2019-11-19 20:42:27 -05:00
}
2020-06-17 11:33:14 -04:00
2021-04-21 13:57:36 -04:00
for idx , pool := range z . serverPools {
2022-07-14 23:44:22 -04:00
if z . IsSuspended ( idx ) {
continue
}
2021-04-21 13:57:36 -04:00
result , err := pool . ListMultipartUploads ( ctx , bucket , object , "" , "" , "" , maxUploadsList )
if err != nil {
2022-08-29 19:57:16 -04:00
return nil , err
2021-04-21 13:57:36 -04:00
}
// If there is a multipart upload with the same bucket/object name,
// create the new multipart in the same pool, this will avoid
// creating two multiparts uploads in two different pools
if len ( result . Uploads ) != 0 {
return z . serverPools [ idx ] . NewMultipartUpload ( ctx , bucket , object , opts )
}
}
2021-09-23 00:46:24 -04:00
// any parallel writes on the object will block for this poolIdx
// to return since this holds a read lock on the namespace.
2021-06-07 11:13:15 -04:00
idx , err := z . getPoolIdx ( ctx , bucket , object , - 1 )
2021-05-06 13:45:33 -04:00
if err != nil {
2022-08-29 19:57:16 -04:00
return nil , err
2020-06-17 11:33:14 -04:00
}
2020-12-01 16:50:33 -05:00
return z . serverPools [ idx ] . NewMultipartUpload ( ctx , bucket , object , opts )
2019-11-19 20:42:27 -05:00
}
// Copies a part of an object from source hashedSet to destination hashedSet.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) CopyObjectPart ( ctx context . Context , srcBucket , srcObject , destBucket , destObject string , uploadID string , partID int , startOffset int64 , length int64 , srcInfo ObjectInfo , srcOpts , dstOpts ObjectOptions ) ( PartInfo , error ) {
2020-05-19 16:53:54 -04:00
if err := checkNewMultipartArgs ( ctx , srcBucket , srcObject , z ) ; err != nil {
return PartInfo { } , err
}
2019-11-19 20:42:27 -05:00
return z . PutObjectPart ( ctx , destBucket , destObject , uploadID , partID ,
2021-02-10 11:52:50 -05:00
NewPutObjReader ( srcInfo . Reader ) , dstOpts )
2019-11-19 20:42:27 -05:00
}
// PutObjectPart - writes part of an object to hashedSet based on the object name.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) PutObjectPart ( ctx context . Context , bucket , object , uploadID string , partID int , data * PutObjReader , opts ObjectOptions ) ( PartInfo , error ) {
2020-05-19 16:53:54 -04:00
if err := checkPutObjectPartArgs ( ctx , bucket , object , z ) ; err != nil {
return PartInfo { } , err
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . PutObjectPart ( ctx , bucket , object , uploadID , partID , data , opts )
2019-11-19 20:42:27 -05:00
}
2020-05-28 17:36:38 -04:00
2022-07-14 23:44:22 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2021-01-06 12:35:47 -05:00
_ , err := pool . GetMultipartInfo ( ctx , bucket , object , uploadID , opts )
2020-05-28 15:36:20 -04:00
if err == nil {
2021-01-06 12:35:47 -05:00
return pool . PutObjectPart ( ctx , bucket , object , uploadID , partID , data , opts )
2019-11-19 20:42:27 -05:00
}
2020-05-28 15:36:20 -04:00
switch err . ( type ) {
case InvalidUploadID :
2021-01-06 12:35:47 -05:00
// Look for information on the next pool
2020-05-28 15:36:20 -04:00
continue
}
// Any other unhandled errors such as quorum return.
return PartInfo { } , err
2019-11-19 20:42:27 -05:00
}
return PartInfo { } , InvalidUploadID {
Bucket : bucket ,
Object : object ,
UploadID : uploadID ,
}
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) GetMultipartInfo ( ctx context . Context , bucket , object , uploadID string , opts ObjectOptions ) ( MultipartInfo , error ) {
2020-05-28 15:36:20 -04:00
if err := checkListPartsArgs ( ctx , bucket , object , z ) ; err != nil {
return MultipartInfo { } , err
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . GetMultipartInfo ( ctx , bucket , object , uploadID , opts )
2020-05-28 15:36:20 -04:00
}
2022-07-14 23:44:22 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2021-01-06 12:35:47 -05:00
mi , err := pool . GetMultipartInfo ( ctx , bucket , object , uploadID , opts )
2020-05-28 15:36:20 -04:00
if err == nil {
return mi , nil
}
switch err . ( type ) {
case InvalidUploadID :
2021-01-06 12:35:47 -05:00
// upload id not found, continue to the next pool.
2020-05-28 15:36:20 -04:00
continue
}
// any other unhandled error return right here.
return MultipartInfo { } , err
}
return MultipartInfo { } , InvalidUploadID {
Bucket : bucket ,
Object : object ,
UploadID : uploadID ,
}
}
2019-11-19 20:42:27 -05:00
// ListObjectParts - lists all uploaded parts to an object in hashedSet.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) ListObjectParts ( ctx context . Context , bucket , object , uploadID string , partNumberMarker int , maxParts int , opts ObjectOptions ) ( ListPartsInfo , error ) {
2020-05-19 16:53:54 -04:00
if err := checkListPartsArgs ( ctx , bucket , object , z ) ; err != nil {
return ListPartsInfo { } , err
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . ListObjectParts ( ctx , bucket , object , uploadID , partNumberMarker , maxParts , opts )
2019-11-19 20:42:27 -05:00
}
2022-07-14 23:44:22 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2021-01-06 12:35:47 -05:00
_ , err := pool . GetMultipartInfo ( ctx , bucket , object , uploadID , opts )
2020-05-28 15:36:20 -04:00
if err == nil {
2021-01-06 12:35:47 -05:00
return pool . ListObjectParts ( ctx , bucket , object , uploadID , partNumberMarker , maxParts , opts )
2019-11-19 20:42:27 -05:00
}
2020-05-28 15:36:20 -04:00
switch err . ( type ) {
case InvalidUploadID :
continue
}
return ListPartsInfo { } , err
2019-11-19 20:42:27 -05:00
}
return ListPartsInfo { } , InvalidUploadID {
Bucket : bucket ,
Object : object ,
UploadID : uploadID ,
}
}
// Aborts an in-progress multipart operation on hashedSet based on the object name.
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) AbortMultipartUpload ( ctx context . Context , bucket , object , uploadID string , opts ObjectOptions ) error {
2020-05-19 16:53:54 -04:00
if err := checkAbortMultipartArgs ( ctx , bucket , object , z ) ; err != nil {
return err
}
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . AbortMultipartUpload ( ctx , bucket , object , uploadID , opts )
2019-11-19 20:42:27 -05:00
}
2020-05-19 16:53:54 -04:00
2022-07-14 23:44:22 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2021-01-06 12:35:47 -05:00
_ , err := pool . GetMultipartInfo ( ctx , bucket , object , uploadID , opts )
2020-05-28 15:36:20 -04:00
if err == nil {
2021-01-06 12:35:47 -05:00
return pool . AbortMultipartUpload ( ctx , bucket , object , uploadID , opts )
2019-11-19 20:42:27 -05:00
}
2020-05-28 15:36:20 -04:00
switch err . ( type ) {
case InvalidUploadID :
2021-01-06 12:35:47 -05:00
// upload id not found move to next pool
2020-05-28 15:36:20 -04:00
continue
}
return err
2019-11-19 20:42:27 -05:00
}
return InvalidUploadID {
Bucket : bucket ,
Object : object ,
UploadID : uploadID ,
}
}
// CompleteMultipartUpload - completes a pending multipart transaction, on hashedSet based on object name.
func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error) {
	if err = checkCompleteMultipartArgs(ctx, bucket, object, z); err != nil {
		return objInfo, err
	}

	if z.SinglePool() {
		return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
	}

	// Find the pool that owns this upload-id and complete the upload there.
	for idx, pool := range z.serverPools {
		// Skip pools suspended (e.g. for decommissioning).
		if z.IsSuspended(idx) {
			continue
		}
		_, err := pool.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
		if err == nil {
			return pool.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
		}
		// NOTE(review): unlike ListObjectParts/AbortMultipartUpload, ANY error
		// here (not just InvalidUploadID) moves on to the next pool, so a
		// transient failure on the owning pool surfaces as InvalidUploadID
		// below — confirm this is intentional.
	}

	// Upload-id was not found on any pool.
	return objInfo, InvalidUploadID{
		Bucket:   bucket,
		Object:   object,
		UploadID: uploadID,
	}
}
2020-12-01 16:50:33 -05:00
// GetBucketInfo - returns bucket info from one of the erasure coded serverPools.
2022-07-25 20:51:32 -04:00
func ( z * erasureServerPools ) GetBucketInfo ( ctx context . Context , bucket string , opts BucketOptions ) ( bucketInfo BucketInfo , err error ) {
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2022-07-25 20:51:32 -04:00
bucketInfo , err = z . serverPools [ 0 ] . GetBucketInfo ( ctx , bucket , opts )
2020-05-19 16:53:54 -04:00
if err != nil {
return bucketInfo , err
}
meta , err := globalBucketMetadataSys . Get ( bucket )
if err == nil {
bucketInfo . Created = meta . Created
2022-10-15 14:58:31 -04:00
bucketInfo . Versioning = meta . LockEnabled || globalBucketVersioningSys . Enabled ( bucket )
bucketInfo . ObjectLocking = meta . LockEnabled
2020-05-19 16:53:54 -04:00
}
return bucketInfo , nil
2019-11-19 20:42:27 -05:00
}
2021-01-06 12:35:47 -05:00
for _ , pool := range z . serverPools {
2022-07-25 20:51:32 -04:00
bucketInfo , err = pool . GetBucketInfo ( ctx , bucket , opts )
2019-11-19 20:42:27 -05:00
if err != nil {
if isErrBucketNotFound ( err ) {
continue
}
return bucketInfo , err
}
2020-05-19 16:53:54 -04:00
meta , err := globalBucketMetadataSys . Get ( bucket )
if err == nil {
bucketInfo . Created = meta . Created
2022-10-15 14:58:31 -04:00
bucketInfo . Versioning = meta . LockEnabled || globalBucketVersioningSys . Enabled ( bucket )
bucketInfo . ObjectLocking = meta . LockEnabled
2020-05-19 16:53:54 -04:00
}
2019-11-19 20:42:27 -05:00
return bucketInfo , nil
}
return bucketInfo , BucketNotFound {
Bucket : bucket ,
}
}
// IsNotificationSupported returns whether bucket notification is applicable for this layer.
// Always true for the erasure-coded server-pools backend.
func (z *erasureServerPools) IsNotificationSupported() bool {
	return true
}
2020-07-20 15:52:49 -04:00
// IsListenSupported returns whether listen bucket notification is applicable for this layer.
// Always true for the erasure-coded server-pools backend.
func (z *erasureServerPools) IsListenSupported() bool {
	return true
}
// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
// Always true for the erasure-coded server-pools backend.
func (z *erasureServerPools) IsEncryptionSupported() bool {
	return true
}
// IsCompressionSupported returns whether compression is applicable for this layer.
// Always true for the erasure-coded server-pools backend.
func (z *erasureServerPools) IsCompressionSupported() bool {
	return true
}
2020-12-01 16:50:33 -05:00
// IsTaggingSupported returns whether object tagging is applicable for this layer.
// Always true for the erasure-coded server-pools backend.
func (z *erasureServerPools) IsTaggingSupported() bool {
	return true
}
2020-12-01 16:50:33 -05:00
// DeleteBucket - deletes a bucket on all serverPools simultaneously,
// even if one of the serverPools fail to delete buckets, we proceed to
2019-11-19 20:42:27 -05:00
// undo a successful operation.
2021-10-06 13:24:40 -04:00
func ( z * erasureServerPools ) DeleteBucket ( ctx context . Context , bucket string , opts DeleteBucketOptions ) error {
2020-12-01 16:50:33 -05:00
g := errgroup . WithNErrs ( len ( z . serverPools ) )
2019-11-19 20:42:27 -05:00
2020-12-01 16:50:33 -05:00
// Delete buckets in parallel across all serverPools.
for index := range z . serverPools {
2019-11-19 20:42:27 -05:00
index := index
g . Go ( func ( ) error {
2022-03-07 19:18:57 -05:00
if z . IsSuspended ( index ) {
return nil
}
2021-10-06 13:24:40 -04:00
return z . serverPools [ index ] . DeleteBucket ( ctx , bucket , opts )
2019-11-19 20:42:27 -05:00
} , index )
}
errs := g . Wait ( )
2020-03-28 00:52:59 -04:00
2020-05-08 16:44:44 -04:00
// For any write quorum failure, we undo all the delete
// buckets operation by creating all the buckets again.
2019-11-19 20:42:27 -05:00
for _ , err := range errs {
if err != nil {
2021-10-06 13:24:40 -04:00
if ! z . SinglePool ( ) && ! opts . NoRecreate {
undoDeleteBucketServerPools ( context . Background ( ) , bucket , z . serverPools , errs )
2019-11-19 20:42:27 -05:00
}
return err
}
}
2021-10-06 12:20:25 -04:00
// Purge the entire bucket metadata entirely.
2021-10-06 13:24:40 -04:00
z . renameAll ( context . Background ( ) , minioMetaBucket , pathJoin ( bucketMetaPrefix , bucket ) )
2022-07-25 20:51:32 -04:00
// If site replication is configured, hold on to deleted bucket state until sites sync
switch opts . SRDeleteOp {
case MarkDelete :
z . markDelete ( context . Background ( ) , minioMetaBucket , pathJoin ( bucketMetaPrefix , deletedBucketsPrefix , bucket ) )
}
2019-11-19 20:42:27 -05:00
// Success.
return nil
}
2021-02-25 01:24:38 -05:00
// renameAll will rename bucket+prefix unconditionally across all disks to
2021-08-19 12:16:14 -04:00
// minioMetaTmpDeletedBucket + unique uuid,
2021-02-25 01:24:38 -05:00
// Note that set distribution is ignored so it should only be used in cases where
// data is not distributed across sets. Errors are logged but individual
// disk failures are not returned.
func ( z * erasureServerPools ) renameAll ( ctx context . Context , bucket , prefix string ) {
for _ , servers := range z . serverPools {
for _ , set := range servers . sets {
set . renameAll ( ctx , bucket , prefix )
}
}
}
2022-07-25 20:51:32 -04:00
// markDelete will create a directory of deleted bucket in .minio.sys/buckets/.deleted across all disks
// in situations where the deleted bucket needs to be held on to until all sites are in sync for
// site replication
func (z *erasureServerPools) markDelete(ctx context.Context, bucket, prefix string) {
	for _, pool := range z.serverPools {
		for _, objSet := range pool.sets {
			objSet.markDelete(ctx, bucket, prefix)
		}
	}
}
// purgeDelete deletes vol entry in .minio.sys/buckets/.deleted after site replication
// syncs the delete to peers.
func (z *erasureServerPools) purgeDelete(ctx context.Context, bucket, prefix string) {
	for _, pool := range z.serverPools {
		for _, objSet := range pool.sets {
			objSet.purgeDelete(ctx, bucket, prefix)
		}
	}
}
2019-11-19 20:42:27 -05:00
// This function is used to undo a successful DeleteBucket operation.
2020-12-01 16:50:33 -05:00
func undoDeleteBucketServerPools ( ctx context . Context , bucket string , serverPools [ ] * erasureSets , errs [ ] error ) {
g := errgroup . WithNErrs ( len ( serverPools ) )
2019-11-19 20:42:27 -05:00
2020-12-01 16:50:33 -05:00
// Undo previous delete bucket on all underlying serverPools.
for index := range serverPools {
2019-11-19 20:42:27 -05:00
index := index
g . Go ( func ( ) error {
if errs [ index ] == nil {
2022-07-25 20:51:32 -04:00
return serverPools [ index ] . MakeBucketWithLocation ( ctx , bucket , MakeBucketOptions { } )
2019-11-19 20:42:27 -05:00
}
return nil
} , index )
}
g . Wait ( )
}
2020-12-01 16:50:33 -05:00
// List all buckets from one of the serverPools, we are not doing merge
2019-11-19 20:42:27 -05:00
// sort here just for simplification. As per design it is assumed
2020-12-01 16:50:33 -05:00
// that all buckets are present on all serverPools.
2022-07-25 20:51:32 -04:00
func ( z * erasureServerPools ) ListBuckets ( ctx context . Context , opts BucketOptions ) ( buckets [ ] BucketInfo , err error ) {
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2022-07-25 20:51:32 -04:00
buckets , err = z . serverPools [ 0 ] . ListBuckets ( ctx , opts )
2020-05-08 16:44:44 -04:00
} else {
2022-07-13 10:44:50 -04:00
for idx , pool := range z . serverPools {
if z . IsSuspended ( idx ) {
continue
}
2022-07-25 20:51:32 -04:00
buckets , err = pool . ListBuckets ( ctx , opts )
2020-05-08 16:44:44 -04:00
if err != nil {
logger . LogIf ( ctx , err )
continue
}
break
}
2019-11-19 20:42:27 -05:00
}
2020-05-08 16:44:44 -04:00
if err != nil {
return nil , err
}
for i := range buckets {
2022-07-25 20:51:32 -04:00
createdAt , err := globalBucketMetadataSys . CreatedAt ( buckets [ i ] . Name )
2020-05-08 16:44:44 -04:00
if err == nil {
2022-07-25 20:51:32 -04:00
buckets [ i ] . Created = createdAt
2020-05-08 16:44:44 -04:00
}
2019-11-19 20:42:27 -05:00
}
2020-05-08 16:44:44 -04:00
return buckets , nil
2019-11-19 20:42:27 -05:00
}
2020-12-01 16:50:33 -05:00
// HealFormat heals format.json across all pools under a cluster-wide
// namespace lock; aggregates per-pool results into a single HealResultItem.
func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
	// Acquire lock on format.json
	formatLock := z.NewNSLock(minioMetaBucket, formatConfigFile)
	lkctx, err := formatLock.GetLock(ctx, globalOperationTimeout)
	if err != nil {
		return madmin.HealResultItem{}, err
	}
	// Switch to the lock-scoped context so downstream calls observe
	// lock expiry; unlock when done.
	ctx = lkctx.Context()
	defer formatLock.Unlock(lkctx.Cancel)

	r := madmin.HealResultItem{
		Type:   madmin.HealItemMetadata,
		Detail: "disk-format",
	}

	var countNoHeal int
	for _, pool := range z.serverPools {
		result, err := pool.HealFormat(ctx, dryRun)
		// Genuine failures are logged and skipped; errNoHealRequired is
		// not a failure and its result is still aggregated below.
		if err != nil && !errors.Is(err, errNoHealRequired) {
			logger.LogIf(ctx, err)
			continue
		}
		// Count errNoHealRequired across all serverPools,
		// to return appropriate error to the caller
		if errors.Is(err, errNoHealRequired) {
			countNoHeal++
		}
		r.DiskCount += result.DiskCount
		r.SetCount += result.SetCount
		r.Before.Drives = append(r.Before.Drives, result.Before.Drives...)
		r.After.Drives = append(r.After.Drives, result.After.Drives...)
	}

	// No heal returned by all serverPools, return errNoHealRequired
	if countNoHeal == len(z.serverPools) {
		return r, errNoHealRequired
	}

	return r, nil
}
2020-12-13 14:57:08 -05:00
func ( z * erasureServerPools ) HealBucket ( ctx context . Context , bucket string , opts madmin . HealOpts ) ( madmin . HealResultItem , error ) {
2022-01-02 12:15:06 -05:00
r := madmin . HealResultItem {
2019-11-19 20:42:27 -05:00
Type : madmin . HealItemBucket ,
Bucket : bucket ,
}
2020-12-14 15:07:07 -05:00
// Attempt heal on the bucket metadata, ignore any failures
2022-08-03 02:10:22 -04:00
hopts := opts
hopts . Recreate = false
defer z . HealObject ( ctx , minioMetaBucket , pathJoin ( bucketMetaPrefix , bucket , bucketMetadataFile ) , "" , hopts )
2020-12-13 14:57:08 -05:00
2021-01-06 12:35:47 -05:00
for _ , pool := range z . serverPools {
result , err := pool . HealBucket ( ctx , bucket , opts )
2019-11-19 20:42:27 -05:00
if err != nil {
switch err . ( type ) {
case BucketNotFound :
continue
}
return result , err
}
2019-11-20 13:10:26 -05:00
r . DiskCount += result . DiskCount
r . SetCount += result . SetCount
r . Before . Drives = append ( r . Before . Drives , result . Before . Drives ... )
r . After . Drives = append ( r . After . Drives , result . After . Drives ... )
2019-11-19 20:42:27 -05:00
}
2020-12-14 15:07:07 -05:00
2019-11-19 20:42:27 -05:00
return r , nil
}
2020-02-25 10:52:28 -05:00
// Walk a bucket, optionally prefix recursively, until we have returned
// all the content to objectInfo channel, it is callers responsibility
// to allocate a receive channel for ObjectInfo, upon any unhandled
// error walker returns error. Optionally if context.Done() is received
// then Walk() stops the walker.
func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
	if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil {
		// Upon error close the channel.
		close(results)
		return err
	}

	vcfg, _ := globalBucketVersioningSys.Get(bucket)

	// The walk itself happens asynchronously; cancel() both stops
	// sibling set-walkers on error and releases the derived context.
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		defer cancel()
		defer close(results)

		// Pools are walked sequentially; sets within a pool in parallel.
		for _, erasureSet := range z.serverPools {
			var wg sync.WaitGroup
			for _, set := range erasureSet.sets {
				set := set
				wg.Add(1)
				go func() {
					defer wg.Done()

					disks, _ := set.getOnlineDisksWithHealing()
					if len(disks) == 0 {
						// No usable drives in this set; abort the whole walk.
						cancel()
						return
					}

					// loadEntry streams every (filtered) version of an
					// entry to the results channel, newest first.
					loadEntry := func(entry metaCacheEntry) {
						if entry.isDir() {
							return
						}

						fivs, err := entry.fileInfoVersions(bucket)
						if err != nil {
							cancel()
							return
						}

						versionsSorter(fivs.Versions).reverse()

						for _, version := range fivs.Versions {
							// Apply the caller-provided filter, if any.
							send := true
							if opts.WalkFilter != nil && !opts.WalkFilter(version) {
								send = false
							}

							if !send {
								continue
							}

							versioned := vcfg != nil && vcfg.Versioned(version.Name)

							objInfo := version.ToObjectInfo(bucket, version.Name, versioned)
							// Respect cancellation while blocking on send.
							select {
							case <-ctx.Done():
								return
							case results <- objInfo:
							}
						}
					}

					// How to resolve partial results.
					resolver := metadataResolutionParams{
						dirQuorum: 1,
						objQuorum: 1,
						bucket:    bucket,
					}

					path := baseDirFromPrefix(prefix)
					filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator)
					if path == prefix {
						filterPrefix = ""
					}

					lopts := listPathRawOptions{
						disks:          disks,
						bucket:         bucket,
						path:           path,
						filterPrefix:   filterPrefix,
						recursive:      true,
						forwardTo:      opts.WalkMarker,
						minDisks:       1,
						reportNotFound: false,
						agreed:         loadEntry,
						partial: func(entries metaCacheEntries, _ []error) {
							entry, ok := entries.resolve(&resolver)
							if !ok {
								// check if we can get one entry atleast
								// proceed to heal nonetheless.
								entry, _ = entries.firstFound()
							}

							loadEntry(*entry)
						},
						finished: nil,
					}

					if err := listPathRaw(ctx, lopts); err != nil {
						logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts))
						cancel()
						return
					}
				}()
			}
			wg.Wait()
		}
	}()
	return nil
}
2020-06-12 23:04:01 -04:00
// HealObjectFn closure function heals the object.
// It is invoked with the bucket, object name and version-id of each
// object/version discovered during a heal listing.
type HealObjectFn func(bucket, object, versionID string) error
2020-01-29 01:35:44 -05:00
2022-02-25 15:20:41 -05:00
// listAndHeal walks bucket+prefix on a single erasure set and invokes
// healEntry for every listed entry (agreed or partially-agreed). The
// first healEntry failure cancels the listing; the listing error, if
// any, is returned to the caller.
func listAndHeal(ctx context.Context, bucket, prefix string, set *erasureObjects, healEntry func(metaCacheEntry) error) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	disks, _ := set.getOnlineDisksWithHealing()
	if len(disks) == 0 {
		return errors.New("listAndHeal: No non-healing drives found")
	}

	// How to resolve partial results.
	resolver := metadataResolutionParams{
		dirQuorum: 1,
		objQuorum: 1,
		bucket:    bucket,
		strict:    false, // Allow less strict matching.
	}

	path := baseDirFromPrefix(prefix)
	filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator)
	if path == prefix {
		filterPrefix = ""
	}

	lopts := listPathRawOptions{
		disks:          disks,
		bucket:         bucket,
		path:           path,
		filterPrefix:   filterPrefix,
		recursive:      true,
		forwardTo:      "",
		minDisks:       1,
		reportNotFound: false,
		// All drives agree on the entry: heal it directly.
		agreed: func(entry metaCacheEntry) {
			if err := healEntry(entry); err != nil {
				logger.LogIf(ctx, err)
				// Stop the listing on the first heal failure.
				cancel()
			}
		},
		// Drives disagree: resolve to a best candidate and heal that.
		partial: func(entries metaCacheEntries, _ []error) {
			entry, ok := entries.resolve(&resolver)
			if !ok {
				// check if we can get one entry atleast
				// proceed to heal nonetheless.
				entry, _ = entries.firstFound()
			}

			if err := healEntry(*entry); err != nil {
				logger.LogIf(ctx, err)
				cancel()
				return
			}
		},
		finished: nil,
	}

	if err := listPathRaw(ctx, lopts); err != nil {
		return fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)
	}

	return nil
}
2021-03-23 10:57:07 -04:00
2021-12-25 12:01:44 -05:00
// HealObjects walks bucket+prefix across all non-suspended pools and
// invokes healObjectFn for every discovered object version. The first
// per-set listing error encountered is returned.
func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObjectFn HealObjectFn) error {
	// healEntry heals one metacache entry: every decodable version of
	// the object, or the bare object name when versions cannot be read.
	healEntry := func(entry metaCacheEntry) error {
		if entry.isDir() {
			return nil
		}
		// We might land at .metacache, .trash, .multipart
		// no need to heal them skip, only when bucket
		// is '.minio.sys'
		if bucket == minioMetaBucket {
			if wildcard.Match("buckets/*/.metacache/*", entry.name) {
				return nil
			}
			if wildcard.Match("tmp/*", entry.name) {
				return nil
			}
			if wildcard.Match("multipart/*", entry.name) {
				return nil
			}
			if wildcard.Match("tmp-old/*", entry.name) {
				return nil
			}
		}
		fivs, err := entry.fileInfoVersions(bucket)
		if err != nil {
			// Versions could not be parsed; heal by name alone.
			return healObjectFn(bucket, entry.name, "")
		}

		for _, version := range fivs.Versions {
			// 'not found' from the heal callback is tolerated — the
			// version may have been deleted since listing.
			err := healObjectFn(bucket, version.Name, version.VersionID)
			if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
				return err
			}
		}

		return nil
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Walk every set of every non-suspended pool in parallel,
	// keeping one error slot per set.
	var poolErrs [][]error
	for idx, erasureSet := range z.serverPools {
		if z.IsSuspended(idx) {
			continue
		}
		errs := make([]error, len(erasureSet.sets))
		var wg sync.WaitGroup
		for idx, set := range erasureSet.sets {
			wg.Add(1)
			go func(idx int, set *erasureObjects) {
				defer wg.Done()

				errs[idx] = listAndHeal(ctx, bucket, prefix, set, healEntry)
			}(idx, set)
		}
		wg.Wait()
		poolErrs = append(poolErrs, errs)
	}
	// Report the first error any set returned, if any.
	for _, errs := range poolErrs {
		for _, err := range errs {
			if err == nil {
				continue
			}
			return err
		}
	}

	return nil
}
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) HealObject ( ctx context . Context , bucket , object , versionID string , opts madmin . HealOpts ) ( madmin . HealResultItem , error ) {
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-12-25 12:01:44 -05:00
errs := make ( [ ] error , len ( z . serverPools ) )
results := make ( [ ] madmin . HealResultItem , len ( z . serverPools ) )
var wg sync . WaitGroup
for idx , pool := range z . serverPools {
2022-01-11 15:27:47 -05:00
if z . IsSuspended ( idx ) {
continue
}
2021-12-25 12:01:44 -05:00
wg . Add ( 1 )
go func ( idx int , pool * erasureSets ) {
defer wg . Done ( )
result , err := pool . HealObject ( ctx , bucket , object , versionID , opts )
result . Object = decodeDirObject ( result . Object )
errs [ idx ] = err
results [ idx ] = result
} ( idx , pool )
}
wg . Wait ( )
2022-06-20 11:07:45 -04:00
// Return the first nil error
for idx , err := range errs {
if err == nil {
return results [ idx ] , nil
2019-11-19 20:42:27 -05:00
}
}
2021-12-25 12:01:44 -05:00
2022-06-20 11:07:45 -04:00
// No pool returned a nil error, return the first non 'not found' error
for idx , err := range errs {
if ! isErrObjectNotFound ( err ) && ! isErrVersionNotFound ( err ) {
return results [ idx ] , err
2021-12-25 12:01:44 -05:00
}
}
2022-06-20 11:07:45 -04:00
// At this stage, all errors are 'not found'
2020-10-22 16:36:24 -04:00
if versionID != "" {
return madmin . HealResultItem { } , VersionNotFound {
Bucket : bucket ,
Object : object ,
VersionID : versionID ,
}
}
2019-11-19 20:42:27 -05:00
return madmin . HealResultItem { } , ObjectNotFound {
Bucket : bucket ,
Object : object ,
}
}
2021-03-16 23:06:57 -04:00
// GetMetrics - returns metrics of local disks
// Not implemented for the server-pools backend; always returns
// NotImplemented (and logs it) with an empty BackendMetrics.
func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
	logger.LogIf(ctx, NotImplemented{})
	return &BackendMetrics{}, NotImplemented{}
}
2019-12-28 11:54:43 -05:00
2021-03-04 17:36:23 -05:00
func ( z * erasureServerPools ) getPoolAndSet ( id string ) ( poolIdx , setIdx , diskIdx int , err error ) {
2021-01-06 12:35:47 -05:00
for poolIdx := range z . serverPools {
format := z . serverPools [ poolIdx ] . format
2020-06-12 23:04:01 -04:00
for setIdx , set := range format . Erasure . Sets {
2021-03-04 17:36:23 -05:00
for i , diskID := range set {
2020-05-23 20:38:39 -04:00
if diskID == id {
2021-03-04 17:36:23 -05:00
return poolIdx , setIdx , i , nil
2020-05-23 20:38:39 -04:00
}
}
}
}
2022-08-04 19:10:08 -04:00
return - 1 , - 1 , - 1 , fmt . Errorf ( "DriveID(%s) %w" , id , errDiskNotFound )
2020-05-23 20:38:39 -04:00
}
2020-07-20 21:31:22 -04:00
// HealthOptions takes input options to return specific information.
type HealthOptions struct {
	// Maintenance, when true, evaluates health as if this node were taken
	// down: its local drive IDs are excluded from the up-count and the
	// background healing state is consulted.
	Maintenance bool
}
// HealthResult returns the current state of the system, also
// additionally with any specific heuristic information which
// was queried
type HealthResult struct {
	// Healthy is true when every erasure set meets its pool's write
	// quorum (and, under maintenance, no drives are being healed).
	Healthy bool
	// HealingDrives is the number of drives currently being healed.
	HealingDrives int
	// PoolID and SetID identify the first set found below write quorum.
	PoolID, SetID int
	// WriteQuorum is the write quorum of the affected set, or the
	// maximum write quorum across pools when healthy.
	WriteQuorum int
}
2021-02-09 04:00:44 -05:00
// ReadHealth returns if the cluster can serve read requests
func ( z * erasureServerPools ) ReadHealth ( ctx context . Context ) bool {
erasureSetUpCount := make ( [ ] [ ] int , len ( z . serverPools ) )
for i := range z . serverPools {
erasureSetUpCount [ i ] = make ( [ ] int , len ( z . serverPools [ i ] . sets ) )
}
diskIDs := globalNotificationSys . GetLocalDiskIDs ( ctx )
diskIDs = append ( diskIDs , getLocalDiskIDs ( z ) )
for _ , localDiskIDs := range diskIDs {
for _ , id := range localDiskIDs {
2021-03-04 17:36:23 -05:00
poolIdx , setIdx , _ , err := z . getPoolAndSet ( id )
2021-02-09 04:00:44 -05:00
if err != nil {
logger . LogIf ( ctx , err )
continue
}
erasureSetUpCount [ poolIdx ] [ setIdx ] ++
}
}
b := z . BackendInfo ( )
2022-06-07 22:08:21 -04:00
poolReadQuorums := make ( [ ] int , len ( b . StandardSCData ) )
for i , data := range b . StandardSCData {
poolReadQuorums [ i ] = data
}
2021-02-09 04:00:44 -05:00
for poolIdx := range erasureSetUpCount {
for setIdx := range erasureSetUpCount [ poolIdx ] {
2022-06-07 22:08:21 -04:00
if erasureSetUpCount [ poolIdx ] [ setIdx ] < poolReadQuorums [ poolIdx ] {
2021-02-09 04:00:44 -05:00
return false
}
}
}
return true
}
2020-07-20 21:31:22 -04:00
// Health - returns current status of the object layer health,
// provides if write access exists across sets, additionally
// can be used to query scenarios if health may be lost
// if this node is taken down by an external orchestrator.
func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) HealthResult {
	// Per pool, per set count of drives currently reachable.
	erasureSetUpCount := make([][]int, len(z.serverPools))
	for i := range z.serverPools {
		erasureSetUpCount[i] = make([]int, len(z.serverPools[i].sets))
	}

	diskIDs := globalNotificationSys.GetLocalDiskIDs(ctx)
	if !opts.Maintenance {
		// In maintenance simulation this node's own drives are
		// deliberately left out of the up-count.
		diskIDs = append(diskIDs, getLocalDiskIDs(z))
	}

	for _, localDiskIDs := range diskIDs {
		for _, id := range localDiskIDs {
			poolIdx, setIdx, _, err := z.getPoolAndSet(id)
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
			erasureSetUpCount[poolIdx][setIdx]++
		}
	}

	reqInfo := (&logger.ReqInfo{}).AppendTags("maintenance", strconv.FormatBool(opts.Maintenance))

	// Derive each pool's write quorum from its standard storage-class
	// layout; when data == parity one extra drive is required.
	b := z.BackendInfo()
	poolWriteQuorums := make([]int, len(b.StandardSCData))
	for i, data := range b.StandardSCData {
		poolWriteQuorums[i] = data
		if data == b.StandardSCParity {
			poolWriteQuorums[i] = data + 1
		}
	}

	var aggHealStateResult madmin.BgHealState
	if opts.Maintenance {
		// check if local disks are being healed, if they are being healed
		// we need to tell healthy status as 'false' so that this server
		// is not taken down for maintenance
		var err error
		aggHealStateResult, err = getAggregatedBackgroundHealState(ctx, nil)
		if err != nil {
			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Unable to verify global heal status: %w", err))
			return HealthResult{
				Healthy: false,
			}
		}

		if len(aggHealStateResult.HealDisks) > 0 {
			logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", len(aggHealStateResult.HealDisks)))
		}
	}

	// Any set below its pool's write quorum makes the cluster unhealthy;
	// report the first such set.
	for poolIdx := range erasureSetUpCount {
		for setIdx := range erasureSetUpCount[poolIdx] {
			if erasureSetUpCount[poolIdx][setIdx] < poolWriteQuorums[poolIdx] {
				logger.LogIf(logger.SetReqInfo(ctx, reqInfo),
					fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d",
						poolIdx, setIdx, poolWriteQuorums[poolIdx]))
				return HealthResult{
					Healthy:       false,
					HealingDrives: len(aggHealStateResult.HealDisks),
					PoolID:        poolIdx,
					SetID:         setIdx,
					WriteQuorum:   poolWriteQuorums[poolIdx],
				}
			}
		}
	}

	// Report the largest write quorum across all pools.
	var maximumWriteQuorum int
	for _, writeQuorum := range poolWriteQuorums {
		if maximumWriteQuorum == 0 {
			maximumWriteQuorum = writeQuorum
		}
		if writeQuorum > maximumWriteQuorum {
			maximumWriteQuorum = writeQuorum
		}
	}

	// when maintenance is not specified we don't have
	// to look at the healing side of the code.
	if !opts.Maintenance {
		return HealthResult{
			Healthy:     true,
			WriteQuorum: maximumWriteQuorum,
		}
	}

	return HealthResult{
		Healthy:       len(aggHealStateResult.HealDisks) == 0,
		HealingDrives: len(aggHealStateResult.HealDisks),
		WriteQuorum:   maximumWriteQuorum,
	}
}
2020-01-20 11:45:59 -05:00
2021-04-04 16:32:31 -04:00
// PutObjectMetadata - replace or add metadata to an existing object.
func (z *erasureServerPools) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
	object = encodeDirObject(object)
	if z.SinglePool() {
		return z.serverPools[0].PutObjectMetadata(ctx, bucket, object, opts)
	}
	// Locate the pool that already holds this object; metadata is
	// updated on that pool only.
	idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts)
	if err != nil {
		return ObjectInfo{}, err
	}
	return z.serverPools[idx].PutObjectMetadata(ctx, bucket, object, opts)
}
2020-05-23 14:09:35 -04:00
// PutObjectTags - replace or add tags to an existing object
2021-02-01 16:52:51 -05:00
func ( z * erasureServerPools ) PutObjectTags ( ctx context . Context , bucket , object string , tags string , opts ObjectOptions ) ( ObjectInfo , error ) {
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . PutObjectTags ( ctx , bucket , object , tags , opts )
2020-01-20 11:45:59 -05:00
}
2020-06-29 16:07:26 -04:00
2021-02-16 05:43:47 -05:00
// We don't know the size here set 1GiB atleast.
2022-01-10 12:07:49 -05:00
idx , err := z . getPoolIdxExistingWithOpts ( ctx , bucket , object , opts )
2021-02-16 05:43:47 -05:00
if err != nil {
return ObjectInfo { } , err
2020-01-20 11:45:59 -05:00
}
2021-02-16 05:43:47 -05:00
return z . serverPools [ idx ] . PutObjectTags ( ctx , bucket , object , tags , opts )
2020-01-20 11:45:59 -05:00
}
2020-05-23 14:09:35 -04:00
// DeleteObjectTags - delete object tags from an existing object
2021-02-01 16:52:51 -05:00
func ( z * erasureServerPools ) DeleteObjectTags ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( ObjectInfo , error ) {
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . DeleteObjectTags ( ctx , bucket , object , opts )
2020-01-20 11:45:59 -05:00
}
2021-02-16 05:43:47 -05:00
2022-01-10 12:07:49 -05:00
idx , err := z . getPoolIdxExistingWithOpts ( ctx , bucket , object , opts )
2021-02-16 05:43:47 -05:00
if err != nil {
return ObjectInfo { } , err
2020-01-20 11:45:59 -05:00
}
2021-02-16 05:43:47 -05:00
return z . serverPools [ idx ] . DeleteObjectTags ( ctx , bucket , object , opts )
2020-01-20 11:45:59 -05:00
}
2020-05-23 14:09:35 -04:00
// GetObjectTags - get object tags from an existing object
2020-12-01 16:50:33 -05:00
func ( z * erasureServerPools ) GetObjectTags ( ctx context . Context , bucket , object string , opts ObjectOptions ) ( * tags . Tags , error ) {
2020-09-19 11:39:41 -04:00
object = encodeDirObject ( object )
2021-01-26 23:47:42 -05:00
if z . SinglePool ( ) {
2020-12-01 16:50:33 -05:00
return z . serverPools [ 0 ] . GetObjectTags ( ctx , bucket , object , opts )
2020-01-20 11:45:59 -05:00
}
2021-02-16 05:43:47 -05:00
2022-01-10 12:07:49 -05:00
idx , err := z . getPoolIdxExistingWithOpts ( ctx , bucket , object , opts )
2021-02-16 05:43:47 -05:00
if err != nil {
return nil , err
2020-01-20 11:45:59 -05:00
}
2021-02-16 05:43:47 -05:00
return z . serverPools [ idx ] . GetObjectTags ( ctx , bucket , object , opts )
2020-01-20 11:45:59 -05:00
}
2021-04-19 13:30:42 -04:00
// TransitionObject - transition object content to target tier.
func ( z * erasureServerPools ) TransitionObject ( ctx context . Context , bucket , object string , opts ObjectOptions ) error {
object = encodeDirObject ( object )
if z . SinglePool ( ) {
return z . serverPools [ 0 ] . TransitionObject ( ctx , bucket , object , opts )
}
2022-07-16 22:35:24 -04:00
// Avoid transitioning an object from a pool being decommissioned.
opts . SkipDecommissioned = true
2022-01-10 12:07:49 -05:00
idx , err := z . getPoolIdxExistingWithOpts ( ctx , bucket , object , opts )
2021-04-19 13:30:42 -04:00
if err != nil {
return err
}
return z . serverPools [ idx ] . TransitionObject ( ctx , bucket , object , opts )
}
// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
func ( z * erasureServerPools ) RestoreTransitionedObject ( ctx context . Context , bucket , object string , opts ObjectOptions ) error {
object = encodeDirObject ( object )
if z . SinglePool ( ) {
return z . serverPools [ 0 ] . RestoreTransitionedObject ( ctx , bucket , object , opts )
}
2022-07-16 22:35:24 -04:00
// Avoid restoring object from a pool being decommissioned.
opts . SkipDecommissioned = true
2022-01-10 12:07:49 -05:00
idx , err := z . getPoolIdxExistingWithOpts ( ctx , bucket , object , opts )
2021-04-19 13:30:42 -04:00
if err != nil {
return err
}
return z . serverPools [ idx ] . RestoreTransitionedObject ( ctx , bucket , object , opts )
}