Support bucket versioning (#9377)

- Implement a new xl.json 2.0.0 format to support versioning;
  this moves the entire marshaling logic to the POSIX
  layer, while the top layer always consumes a common FileInfo
  construct, which simplifies metadata reads.
- Implement list object versions.
- Migrate object placement for new deployments from crcHash
  to SipHash (a brief illustrative sketch follows below).
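
The placement change in the last bullet swaps the hash that maps an object key to an erasure set. The sketch below is illustrative only, not the commit's code: it assumes the github.com/dchest/siphash package and hypothetical helper names (crcHashMod, sipHashMod), with the SipHash key taken from a 16-byte deployment ID.

package main

import (
    "fmt"
    "hash/crc32"

    "github.com/dchest/siphash" // assumed SipHash-2-4 implementation
)

// crcHashMod maps an object key onto one of `cardinality` erasure sets
// using CRC32 (the legacy placement, kept for existing deployments).
func crcHashMod(key string, cardinality int) int {
    if cardinality <= 0 {
        return -1
    }
    return int(crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(cardinality))
}

// sipHashMod maps an object key onto one of `cardinality` erasure sets
// using SipHash keyed with the deployment ID (the new placement).
func sipHashMod(key string, cardinality int, id [16]byte) int {
    if cardinality <= 0 {
        return -1
    }
    sip := siphash.New(id[:])
    sip.Write([]byte(key))
    return int(sip.Sum64() % uint64(cardinality))
}

func main() {
    var deploymentID [16]byte // hypothetical: in practice derived from the deployment's format metadata
    fmt.Println(crcHashMod("bucket/object.txt", 16))
    fmt.Println(sipHashMod("bucket/object.txt", 16, deploymentID))
}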

Fixes #2111
Harshavardhana 2020-06-12 20:04:01 -07:00 committed by GitHub
parent 43d6e3ae06
commit 4915433bd2
203 changed files with 13833 additions and 6919 deletions


@@ -9,7 +9,7 @@ ENV GO111MODULE on
 RUN \
     apk add --no-cache git 'curl>7.61.0' && \
     git clone https://github.com/minio/minio && \
     curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
 FROM arm32v7/alpine:3.10


@@ -9,7 +9,7 @@ ENV GO111MODULE on
 RUN \
     apk add --no-cache git 'curl>7.61.0' && \
     git clone https://github.com/minio/minio && \
     curl -L https://github.com/balena-io/qemu/releases/download/v3.0.0%2Bresin/qemu-3.0.0+resin-arm.tar.gz | tar zxvf - -C . && mv qemu-3.0.0+resin-arm/qemu-arm-static .
 FROM arm64v8/alpine:3.10


@@ -10,4 +10,3 @@ ENV PATH=$PATH:/root/go/bin
 RUN go get github.com/go-bindata/go-bindata/go-bindata && \
     go get github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs


@@ -631,7 +631,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
     }
     // Check if this setup has an erasure coded backend.
-    if !globalIsXL {
+    if !globalIsErasure {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
         return
     }
@@ -779,7 +779,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
     }
     // Check if this setup has an erasure coded backend.
-    if !globalIsXL {
+    if !globalIsErasure {
         writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL)
         return
     }
@@ -789,7 +789,7 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
     // Get local heal status first
     bgHealStates = append(bgHealStates, getLocalBackgroundHealStatus())
-    if globalIsDistXL {
+    if globalIsDistErasure {
         // Get heal status from other peers
         peersHealStates := globalNotificationSys.BackgroundHealStatus()
         bgHealStates = append(bgHealStates, peersHealStates...)
@@ -862,11 +862,11 @@ const (
     AdminUpdateApplyFailure = "XMinioAdminUpdateApplyFailure"
 )
-// toAdminAPIErrCode - converts errXLWriteQuorum error to admin API
+// toAdminAPIErrCode - converts errErasureWriteQuorum error to admin API
 // specific error.
 func toAdminAPIErrCode(ctx context.Context, err error) APIErrorCode {
     switch err {
-    case errXLWriteQuorum:
+    case errErasureWriteQuorum:
         return ErrAdminConfigNoQuorum
     default:
         return toAPIErrorCode(ctx, err)
@@ -1277,7 +1277,7 @@ func (a adminAPIHandlers) OBDInfoHandler(w http.ResponseWriter, r *http.Request)
         partialWrite(obdInfo)
     }
-    if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistXL {
+    if net, ok := vars["perfnet"]; ok && net == "true" && globalIsDistErasure {
         obdInfo.Perf.Net = append(obdInfo.Perf.Net, globalNotificationSys.NetOBDInfo(deadlinedCtx))
         partialWrite(obdInfo)
@@ -1384,7 +1384,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
         OffDisks += v
     }
-    backend = madmin.XLBackend{
+    backend = madmin.ErasureBackend{
         Type:         madmin.ErasureType,
         OnlineDisks:  OnDisks,
         OfflineDisks: OffDisks,
@@ -1413,10 +1413,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
     for _, sp := range servers {
         for i, di := range sp.Disks {
             path := ""
-            if globalIsXL {
+            if globalIsErasure {
                 path = di.DrivePath
             }
-            if globalIsDistXL {
+            if globalIsDistErasure {
                 path = sp.Endpoint + di.DrivePath
             }
             // For distributed
@@ -1424,13 +1424,13 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
             for b := range storageInfo.Backend.Sets[a] {
                 ep := storageInfo.Backend.Sets[a][b].Endpoint
-                if globalIsDistXL {
+                if globalIsDistErasure {
                     if strings.Replace(ep, "http://", "", -1) == path || strings.Replace(ep, "https://", "", -1) == path {
                         sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State
                         sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID
                     }
                 }
-                if globalIsXL {
+                if globalIsErasure {
                     if ep == path {
                         sp.Disks[i].State = storageInfo.Backend.Sets[a][b].State
                         sp.Disks[i].UUID = storageInfo.Backend.Sets[a][b].UUID


@@ -33,27 +33,27 @@ import (
     "github.com/minio/minio/pkg/madmin"
 )
-// adminXLTestBed - encapsulates subsystems that need to be setup for
+// adminErasureTestBed - encapsulates subsystems that need to be setup for
 // admin-handler unit tests.
-type adminXLTestBed struct {
-    xlDirs   []string
+type adminErasureTestBed struct {
+    erasureDirs []string
     objLayer ObjectLayer
     router   *mux.Router
 }
-// prepareAdminXLTestBed - helper function that setups a single-node
-// XL backend for admin-handler tests.
-func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
+// prepareAdminErasureTestBed - helper function that setups a single-node
+// Erasure backend for admin-handler tests.
+func prepareAdminErasureTestBed(ctx context.Context) (*adminErasureTestBed, error) {
     // reset global variables to start afresh.
     resetTestGlobals()
-    // Set globalIsXL to indicate that the setup uses an erasure
+    // Set globalIsErasure to indicate that the setup uses an erasure
     // code backend.
-    globalIsXL = true
+    globalIsErasure = true
     // Initializing objectLayer for HealFormatHandler.
-    objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
+    objLayer, erasureDirs, xlErr := initTestErasureObjLayer(ctx)
     if xlErr != nil {
         return nil, xlErr
     }
@@ -66,7 +66,7 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
     // Initialize boot time
     globalBootTime = UTCNow()
-    globalEndpoints = mustGetZoneEndpoints(xlDirs...)
+    globalEndpoints = mustGetZoneEndpoints(erasureDirs...)
     newAllSubsystems()
@@ -76,36 +76,37 @@ func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
     adminRouter := mux.NewRouter()
     registerAdminRouter(adminRouter, true, true)
-    return &adminXLTestBed{
-        xlDirs:   xlDirs,
+    return &adminErasureTestBed{
+        erasureDirs: erasureDirs,
         objLayer: objLayer,
         router:   adminRouter,
     }, nil
 }
 // TearDown - method that resets the test bed for subsequent unit
 // tests to start afresh.
-func (atb *adminXLTestBed) TearDown() {
-    removeRoots(atb.xlDirs)
+func (atb *adminErasureTestBed) TearDown() {
+    removeRoots(atb.erasureDirs)
     resetTestGlobals()
 }
-// initTestObjLayer - Helper function to initialize an XL-based object
+// initTestObjLayer - Helper function to initialize an Erasure-based object
 // layer and set globalObjectAPI.
-func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
-    xlDirs, err := getRandomDisks(16)
+func initTestErasureObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
+    erasureDirs, err := getRandomDisks(16)
     if err != nil {
         return nil, nil, err
     }
-    endpoints := mustGetNewEndpoints(xlDirs...)
-    storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
+    endpoints := mustGetNewEndpoints(erasureDirs...)
+    storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
     if err != nil {
-        removeRoots(xlDirs)
+        removeRoots(erasureDirs)
         return nil, nil, err
     }
     globalPolicySys = NewPolicySys()
-    objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
+    objLayer := &erasureZones{zones: make([]*erasureSets, 1)}
+    objLayer.zones[0], err = newErasureSets(ctx, endpoints, storageDisks, format)
     if err != nil {
         return nil, nil, err
     }
@@ -114,7 +115,7 @@ func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
     globalObjLayerMutex.Lock()
     globalObjectAPI = objLayer
     globalObjLayerMutex.Unlock()
-    return objLayer, xlDirs, nil
+    return objLayer, erasureDirs, nil
 }
 // cmdType - Represents different service subcomands like status, stop
@@ -183,9 +184,9 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    adminTestBed, err := prepareAdminXLTestBed(ctx)
+    adminTestBed, err := prepareAdminErasureTestBed(ctx)
     if err != nil {
-        t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
+        t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
     }
     defer adminTestBed.TearDown()
@@ -254,9 +255,9 @@ func TestAdminServerInfo(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    adminTestBed, err := prepareAdminXLTestBed(ctx)
+    adminTestBed, err := prepareAdminErasureTestBed(ctx)
     if err != nil {
-        t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
+        t.Fatal("Failed to initialize a single node Erasure backend for admin handler tests.")
     }
     defer adminTestBed.TearDown()
@@ -298,7 +299,7 @@ func TestToAdminAPIErrCode(t *testing.T) {
     }{
         // 1. Server not in quorum.
         {
-            err:            errXLWriteQuorum,
+            err:            errErasureWriteQuorum,
             expectedAPIErr: ErrAdminConfigNoQuorum,
         },
         // 2. No error.


@@ -21,7 +21,6 @@ import (
     "encoding/json"
     "fmt"
     "net/http"
-    "strings"
     "sync"
     "time"
@@ -193,7 +192,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
     respBytes []byte, apiErr APIError, errMsg string) {
     existsAndLive := false
-    he, exists := ahs.getHealSequence(h.path)
+    he, exists := ahs.getHealSequence(pathJoin(h.bucket, h.object))
     if exists {
         existsAndLive = !he.hasEnded()
     }
@@ -220,8 +219,9 @@
     // Check if new heal sequence to be started overlaps with any
     // existing, running sequence
+    hpath := pathJoin(h.bucket, h.object)
     for k, hSeq := range ahs.healSeqMap {
-        if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) {
+        if !hSeq.hasEnded() && (HasPrefix(k, hpath) || HasPrefix(hpath, k)) {
             errMsg = "The provided heal sequence path overlaps with an existing " +
                 fmt.Sprintf("heal path: %s", k)
@@ -230,7 +230,7 @@
     }
     // Add heal state and start sequence
-    ahs.healSeqMap[h.path] = h
+    ahs.healSeqMap[hpath] = h
     // Launch top-level background heal go-routine
     go h.healSequenceStart()
@@ -251,11 +251,11 @@
 // status results from global state and returns its JSON
 // representation. The clientToken helps ensure there aren't
 // conflicting clients fetching status.
-func (ahs *allHealState) PopHealStatusJSON(path string,
+func (ahs *allHealState) PopHealStatusJSON(hpath string,
     clientToken string) ([]byte, APIErrorCode) {
     // fetch heal state for given path
-    h, exists := ahs.getHealSequence(path)
+    h, exists := ahs.getHealSequence(hpath)
     if !exists {
         // If there is no such heal sequence, return error.
         return nil, ErrHealNoSuchProcess
@@ -296,18 +296,17 @@ func (ahs *allHealState) PopHealStatusJSON(path string,
 // healSource denotes single entity and heal option.
 type healSource struct {
-    path string          // entity path (format, buckets, objects) to heal
-    opts *madmin.HealOpts // optional heal option overrides default setting
+    bucket    string
+    object    string
+    versionID string
+    opts      *madmin.HealOpts // optional heal option overrides default setting
 }
 // healSequence - state for each heal sequence initiated on the
 // server.
 type healSequence struct {
-    // bucket, and prefix on which heal seq. was initiated
-    bucket, objPrefix string
-    // path is just pathJoin(bucket, objPrefix)
-    path string
+    // bucket, and object on which heal seq. was initiated
+    bucket, object string
     // A channel of entities (format, buckets, objects) to heal
     sourceCh chan healSource
@@ -377,8 +376,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
     return &healSequence{
         respCh:         make(chan healResult),
         bucket:         bucket,
-        objPrefix:      objPrefix,
-        path:           pathJoin(bucket, objPrefix),
+        object:         objPrefix,
         reportProgress: true,
         startTime:      UTCNow(),
         clientToken:    mustGetUUID(),
@@ -618,7 +616,9 @@ func (h *healSequence) healSequenceStart() {
 func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
     // Send heal request
     task := healTask{
-        path:       source.path,
+        bucket:     source.bucket,
+        object:     source.object,
+        versionID:  source.versionID,
         opts:       h.settings,
         responseCh: h.respCh,
     }
@@ -690,11 +690,11 @@ func (h *healSequence) healItemsFromSourceCh() error {
         }
         var itemType madmin.HealItemType
         switch {
-        case source.path == nopHeal:
+        case source.bucket == nopHeal:
             continue
-        case source.path == SlashSeparator:
+        case source.bucket == SlashSeparator:
             itemType = madmin.HealItemMetadata
-        case !strings.Contains(source.path, SlashSeparator):
+        case source.bucket != "" && source.object == "":
             itemType = madmin.HealItemBucket
         default:
             itemType = madmin.HealItemObject
@@ -762,12 +762,16 @@ func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
         // NOTE: Healing on meta is run regardless
         // of any bucket being selected, this is to ensure that
         // meta are always upto date and correct.
-        return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
+        return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string) error {
             if h.isQuitting() {
                 return errHealStopSignalled
             }
-            herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
+            herr := h.queueHealTask(healSource{
+                bucket:    bucket,
+                object:    object,
+                versionID: versionID,
+            }, madmin.HealItemBucketMetadata)
             // Object might have been deleted, by the time heal
             // was attempted we ignore this object an move on.
             if isErrObjectNotFound(herr) {
@@ -791,7 +795,7 @@ func (h *healSequence) healDiskFormat() error {
         return errServerNotInitialized
     }
-    return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
+    return h.queueHealTask(healSource{bucket: SlashSeparator}, madmin.HealItemMetadata)
 }
 // healBuckets - check for all buckets heal or just particular bucket.
@@ -833,7 +837,7 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
         return errServerNotInitialized
     }
-    if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
+    if err := h.queueHealTask(healSource{bucket: bucket}, madmin.HealItemBucket); err != nil {
         return err
     }
@@ -842,12 +846,12 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
     }
     if !h.settings.Recursive {
-        if h.objPrefix != "" {
+        if h.object != "" {
             // Check if an object named as the objPrefix exists,
             // and if so heal it.
-            _, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
+            _, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.object, ObjectOptions{})
             if err == nil {
-                if err = h.healObject(bucket, h.objPrefix); err != nil {
+                if err = h.healObject(bucket, h.object, ""); err != nil {
                     return err
                 }
             }
@@ -856,14 +860,14 @@ func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
         return nil
     }
-    if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
+    if err := objectAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil {
         return errFnHealFromAPIErr(h.ctx, err)
     }
     return nil
 }
 // healObject - heal the given object and record result
-func (h *healSequence) healObject(bucket, object string) error {
+func (h *healSequence) healObject(bucket, object, versionID string) error {
     // Get current object layer instance.
     objectAPI := newObjectLayerWithoutSafeModeFn()
     if objectAPI == nil {
@@ -874,5 +878,9 @@ func (h *healSequence) healObject(bucket, object string) error {
         return errHealStopSignalled
     }
-    return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
+    return h.queueHealTask(healSource{
+        bucket:    bucket,
+        object:    object,
+        versionID: versionID,
+    }, madmin.HealItemObject)
 }


@@ -64,7 +64,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
         // DataUsageInfo operations
         adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
-        if globalIsDistXL || globalIsXL {
+        if globalIsDistErasure || globalIsErasure {
             /// Heal operations
             // Heal processing endpoint.
@@ -172,7 +172,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
         }
         // Quota operations
-        if globalIsXL || globalIsDistXL {
+        if globalIsDistErasure || globalIsErasure {
             if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
                 // GetBucketQuotaConfig
                 adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
@@ -185,7 +185,7 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
         // -- Top APIs --
         // Top locks
-        if globalIsDistXL {
+        if globalIsDistErasure {
             adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
         }


@@ -29,7 +29,7 @@ import (
 func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
     var disks []madmin.Disk
     addr := r.Host
-    if globalIsDistXL {
+    if globalIsDistErasure {
         addr = GetLocalPeer(endpointZones)
     }
     network := make(map[string]string)


@@ -20,9 +20,18 @@ import (
     "encoding/xml"
 )
-// ObjectIdentifier carries key name for the object to delete.
-type ObjectIdentifier struct {
+// DeletedObject objects deleted
+type DeletedObject struct {
+    DeleteMarker          bool   `xml:"DeleteMarker"`
+    DeleteMarkerVersionID string `xml:"DeleteMarkerVersionId,omitempty"`
+    ObjectName            string `xml:"Key,omitempty"`
+    VersionID             string `xml:"VersionId,omitempty"`
+}
+// ObjectToDelete carries key name for the object to delete.
+type ObjectToDelete struct {
     ObjectName string `xml:"Key"`
+    VersionID  string `xml:"VersionId"`
 }
 // createBucketConfiguration container for bucket configuration request from client.
@@ -37,5 +46,5 @@ type DeleteObjectsRequest struct {
     // Element to enable quiet mode for the request
     Quiet bool
     // List of objects to be deleted
-    Objects []ObjectIdentifier `xml:"Object"`
+    Objects []ObjectToDelete `xml:"Object"`
 }


@@ -36,6 +36,7 @@ import (
     "github.com/minio/minio/pkg/bucket/lifecycle"
     objectlock "github.com/minio/minio/pkg/bucket/object/lock"
     "github.com/minio/minio/pkg/bucket/policy"
+    "github.com/minio/minio/pkg/bucket/versioning"
     "github.com/minio/minio/pkg/event"
     "github.com/minio/minio/pkg/hash"
 )
@@ -538,9 +539,9 @@ var errorCodes = errorCodeMap{
         HTTPStatusCode: http.StatusNotFound,
     },
     ErrNoSuchVersion: {
-        Code:           "NoSuchVersion",
-        Description:    "Indicates that the version ID specified in the request does not match an existing version.",
-        HTTPStatusCode: http.StatusNotFound,
+        Code:           "InvalidArgument",
+        Description:    "Invalid version id specified",
+        HTTPStatusCode: http.StatusBadRequest,
     },
     ErrNotImplemented: {
         Code:           "NotImplemented",
@@ -1782,6 +1783,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
         apiErr = ErrBucketAlreadyOwnedByYou
     case ObjectNotFound:
         apiErr = ErrNoSuchKey
+    case MethodNotAllowed:
+        apiErr = ErrMethodNotAllowed
+    case VersionNotFound:
+        apiErr = ErrNoSuchVersion
     case ObjectAlreadyExists:
         apiErr = ErrMethodNotAllowed
     case ObjectNameInvalid:
@@ -1918,6 +1923,12 @@ func toAPIError(ctx context.Context, err error) APIError {
                 e.Error()),
             HTTPStatusCode: http.StatusBadRequest,
         }
+    case versioning.Error:
+        apiErr = APIError{
+            Code:           "IllegalVersioningConfigurationException",
+            Description:    fmt.Sprintf("Versioning configuration specified in the request is invalid. (%s)", e.Error()),
+            HTTPStatusCode: http.StatusBadRequest,
+        }
     case lifecycle.Error:
         apiErr = APIError{
             Code: "InvalidRequest",


@@ -29,6 +29,7 @@ import (
     "github.com/minio/minio/cmd/crypto"
     xhttp "github.com/minio/minio/cmd/http"
+    "github.com/minio/minio/pkg/bucket/lifecycle"
 )
 // Returns a hexadecimal representation of time at the
@@ -152,5 +153,26 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
         w.Header().Set(xhttp.ContentRange, contentRange)
     }
+    // Set the relevant version ID as part of the response header.
+    if objInfo.VersionID != "" {
+        w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
+    }
+    if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
+        ruleID, expiryTime := lc.PredictExpiryTime(lifecycle.ObjectOpts{
+            Name:         objInfo.Name,
+            UserTags:     objInfo.UserTags,
+            VersionID:    objInfo.VersionID,
+            ModTime:      objInfo.ModTime,
+            IsLatest:     objInfo.IsLatest,
+            DeleteMarker: objInfo.DeleteMarker,
+        })
+        if !expiryTime.IsZero() {
+            w.Header()[xhttp.AmzExpiration] = []string{
+                fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, expiryTime.Format(http.TimeFormat), ruleID),
+            }
+        }
+    }
     return nil
 }


@@ -81,6 +81,7 @@ type ListVersionsResponse struct {
     CommonPrefixes []CommonPrefix
     Versions       []ObjectVersion
+    DeleteMarkers  []DeletedVersion
     // Encoding type used to encode object keys in the response.
     EncodingType string `xml:"EncodingType,omitempty"`
@@ -237,8 +238,22 @@ type Bucket struct {
 type ObjectVersion struct {
     XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
     Object
-    VersionID string `xml:"VersionId"`
     IsLatest  bool
+    VersionID string `xml:"VersionId"`
+}
+// DeletedVersion container for the delete object version metadata.
+type DeletedVersion struct {
+    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteMarker" json:"-"`
+    IsLatest     bool
+    Key          string
+    LastModified string // time string of format "2006-01-02T15:04:05.000Z"
+    // Owner of the object.
+    Owner Owner
+    VersionID string `xml:"VersionId"`
 }
 // StringMap is a map[string]string.
@@ -333,9 +348,10 @@ type CompleteMultipartUploadResponse struct {
 // DeleteError structure.
 type DeleteError struct {
     Code      string
     Message   string
     Key       string
+    VersionID string `xml:"VersionId"`
 }
 // DeleteObjectsResponse container for multiple object deletes.
@@ -343,7 +359,7 @@ type DeleteObjectsResponse struct {
     XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
     // Collection of all deleted objects
-    DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
+    DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`
     // Collection of errors deleting certain objects.
     Errors []DeleteError `xml:"Error,omitempty"`
@@ -413,8 +429,9 @@ func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
 }
 // generates an ListBucketVersions response for the said bucket with other enumerated options.
-func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
+func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
     var versions []ObjectVersion
+    var deletedVersions []DeletedVersion
     var prefixes []CommonPrefix
     var owner = Owner{}
     var data = ListVersionsResponse{}
@@ -436,15 +453,29 @@
         } else {
             content.StorageClass = globalMinioDefaultStorageClass
         }
         content.Owner = owner
-        content.VersionID = "null"
-        content.IsLatest = true
+        content.VersionID = object.VersionID
+        if content.VersionID == "" {
+            content.VersionID = nullVersionID
+        }
+        content.IsLatest = object.IsLatest
         versions = append(versions, content)
     }
+    for _, deleted := range resp.DeleteObjects {
+        var dv = DeletedVersion{
+            Key:          s3EncodeName(deleted.Name, encodingType),
+            Owner:        owner,
+            LastModified: deleted.ModTime.UTC().Format(iso8601TimeFormat),
+            VersionID:    deleted.VersionID,
+            IsLatest:     deleted.IsLatest,
+        }
+        deletedVersions = append(deletedVersions, dv)
+    }
     data.Name = bucket
     data.Versions = versions
+    data.DeleteMarkers = deletedVersions
     data.EncodingType = encodingType
     data.Prefix = s3EncodeName(prefix, encodingType)
     data.KeyMarker = s3EncodeName(marker, encodingType)
@@ -452,6 +483,8 @@ func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingTyp
     data.MaxKeys = maxKeys
     data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
+    data.NextVersionIDMarker = resp.NextVersionIDMarker
+    data.VersionIDMarker = versionIDMarker
     data.IsTruncated = resp.IsTruncated
     for _, prefix := range resp.Prefixes {
@@ -666,11 +699,14 @@ func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMult
 }
 // generate multi objects delete response.
-func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
+func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
     deleteResp := DeleteObjectsResponse{}
     if !quiet {
         deleteResp.DeletedObjects = deletedObjects
     }
+    if len(errs) == len(deletedObjects) {
+        deleteResp.DeletedObjects = nil
+    }
     deleteResp.Errors = errs
     return deleteResp
 }


@@ -224,9 +224,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
         // ListObjectsV2
         bucket.Methods(http.MethodGet).HandlerFunc(
             maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
-        // ListBucketVersions
+        // ListObjectVersions
         bucket.Methods(http.MethodGet).HandlerFunc(
-            maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
+            maxClients(collectAPIStats("listobjectversions", httpTraceAll(api.ListObjectVersionsHandler)))).Queries("versions", "")
         // ListObjectsV1 (Legacy)
         bucket.Methods(http.MethodGet).HandlerFunc(
             maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))


@@ -18,6 +18,7 @@ package cmd
 import (
     "context"
+    "path"
     "time"
     "github.com/minio/minio/cmd/logger"
@@ -29,8 +30,10 @@ import (
 // path: 'bucket/' or '/bucket/' => Heal bucket
 // path: 'bucket/object' => Heal object
 type healTask struct {
-    path string
-    opts madmin.HealOpts
+    bucket    string
+    object    string
+    versionID string
+    opts      madmin.HealOpts
     // Healing response will be sent here
     responseCh chan healResult
 }
@@ -79,17 +82,18 @@ func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
             var res madmin.HealResultItem
             var err error
-            bucket, object := path2BucketObject(task.path)
             switch {
-            case bucket == "" && object == "":
+            case task.bucket == nopHeal:
+                continue
+            case task.bucket == SlashSeparator:
                 res, err = healDiskFormat(ctx, objAPI, task.opts)
-            case bucket != "" && object == "":
-                res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
-            case bucket != "" && object != "":
-                res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
+            case task.bucket != "" && task.object == "":
+                res, err = objAPI.HealBucket(ctx, task.bucket, task.opts.DryRun, task.opts.Remove)
+            case task.bucket != "" && task.object != "":
+                res, err = objAPI.HealObject(ctx, task.bucket, task.object, task.versionID, task.opts)
             }
-            if task.path != slashSeparator && task.path != nopHeal {
-                ObjectPathUpdated(task.path)
+            if task.bucket != "" && task.object != "" {
+                ObjectPathUpdated(path.Join(task.bucket, task.object))
             }
             task.responseCh <- healResult{result: res, err: err}
         case <-h.doneCh:


@@ -33,7 +33,7 @@ func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
 // 1. Only the concerned erasure set will be listed and healed
 // 2. Only the node hosting the disk is responsible to perform the heal
 func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
-    z, ok := objAPI.(*xlZones)
+    z, ok := objAPI.(*erasureZones)
     if !ok {
         return
     }
@@ -84,10 +84,10 @@ func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
     }
     // Reformat disks
-    bgSeq.sourceCh <- healSource{path: SlashSeparator}
+    bgSeq.sourceCh <- healSource{bucket: SlashSeparator}
     // Ensure that reformatting disks is finished
-    bgSeq.sourceCh <- healSource{path: nopHeal}
+    bgSeq.sourceCh <- healSource{bucket: nopHeal}
     var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
     // Compute the list of erasure set to heal


@@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+    err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     object := getRandomObjectName()
     // create bucket.
-    err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+    err = obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -127,9 +127,9 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
     b.StopTimer()
 }
-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
 func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
-    // create a temp XL/FS backend.
+    // create a temp Erasure/FS backend.
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -143,9 +143,9 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
     runPutObjectPartBenchmark(b, objLayer, objSize)
 }
-// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
 func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
-    // create a temp XL/FS backend.
+    // create a temp Erasure/FS backend.
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -159,9 +159,9 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
     runPutObjectBenchmark(b, objLayer, objSize)
 }
-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
 func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
-    // create a temp XL/FS backend.
+    // create a temp Erasure/FS backend.
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -181,7 +181,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -190,7 +190,7 @@ func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
     // generate etag for the generated data.
     // etag of the data to written is required as input for PutObject.
-    // PutObject is the functions which writes the data onto the FS/XL backend.
+    // PutObject is the functions which writes the data onto the FS/Erasure backend.
     // get text data generated for number of bytes equal to object size.
     md5hex := getMD5Hash(textData)
@@ -240,9 +240,9 @@ func generateBytesData(size int) []byte {
     return bytes.Repeat(getRandomByte(), size)
 }
-// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
+// creates Erasure/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
 func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
-    // create a temp XL/FS backend.
+    // create a temp Erasure/FS backend.
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -256,9 +256,9 @@ func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
     runGetObjectBenchmark(b, objLayer, objSize)
 }
-// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
+// creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
 func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
-    // create a temp XL/FS backend.
+    // create a temp Erasure/FS backend.
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
     objLayer, disks, err := prepareTestBackend(ctx, instanceType)
@@ -278,7 +278,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -322,7 +322,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     // obtains random bucket name.
     bucket := getRandomBucketName()
     // create bucket.
-    err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
+    err := obj.MakeBucketWithLocation(context.Background(), bucket, BucketOptions{})
     if err != nil {
         b.Fatal(err)
     }
@@ -331,7 +331,7 @@ func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
     textData := generateBytesData(objSize)
     // generate md5sum for the generated data.
     // md5sum of the data to written is required as input for PutObject.
-    // PutObject is the functions which writes the data onto the FS/XL backend.
+    // PutObject is the functions which writes the data onto the FS/Erasure backend.
     md5hex := getMD5Hash([]byte(textData))
     sha256hex := ""


@@ -30,25 +30,6 @@ import (
 // magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
 var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
-// BitrotAlgorithm specifies a algorithm used for bitrot protection.
-type BitrotAlgorithm uint
-const (
-    // SHA256 represents the SHA-256 hash function
-    SHA256 BitrotAlgorithm = 1 + iota
-    // HighwayHash256 represents the HighwayHash-256 hash function
-    HighwayHash256
-    // HighwayHash256S represents the Streaming HighwayHash-256 hash function
-    HighwayHash256S
-    // BLAKE2b512 represents the BLAKE2b-512 hash function
-    BLAKE2b512
-)
-// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
-const (
-    DefaultBitrotAlgorithm = HighwayHash256S
-)
 var bitrotAlgorithms = map[BitrotAlgorithm]string{
     SHA256:     "sha256",
     BLAKE2b512: "blake2b",


@@ -34,7 +34,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
     volume := "testvol"
     filePath := "testfile"
-    disk, err := newPosix(tmpDir, "")
+    disk, err := newXLStorage(tmpDir, "")
     if err != nil {
         t.Fatal(err)
     }


@@ -55,5 +55,6 @@ func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
     if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
         return encConfig, nil
     }
     return nil, errors.New("Unsupported bucket encryption configuration")
 }


@ -45,9 +45,8 @@ import (
) )
const ( const (
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>` objectLockConfig = "object-lock.xml"
objectLockConfig = "object-lock.xml" bucketTaggingConfigFile = "tagging.xml"
bucketTaggingConfigFile = "tagging.xml"
) )
// Check if there are buckets on server without corresponding entry in etcd backend and // Check if there are buckets on server without corresponding entry in etcd backend and
@ -382,75 +381,86 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
deleteObjectsFn = api.CacheAPI().DeleteObjects deleteObjectsFn = api.CacheAPI().DeleteObjects
} }
var objectsToDelete = map[string]int{} var objectsToDelete = map[ObjectToDelete]int{}
getObjectInfoFn := objectAPI.GetObjectInfo getObjectInfoFn := objectAPI.GetObjectInfo
if api.CacheAPI() != nil { if api.CacheAPI() != nil {
getObjectInfoFn = api.CacheAPI().GetObjectInfo getObjectInfoFn = api.CacheAPI().GetObjectInfo
} }
var dErrs = make([]APIErrorCode, len(deleteObjects.Objects)) dErrs := make([]DeleteError, len(deleteObjects.Objects))
for index, object := range deleteObjects.Objects { for index, object := range deleteObjects.Objects {
if dErrs[index] = checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); dErrs[index] != ErrNone { if apiErrCode := checkRequestAuthType(ctx, r, policy.DeleteObjectAction, bucket, object.ObjectName); apiErrCode != ErrNone {
if dErrs[index] == ErrSignatureDoesNotMatch || dErrs[index] == ErrInvalidAccessKeyID { if apiErrCode == ErrSignatureDoesNotMatch || apiErrCode == ErrInvalidAccessKeyID {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(dErrs[index]), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(apiErrCode), r.URL, guessIsBrowserReq(r))
return return
} }
apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue continue
} }
if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled { if object.VersionID != "" {
if apiErr := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn); apiErr != ErrNone { if rcfg, _ := globalBucketObjectLockSys.Get(bucket); rcfg.LockEnabled {
dErrs[index] = apiErr if apiErrCode := enforceRetentionBypassForDelete(ctx, r, bucket, object, getObjectInfoFn); apiErrCode != ErrNone {
continue apiErr := errorCodes.ToAPIErr(apiErrCode)
dErrs[index] = DeleteError{
Code: apiErr.Code,
Message: apiErr.Description,
Key: object.ObjectName,
VersionID: object.VersionID,
}
continue
}
} }
} }
// Avoid duplicate objects, we use map to filter them out. // Avoid duplicate objects, we use map to filter them out.
if _, ok := objectsToDelete[object.ObjectName]; !ok { if _, ok := objectsToDelete[object]; !ok {
objectsToDelete[object.ObjectName] = index objectsToDelete[object] = index
} }
} }
toNames := func(input map[string]int) (output []string) { toNames := func(input map[ObjectToDelete]int) (output []ObjectToDelete) {
output = make([]string, len(input)) output = make([]ObjectToDelete, len(input))
idx := 0 idx := 0
for name := range input { for obj := range input {
output[idx] = name output[idx] = obj
idx++ idx++
} }
return return
} }
deleteList := toNames(objectsToDelete) deleteList := toNames(objectsToDelete)
errs, err := deleteObjectsFn(ctx, bucket, deleteList) dObjects, errs := deleteObjectsFn(ctx, bucket, deleteList, ObjectOptions{
if err != nil { Versioned: globalBucketVersioningSys.Enabled(bucket),
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) })
return
}
for i, objName := range deleteList { deletedObjects := make([]DeletedObject, len(deleteObjects.Objects))
dIdx := objectsToDelete[objName] for i := range errs {
dErrs[dIdx] = toAPIErrorCode(ctx, errs[i]) dindex := objectsToDelete[deleteList[i]]
} apiErr := toAPIError(ctx, errs[i])
if apiErr.Code == "" || apiErr.Code == "NoSuchKey" {
// Collect deleted objects and errors if any. deletedObjects[dindex] = dObjects[i]
var deletedObjects []ObjectIdentifier
var deleteErrors []DeleteError
for index, errCode := range dErrs {
object := deleteObjects.Objects[index]
// Success deleted objects are collected separately.
if errCode == ErrNone || errCode == ErrNoSuchKey {
deletedObjects = append(deletedObjects, object)
continue continue
} }
apiErr := getAPIError(errCode) dErrs[dindex] = DeleteError{
// Error during delete should be collected separately. Code: apiErr.Code,
deleteErrors = append(deleteErrors, DeleteError{ Message: apiErr.Description,
Code: apiErr.Code, Key: deleteList[i].ObjectName,
Message: apiErr.Description, VersionID: deleteList[i].VersionID,
Key: object.ObjectName, }
}) }
var deleteErrors []DeleteError
for _, dErr := range dErrs {
if dErr.Code != "" {
deleteErrors = append(deleteErrors, dErr)
}
} }
// Generate response // Generate response
@ -462,12 +472,21 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
// Notify deleted event for objects. // Notify deleted event for objects.
for _, dobj := range deletedObjects { for _, dobj := range deletedObjects {
objInfo := ObjectInfo{
Name: dobj.ObjectName,
VersionID: dobj.VersionID,
}
if dobj.DeleteMarker {
objInfo = ObjectInfo{
Name: dobj.ObjectName,
DeleteMarker: dobj.DeleteMarker,
VersionID: dobj.DeleteMarkerVersionID,
}
}
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete, EventName: event.ObjectRemovedDelete,
BucketName: bucket, BucketName: bucket,
Object: ObjectInfo{ Object: objInfo,
Name: dobj.ObjectName,
},
ReqParams: extractReqParams(r), ReqParams: extractReqParams(r),
RespElements: extractRespElements(w), RespElements: extractRespElements(w),
UserAgent: r.UserAgent(), UserAgent: r.UserAgent(),
@ -522,12 +541,17 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
return return
} }
opts := BucketOptions{
Location: location,
LockEnabled: objectLockEnabled,
}
if globalDNSConfig != nil { if globalDNSConfig != nil {
sr, err := globalDNSConfig.Get(bucket) sr, err := globalDNSConfig.Get(bucket)
if err != nil { if err != nil {
if err == dns.ErrNoEntriesFound { if err == dns.ErrNoEntriesFound {
// Proceed to creating a bucket. // Proceed to creating a bucket.
if err = objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled); err != nil { if err = objectAPI.MakeBucketWithLocation(ctx, bucket, opts); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
@ -565,7 +589,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
} }
// Proceed to creating a bucket. // Proceed to creating a bucket.
err := objectAPI.MakeBucketWithLocation(ctx, bucket, location, objectLockEnabled) err := objectAPI.MakeBucketWithLocation(ctx, bucket, opts)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
@ -797,9 +821,17 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return return
} }
location := getObjectLocation(r, globalDomainNames, bucket, object) // We must not use the http.Header().Set method here because some (broken)
// clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive).
// Therefore, we have to set the ETag directly as map entry.
w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`}
w.Header().Set(xhttp.Location, location)
// Set the relevant version ID as part of the response header.
if objInfo.VersionID != "" {
w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID}
}
w.Header().Set(xhttp.Location, getObjectLocation(r, globalDomainNames, bucket, object))
// Notify object created event. // Notify object created event.
defer sendEvent(eventArgs{ defer sendEvent(eventArgs{
@ -826,9 +858,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
Bucket: objInfo.Bucket, Bucket: objInfo.Bucket,
Key: objInfo.Name, Key: objInfo.Name,
ETag: `"` + objInfo.ETag + `"`, ETag: `"` + objInfo.ETag + `"`,
Location: location, Location: w.Header().Get(xhttp.Location),
}) })
writeResponse(w, http.StatusCreated, resp, "application/xml") writeResponse(w, http.StatusCreated, resp, mimeXML)
case "200": case "200":
writeSuccessResponseHeadersOnly(w) writeSuccessResponseHeadersOnly(w)
default: default:
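
The direct map write for the ETag header matters because Go's http.Header.Set canonicalizes keys; a small standalone illustration:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := http.Header{}
	h.Set("ETag", `"abc"`)        // Set stores this under the canonical key "Etag"
	h["ETag"] = []string{`"abc"`} // a direct map entry keeps the literal spelling "ETag"
	fmt.Println(h)                // the two spellings end up as distinct map keys
}
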
@ -921,79 +953,30 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
// Attempt to delete bucket. // Attempt to delete bucket.
if err := deleteBucket(ctx, bucket, forceDelete); err != nil { if err := deleteBucket(ctx, bucket, forceDelete); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) if _, ok := err.(BucketNotEmpty); ok && (globalBucketVersioningSys.Enabled(bucket) || globalBucketVersioningSys.Suspended(bucket)) {
apiErr := toAPIError(ctx, err)
apiErr.Description = "The bucket you tried to delete is not empty. You must delete all versions in the bucket."
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
}
return return
} }
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
if globalDNSConfig != nil { if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil { if err := globalDNSConfig.Delete(bucket); err != nil {
// Deleting DNS entry failed, attempt to create the bucket again. logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually using etcdctl", err))
objectAPI.MakeBucketWithLocation(ctx, bucket, "", false)
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
} }
globalNotificationSys.DeleteBucketMetadata(ctx, bucket)
// Write success response. // Write success response.
writeSuccessNoContent(w) writeSuccessNoContent(w)
} }
// PutBucketVersioningHandler - PUT Bucket Versioning.
// ----------
// No-op. Available for API compatibility.
func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketVersioning")
defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketVersioningHandler - GET Bucket Versioning.
// ----------
// No-op. Available for API compatibility.
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketVersioning")
defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
getBucketInfo := objectAPI.GetBucketInfo
if _, err := getBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write success response.
writeSuccessResponseXML(w, []byte(getBucketVersioningResponse))
}
// PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration. // PutBucketObjectLockConfigHandler - PUT Bucket object lock configuration.
// ---------- // ----------
// Places an Object Lock configuration on the specified bucket. The rule // Places an Object Lock configuration on the specified bucket. The rule


@ -19,6 +19,7 @@ package cmd
import ( import (
"bytes" "bytes"
"encoding/xml" "encoding/xml"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
@ -28,7 +29,7 @@ import (
"github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/auth"
) )
// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) { func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"}) ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
} }
@ -73,7 +74,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a
} }
} }
// Wrapper for calling GetBucketPolicy HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) { func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"}) ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
} }
@ -217,7 +218,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
} }
// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) { func TestHeadBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"}) ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
} }
@ -322,7 +323,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
} }
// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup. // Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) { func TestListMultipartUploadsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"}) ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
} }
@ -559,7 +560,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
} }
// Wrapper for calling TestListBucketsHandler tests for both XL multiple disks and single node setup. // Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup.
func TestListBucketsHandler(t *testing.T) { func TestListBucketsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"}) ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
} }
@ -653,7 +654,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap
ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq)
} }
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) { func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"}) ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
} }
@ -679,14 +680,17 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
objectNames = append(objectNames, objectName) objectNames = append(objectNames, objectName)
} }
getObjectIdentifierList := func(objectNames []string) (objectIdentifierList []ObjectIdentifier) { getObjectToDeleteList := func(objectNames []string) (objectList []ObjectToDelete) {
for _, objectName := range objectNames { for _, objectName := range objectNames {
objectIdentifierList = append(objectIdentifierList, ObjectIdentifier{objectName}) objectList = append(objectList, ObjectToDelete{
ObjectName: objectName,
})
} }
return objectIdentifierList return objectList
} }
getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
getDeleteErrorList := func(objects []ObjectToDelete) (deleteErrorList []DeleteError) {
for _, obj := range objects { for _, obj := range objects {
deleteErrorList = append(deleteErrorList, DeleteError{ deleteErrorList = append(deleteErrorList, DeleteError{
Code: errorCodes[ErrAccessDenied].Code, Code: errorCodes[ErrAccessDenied].Code,
@ -699,22 +703,38 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
} }
requestList := []DeleteObjectsRequest{ requestList := []DeleteObjectsRequest{
{Quiet: false, Objects: getObjectIdentifierList(objectNames[:5])}, {Quiet: false, Objects: getObjectToDeleteList(objectNames[:5])},
{Quiet: true, Objects: getObjectIdentifierList(objectNames[5:])}, {Quiet: true, Objects: getObjectToDeleteList(objectNames[5:])},
} }
// generate multi objects delete response. // generate multi objects delete response.
successRequest0 := encodeResponse(requestList[0]) successRequest0 := encodeResponse(requestList[0])
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, requestList[0].Objects, nil)
deletedObjects := make([]DeletedObject, len(requestList[0].Objects))
for i := range requestList[0].Objects {
deletedObjects[i] = DeletedObject{
ObjectName: requestList[0].Objects[i].ObjectName,
}
}
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, deletedObjects, nil)
encodedSuccessResponse0 := encodeResponse(successResponse0) encodedSuccessResponse0 := encodeResponse(successResponse0)
successRequest1 := encodeResponse(requestList[1]) successRequest1 := encodeResponse(requestList[1])
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil)
deletedObjects = make([]DeletedObject, len(requestList[1].Objects))
for i := range requestList[0].Objects {
deletedObjects[i] = DeletedObject{
ObjectName: requestList[1].Objects[i].ObjectName,
}
}
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
encodedSuccessResponse1 := encodeResponse(successResponse1) encodedSuccessResponse1 := encodeResponse(successResponse1)
// generate multi objects delete response for errors. // generate multi objects delete response for errors.
// errorRequest := encodeResponse(requestList[1]) // errorRequest := encodeResponse(requestList[1])
errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil) errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, deletedObjects, nil)
encodedErrorResponse := encodeResponse(errorResponse) encodedErrorResponse := encodeResponse(errorResponse)
anonRequest := encodeResponse(requestList[0]) anonRequest := encodeResponse(requestList[0])
@ -817,6 +837,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
// Verify whether the bucket obtained object is same as the one created. // Verify whether the bucket obtained object is same as the one created.
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) { if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
fmt.Println(string(testCase.expectedContent), string(actualContent))
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType) t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
} }
} }
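
For reference, a hedged sketch of the request body these tests encode, assuming the standard S3 multi-object-delete schema with an optional per-object VersionId (keys and version ID invented):

// Illustrative only.
var sampleMultiDelete = []byte(`<Delete>
  <Quiet>false</Quiet>
  <Object><Key>object0</Key></Object>
  <Object><Key>object1</Key><VersionId>example-version-id</VersionId></Object>
</Delete>`)
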


@ -21,7 +21,6 @@ import (
) )
const ( const (
// Disabled means the lifecycle rule is inactive // Disabled means the lifecycle rule is inactive
Disabled = "Disabled" Disabled = "Disabled"
) )


@ -49,13 +49,13 @@ func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int
return ErrNone return ErrNone
} }
// ListBucketObjectVersions - GET Bucket Object versions // ListObjectVersions - GET Bucket Object versions
// You can use the versions subresource to list metadata about all // You can use the versions subresource to list metadata about all
// of the versions of objects in a bucket. // of the versions of objects in a bucket.
func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) { func (api objectAPIHandlers) ListObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketObjectVersions") ctx := newContext(r, w, "ListObjectVersions")
defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r)) defer logger.AuditLog(w, r, "ListObjectVersions", mustGetClaimsFromToken(r))
vars := mux.Vars(r) vars := mux.Vars(r)
bucket := vars["bucket"] bucket := vars["bucket"]
@ -74,8 +74,7 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit
urlValues := r.URL.Query() urlValues := r.URL.Query()
// Extract all the listBucketVersions query params to their native values. // Extract all the listBucketVersions query params to their native values.
// versionIDMarker is ignored here. prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode := getListBucketObjectVersionsArgs(urlValues)
prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues)
if errCode != ErrNone { if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return return
@ -87,29 +86,29 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit
return return
} }
listObjects := objectAPI.ListObjects listObjectVersions := objectAPI.ListObjectVersions
// Initiate a list objects operation based on the input params. // Initiate a list object versions operation based on the input params.
// On success would return back ListObjectsInfo object to be // On success would return back ListObjectsInfo object to be
// marshaled into S3 compatible XML header. // marshaled into S3 compatible XML header.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys) listObjectVersionsInfo, err := listObjectVersions(ctx, bucket, prefix, marker, versionIDMarker, delimiter, maxkeys)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
for i := range listObjectsInfo.Objects { for i := range listObjectVersionsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) { if crypto.IsEncrypted(listObjectVersionsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false) listObjectVersionsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectVersionsInfo.Objects[i], false)
} }
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize() listObjectVersionsInfo.Objects[i].Size, err = listObjectVersionsInfo.Objects[i].GetActualSize()
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return
} }
} }
response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo) response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo)
// Write success response. // Write success response.
writeSuccessResponseXML(w, encodeResponse(response)) writeSuccessResponseXML(w, encodeResponse(response))
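
A sketch of the object-layer method this handler now calls; the parameter order mirrors the call above, while the exact signature is an assumption.

// Sketch only - inferred from the call site above, not the actual interface definition.
type objectVersionLister interface {
	ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionIDMarker,
		delimiter string, maxKeys int) (ListObjectVersionsInfo, error)
}
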


@ -28,6 +28,7 @@ import (
"github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock" objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
@ -111,6 +112,8 @@ func (sys *BucketMetadataSys) Update(bucket string, configFile string, configDat
meta.TaggingConfigXML = configData meta.TaggingConfigXML = configData
case objectLockConfig: case objectLockConfig:
meta.ObjectLockConfigXML = configData meta.ObjectLockConfigXML = configData
case bucketVersioningConfig:
meta.VersioningConfigXML = configData
case bucketQuotaConfigFile: case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData meta.QuotaConfigJSON = configData
default: default:
@ -147,6 +150,16 @@ func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
return meta, nil return meta, nil
} }
// GetVersioningConfig returns configured versioning config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Versioning, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
return nil, err
}
return meta.versioningConfig, nil
}
// GetTaggingConfig returns configured tagging config // GetTaggingConfig returns configured tagging config
// The returned object may not be modified. // The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) { func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {


@ -32,6 +32,7 @@ import (
"github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock" objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
) )
@ -47,6 +48,7 @@ const (
var ( var (
enabledBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`) enabledBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`)
enabledBucketVersioningConfig = []byte(`<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>Enabled</Status></VersioningConfiguration>`)
) )
//go:generate msgp -file $GOFILE //go:generate msgp -file $GOFILE
@ -64,6 +66,7 @@ type BucketMetadata struct {
NotificationConfigXML []byte NotificationConfigXML []byte
LifecycleConfigXML []byte LifecycleConfigXML []byte
ObjectLockConfigXML []byte ObjectLockConfigXML []byte
VersioningConfigXML []byte
EncryptionConfigXML []byte EncryptionConfigXML []byte
TaggingConfigXML []byte TaggingConfigXML []byte
QuotaConfigJSON []byte QuotaConfigJSON []byte
@ -73,6 +76,7 @@ type BucketMetadata struct {
notificationConfig *event.Config notificationConfig *event.Config
lifecycleConfig *lifecycle.Lifecycle lifecycleConfig *lifecycle.Lifecycle
objectLockConfig *objectlock.Config objectLockConfig *objectlock.Config
versioningConfig *versioning.Versioning
sseConfig *bucketsse.BucketSSEConfig sseConfig *bucketsse.BucketSSEConfig
taggingConfig *tags.Tags taggingConfig *tags.Tags
quotaConfig *madmin.BucketQuota quotaConfig *madmin.BucketQuota
@ -87,6 +91,9 @@ func newBucketMetadata(name string) BucketMetadata {
XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
}, },
quotaConfig: &madmin.BucketQuota{}, quotaConfig: &madmin.BucketQuota{},
versioningConfig: &versioning.Versioning{
XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
},
} }
} }
@ -188,6 +195,13 @@ func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLa
b.objectLockConfig = nil b.objectLockConfig = nil
} }
if len(b.VersioningConfigXML) != 0 {
b.versioningConfig, err = versioning.ParseConfig(bytes.NewReader(b.VersioningConfigXML))
if err != nil {
return err
}
}
if len(b.QuotaConfigJSON) != 0 { if len(b.QuotaConfigJSON) != 0 {
b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON) b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON)
if err != nil { if err != nil {
@ -244,6 +258,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
case legacyBucketObjectLockEnabledConfigFile: case legacyBucketObjectLockEnabledConfigFile:
if string(configData) == legacyBucketObjectLockEnabledConfig { if string(configData) == legacyBucketObjectLockEnabledConfig {
b.ObjectLockConfigXML = enabledBucketObjectLockConfig b.ObjectLockConfigXML = enabledBucketObjectLockConfig
b.VersioningConfigXML = enabledBucketVersioningConfig
b.LockEnabled = false // legacy value unset it b.LockEnabled = false // legacy value unset it
// we are only interested in b.ObjectLockConfigXML // we are only interested in b.ObjectLockConfigXML
} }
@ -259,6 +274,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
b.TaggingConfigXML = configData b.TaggingConfigXML = configData
case objectLockConfig: case objectLockConfig:
b.ObjectLockConfigXML = configData b.ObjectLockConfigXML = configData
b.VersioningConfigXML = enabledBucketVersioningConfig
case bucketQuotaConfigFile: case bucketQuotaConfigFile:
b.QuotaConfigJSON = configData b.QuotaConfigJSON = configData
} }
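
Because object lock requires versioning, the legacy conversion above stamps in the canned "Enabled" document; parsing it back goes through the same versioning package, roughly like this sketch:

// Sketch - reuses the canned payload defined earlier in this file.
v, err := versioning.ParseConfig(bytes.NewReader(enabledBucketVersioningConfig))
if err != nil {
	// handle parse error
}
fmt.Println(v.Enabled()) // true for <Status>Enabled</Status>
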


@ -66,6 +66,12 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "ObjectLockConfigXML") err = msgp.WrapError(err, "ObjectLockConfigXML")
return return
} }
case "VersioningConfigXML":
z.VersioningConfigXML, err = dc.ReadBytes(z.VersioningConfigXML)
if err != nil {
err = msgp.WrapError(err, "VersioningConfigXML")
return
}
case "EncryptionConfigXML": case "EncryptionConfigXML":
z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML) z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML)
if err != nil { if err != nil {
@ -97,9 +103,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable // EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 10 // map header, size 11
// write "Name" // write "Name"
err = en.Append(0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) err = en.Append(0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil { if err != nil {
return return
} }
@ -168,6 +174,16 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "ObjectLockConfigXML") err = msgp.WrapError(err, "ObjectLockConfigXML")
return return
} }
// write "VersioningConfigXML"
err = en.Append(0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.VersioningConfigXML)
if err != nil {
err = msgp.WrapError(err, "VersioningConfigXML")
return
}
// write "EncryptionConfigXML" // write "EncryptionConfigXML"
err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil { if err != nil {
@ -204,9 +220,9 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler // MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize()) o = msgp.Require(b, z.Msgsize())
// map header, size 10 // map header, size 11
// string "Name" // string "Name"
o = append(o, 0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = append(o, 0x8b, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name) o = msgp.AppendString(o, z.Name)
// string "Created" // string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64) o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
@ -226,6 +242,9 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
// string "ObjectLockConfigXML" // string "ObjectLockConfigXML"
o = append(o, 0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = append(o, 0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.ObjectLockConfigXML) o = msgp.AppendBytes(o, z.ObjectLockConfigXML)
// string "VersioningConfigXML"
o = append(o, 0xb3, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.VersioningConfigXML)
// string "EncryptionConfigXML" // string "EncryptionConfigXML"
o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c) o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.EncryptionConfigXML) o = msgp.AppendBytes(o, z.EncryptionConfigXML)
@ -298,6 +317,12 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "ObjectLockConfigXML") err = msgp.WrapError(err, "ObjectLockConfigXML")
return return
} }
case "VersioningConfigXML":
z.VersioningConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.VersioningConfigXML)
if err != nil {
err = msgp.WrapError(err, "VersioningConfigXML")
return
}
case "EncryptionConfigXML": case "EncryptionConfigXML":
z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML) z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML)
if err != nil { if err != nil {
@ -330,6 +355,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) { func (z *BucketMetadata) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON)
return return
} }
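
The changed header bytes above follow from msgpack's fixmap encoding (0x80 | number of entries): 0x8a is a ten-field map, 0x8b an eleven-field map after adding VersioningConfigXML.

// msgpack fixmap header: 0x80 | n, valid for maps with up to 15 entries.
const (
	oldMapHeader = 0x80 | 10 // 0x8a - BucketMetadata before this change
	newMapHeader = 0x80 | 11 // 0x8b - with VersioningConfigXML added
)
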


@ -52,79 +52,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
return config.ToRetention(), nil return config.ToRetention(), nil
} }
// Similar to enforceRetentionBypassForDelete but for WebUI
func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return toAPIErrorCode(ctx, err)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return ErrObjectLocked
}
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
switch ret.Mode {
case objectlock.RetCompliance:
// In compliance mode, a protected object version can't be overwritten
// or deleted by any user, including the root user in your AWS account.
// When an object is locked in compliance mode, its retention mode can't
// be changed, and its retention period can't be shortened. Compliance mode
// ensures that an object version can't be overwritten or deleted for the
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
case objectlock.RetGovernance:
// In governance mode, users can't overwrite or delete an object
// version or alter its lock settings unless they have special
// permissions. With governance mode, you protect objects against
// being deleted by most users, but you can still grant some users
// permission to alter the retention settings or delete the object
// if necessary. You can also use governance mode to test retention-period
// settings before creating a compliance-mode retention period.
// To override or remove governance-mode retention settings, a
// user must have the s3:BypassGovernanceRetention permission
// and must explicitly include x-amz-bypass-governance-retention:true
// as a request header with any request that requires overriding
// governance mode.
byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header)
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
if !govBypassPerms {
return ErrObjectLocked
}
return ErrNone
}
}
}
return ErrNone
}
// enforceRetentionForDeletion checks if it is appropriate to remove an // enforceRetentionForDeletion checks if it is appropriate to remove an
// object according to locking configuration when this is lifecycle/ bucket quota asking. // object according to locking configuration when this is lifecycle/ bucket quota asking.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) { func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
@ -153,14 +80,23 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
// For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR // For objects in "Governance" mode, overwrite is allowed if a) object retention date is past OR
// governance bypass headers are set and user has governance bypass permissions. // governance bypass headers are set and user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if retention date is past. // Objects in "Compliance" mode can be overwritten only if retention date is past.
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode { func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket string, object ObjectToDelete, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object) opts, err := getOpts(ctx, r, bucket, object.ObjectName)
if err != nil { if err != nil {
return toAPIErrorCode(ctx, err) return toAPIErrorCode(ctx, err)
} }
oi, err := getObjectInfoFn(ctx, bucket, object, opts) opts.VersionID = object.VersionID
oi, err := getObjectInfoFn(ctx, bucket, object.ObjectName, opts)
if err != nil { if err != nil {
switch err.(type) {
case MethodNotAllowed: // This happens usually for a delete marker
if oi.DeleteMarker {
// Delete marker should be present and valid.
return ErrNone
}
}
return toAPIErrorCode(ctx, err) return toAPIErrorCode(ctx, err)
} }
@ -219,8 +155,8 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes // https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
// If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention // If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention
// or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed. // or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed.
govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object) govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object) govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object.ObjectName)
if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone { if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone {
return ErrAccessDenied return ErrAccessDenied
} }
@ -331,30 +267,32 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
return mode, retainDate, legalHold, ErrNone return mode, retainDate, legalHold, ErrNone
} }
var objExists bool
opts, err := getOpts(ctx, r, bucket, object) opts, err := getOpts(ctx, r, bucket, object)
if err != nil { if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
} }
t, err := objectlock.UTCNowNTP() if opts.VersionID != "" {
if err != nil { if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
logger.LogIf(ctx, err) r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
return mode, retainDate, legalHold, ErrObjectLocked
}
if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil { t, err := objectlock.UTCNowNTP()
objExists = true if err != nil {
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) logger.LogIf(ctx, err)
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { return mode, retainDate, legalHold, ErrObjectLocked
return mode, retainDate, legalHold, ErrObjectLocked }
}
mode = r.Mode if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
retainDate = r.RetainUntilDate return mode, retainDate, legalHold, ErrObjectLocked
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined) }
// Disallow overwriting an object on legal hold
if legalHold.Status == objectlock.LegalHoldOn { mode = r.Mode
return mode, retainDate, legalHold, ErrObjectLocked retainDate = r.RetainUntilDate
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
// Disallow overwriting an object on legal hold
if legalHold.Status == objectlock.LegalHoldOn {
return mode, retainDate, legalHold, ErrObjectLocked
}
} }
} }
@ -374,9 +312,6 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
if err != nil { if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
} }
if objExists && retainDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
if retentionPermErr != ErrNone { if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr return mode, retainDate, legalHold, retentionPermErr
} }
@ -387,16 +322,14 @@ func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, obj
if retentionPermErr != ErrNone { if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr return mode, retainDate, legalHold, retentionPermErr
} }
t, err := objectlock.UTCNowNTP() t, err := objectlock.UTCNowNTP()
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked return mode, retainDate, legalHold, ErrObjectLocked
} }
// AWS S3 just creates a new version of object when an object is being overwritten.
if objExists && retainDate.After(t) { if !legalHoldRequested && retentionCfg.LockEnabled {
return mode, retainDate, legalHold, ErrObjectLocked
}
if !legalHoldRequested {
// inherit retention from bucket configuration // inherit retention from bucket configuration
return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone
} }


@ -164,7 +164,7 @@ func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *ht
} }
// Read bucket access policy. // Read bucket access policy.
config, err := globalBucketMetadataSys.GetPolicyConfig(bucket) config, err := globalPolicySys.Get(bucket)
if err != nil { if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return return


@ -92,7 +92,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
} }
} }
// Wrapper for calling Put Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestPutBucketPolicyHandler(t *testing.T) { func TestPutBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"}) ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
} }
@ -102,7 +102,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
credentials auth.Credentials, t *testing.T) { credentials auth.Credentials, t *testing.T) {
bucketName1 := fmt.Sprintf("%s-1", bucketName) bucketName1 := fmt.Sprintf("%s-1", bucketName)
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, BucketOptions{}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -314,7 +314,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
} }
// Wrapper for calling Get Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestGetBucketPolicyHandler(t *testing.T) { func TestGetBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"}) ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"})
} }
@ -520,7 +520,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq) ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
} }
// Wrapper for calling Delete Bucket Policy HTTP handler tests for both XL multiple disks and single node setup. // Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup.
func TestDeleteBucketPolicyHandler(t *testing.T) { func TestDeleteBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"}) ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
} }


@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc. * MinIO Cloud Storage, (C) 2018,2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -70,6 +70,16 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
principalType = "User" principalType = "User"
} }
vid := r.URL.Query().Get("versionId")
if vid == "" {
if u, err := url.Parse(r.Header.Get(xhttp.AmzCopySource)); err == nil {
vid = u.Query().Get("versionId")
}
if vid == "" {
vid = r.Header.Get(xhttp.AmzCopySourceVersionID)
}
}
args := map[string][]string{ args := map[string][]string{
"CurrentTime": {currTime.Format(time.RFC3339)}, "CurrentTime": {currTime.Format(time.RFC3339)},
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)}, "EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
@ -80,6 +90,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
"principaltype": {principalType}, "principaltype": {principalType},
"userid": {username}, "userid": {username},
"username": {username}, "username": {username},
"versionid": {vid},
} }
if lc != "" { if lc != "" {
@ -142,7 +153,7 @@ func getConditionValues(r *http.Request, lc string, username string, claims map[
return args return args
} }
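
With the request's version ID exposed as a condition value, a bucket policy can match on it; a hypothetical statement follows (the condition key name shown, s3:versionid, is an assumption, not taken from this change), kept as a Go string for illustration.

// Hypothetical policy fragment - condition key name and version ID are assumed.
var versionScopedStatement = `{
  "Effect": "Deny",
  "Action": ["s3:GetObjectVersion"],
  "Resource": ["arn:aws:s3:::mybucket/*"],
  "Condition": {"StringEquals": {"s3:versionid": "example-version-id"}}
}`
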
// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy. // PolicyToBucketAccessPolicy converts a MinIO policy into a minio-go policy data structure.
func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) { func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) {
// Return empty BucketAccessPolicy for empty bucket policy. // Return empty BucketAccessPolicy for empty bucket policy.
if bucketPolicy == nil { if bucketPolicy == nil {


@ -138,7 +138,7 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
case <-ctx.Done(): case <-ctx.Done():
return return
case <-time.NewTimer(bgQuotaInterval).C: case <-time.NewTimer(bgQuotaInterval).C:
logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI)) enforceFIFOQuota(ctx, objAPI)
} }
} }
@ -146,20 +146,22 @@ func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
// enforceFIFOQuota deletes objects in FIFO order until sufficient objects // enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota // have been deleted so as to bring bucket usage within quota
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error { func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) {
// Turn off quota enforcement if data usage info is unavailable. // Turn off quota enforcement if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff { if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
return nil return
} }
buckets, err := objectAPI.ListBuckets(ctx) buckets, err := objectAPI.ListBuckets(ctx)
if err != nil { if err != nil {
return err logger.LogIf(ctx, err)
return
} }
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil { if err != nil {
return err logger.LogIf(ctx, err)
return
} }
for _, binfo := range buckets { for _, binfo := range buckets {
@ -196,7 +198,8 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
// Walk through all objects // Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil { if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil {
return err logger.LogIf(ctx, err)
continue
} }
// reuse the fileScorer used by disk cache to score entries by // reuse the fileScorer used by disk cache to score entries by
@ -205,53 +208,61 @@ func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
// irrelevant. // irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1) scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil { if err != nil {
return err logger.LogIf(ctx, err)
continue
} }
rcfg, _ := globalBucketObjectLockSys.Get(bucket) rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh { for obj := range objInfoCh {
if obj.DeleteMarker {
// Delete markers are automatically added for FIFO purge.
scorer.addFileWithObjInfo(obj, 1)
continue
}
// skip objects currently under retention // skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) { if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue continue
} }
scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1) scorer.addFileWithObjInfo(obj, 1)
} }
var objects []string
numKeys := len(scorer.fileNames()) versioned := globalBucketVersioningSys.Enabled(bucket)
for i, key := range scorer.fileNames() {
objects = append(objects, key) var objects []ObjectToDelete
numKeys := len(scorer.fileObjInfos())
for i, obj := range scorer.fileObjInfos() {
objects = append(objects, ObjectToDelete{
ObjectName: obj.Name,
VersionID: obj.VersionID,
})
if len(objects) < maxDeleteList && (i < numKeys-1) { if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxObjectList or end of slice // skip deletion until maxDeleteList or end of slice
continue continue
} }
if len(objects) == 0 { if len(objects) == 0 {
break break
} }
// Deletes a list of objects. // Deletes a list of objects.
deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects) _, deleteErrs := objectAPI.DeleteObjects(ctx, bucket, objects, ObjectOptions{
if err != nil { Versioned: versioned,
logger.LogIf(ctx, err) })
} else { for i := range deleteErrs {
for i := range deleteErrs { if deleteErrs[i] != nil {
if deleteErrs[i] != nil { logger.LogIf(ctx, deleteErrs[i])
logger.LogIf(ctx, deleteErrs[i]) continue
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: objects[i],
},
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
} }
objects = nil
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: obj,
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
} }
objects = nil
} }
} }
return nil
} }
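
A sketch of the batched delete the quota sweep now relies on; the shape is inferred from the call above and is not the authoritative interface.

// Sketch only - inferred from the call site above.
type batchDeleter interface {
	DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete,
		opts ObjectOptions) ([]DeletedObject, []error)
}
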


@ -0,0 +1,128 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"encoding/xml"
"io"
"net/http"
humanize "github.com/dustin/go-humanize"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
)
const (
bucketVersioningConfig = "versioning.xml"
// Maximum size of bucket versioning configuration payload sent to the PutBucketVersioningHandler.
maxBucketVersioningConfigSize = 1 * humanize.MiByte
)
// PutBucketVersioningHandler - PUT Bucket Versioning.
// ----------
func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketVersioning")
defer logger.AuditLog(w, r, "PutBucketVersioning", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// PutBucketVersioning API requires Content-Md5
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketVersioningAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
v, err := versioning.ParseConfig(io.LimitReader(r.Body, maxBucketVersioningConfigSize))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(v)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketVersioningConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeSuccessResponseHeadersOnly(w)
}
// GetBucketVersioningHandler - GET Bucket Versioning.
// ----------
func (api objectAPIHandlers) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketVersioning")
defer logger.AuditLog(w, r, "GetBucketVersioning", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketVersioningAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config, err := globalBucketVersioningSys.Get(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write bucket versioning configuration to client
writeSuccessResponseXML(w, configData)
}
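
For reference, the configuration document this handler pair exchanges is the standard S3 versioning payload; an illustrative body, written the same way the canned configs elsewhere in this change are:

// Illustrative request/response body for Put/GetBucketVersioning.
var sampleVersioningConfig = []byte(`<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <Status>Suspended</Status>
</VersioningConfiguration>`)
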

cmd/bucket-versioning.go (new file, 57 lines)

@ -0,0 +1,57 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import "github.com/minio/minio/pkg/bucket/versioning"
// BucketVersioningSys - bucket versioning subsystem.
type BucketVersioningSys struct{}
// Enabled returns true if versioning is enabled on the given bucket.
func (sys *BucketVersioningSys) Enabled(bucket string) bool {
vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
return false
}
return vc.Enabled()
}
// Suspended returns true if versioning is suspended on the given bucket.
func (sys *BucketVersioningSys) Suspended(bucket string) bool {
vc, err := globalBucketMetadataSys.GetVersioningConfig(bucket)
if err != nil {
return false
}
return vc.Suspended()
}
// Get returns the stored bucket versioning configuration.
func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, NotImplemented{}
}
return globalBucketMetadataSys.GetVersioningConfig(bucket)
}
// NewBucketVersioningSys - creates new versioning system.
func NewBucketVersioningSys() *BucketVersioningSys {
return &BucketVersioningSys{}
}
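
A short usage sketch, mirroring how other call sites in this change consult the subsystem (the bucket name is illustrative):

// Sketch - mirrors the Enabled()/Suspended() checks used elsewhere in this change.
if globalBucketVersioningSys.Enabled("mybucket") || globalBucketVersioningSys.Suspended("mybucket") {
	// Versioning has been turned on at least once; deletes without an explicit
	// version ID are expected to leave delete markers rather than remove data.
}
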


@ -50,7 +50,7 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
} }
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error { func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile) _, err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile, ObjectOptions{})
if err != nil && isErrObjectNotFound(err) { if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound return errConfigNotFound
} }


@ -59,7 +59,7 @@ func initHelp() {
for k, v := range notify.DefaultNotificationKVS { for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v kvs[k] = v
} }
if globalIsXL { if globalIsErasure {
kvs[config.StorageClassSubSys] = storageclass.DefaultKVS kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
} }
config.RegisterDefaultKVS(kvs) config.RegisterDefaultKVS(kvs)
@ -168,7 +168,7 @@ func initHelp() {
}, },
} }
if globalIsXL { if globalIsErasure {
helpSubSys = append(helpSubSys, config.HelpKV{}) helpSubSys = append(helpSubSys, config.HelpKV{})
copy(helpSubSys[2:], helpSubSys[1:]) copy(helpSubSys[2:], helpSubSys[1:])
helpSubSys[1] = config.HelpKV{ helpSubSys[1] = config.HelpKV{
@ -232,9 +232,9 @@ func validateConfig(s config.Config) error {
return err return err
} }
if globalIsXL { if globalIsErasure {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount); err != nil { globalErasureSetDriveCount); err != nil {
return err return err
} }
} }
@ -367,9 +367,9 @@ func lookupConfigs(s config.Config) {
globalAPIConfig.init(apiConfig) globalAPIConfig.init(apiConfig)
if globalIsXL { if globalIsErasure {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount) globalErasureSetDriveCount)
if err != nil { if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
} }


@ -92,7 +92,8 @@ func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData b
func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error { func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix) historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
return objAPI.DeleteObject(ctx, minioMetaBucket, historyFile) _, err := objAPI.DeleteObject(ctx, minioMetaBucket, historyFile, ObjectOptions{})
return err
} }
func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) { func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) {


@ -45,7 +45,7 @@ func mustGetNodeName(endpointZones EndpointZones) (nodeName string) {
if err != nil { if err != nil {
logger.FatalIf(err, "Unable to start console logging subsystem") logger.FatalIf(err, "Unable to start console logging subsystem")
} }
if globalIsDistXL { if globalIsDistErasure {
nodeName = host.Name nodeName = host.Name
} }
return nodeName return nodeName


@ -32,7 +32,9 @@ func writeCopyPartErr(ctx context.Context, w http.ResponseWriter, err error, url
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser) writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource), url, browser)
return return
default: default:
writeErrorResponse(ctx, w, toAPIError(ctx, err), url, browser) apiErr := errorCodes.ToAPIErr(ErrInvalidCopyPartRangeSource)
apiErr.Description = err.Error()
writeErrorResponse(ctx, w, apiErr, url, browser)
return return
} }
} }

View File

@ -28,7 +28,6 @@ import (
"time" "time"
"github.com/minio/minio/cmd/config" "github.com/minio/minio/cmd/config"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/color" "github.com/minio/minio/pkg/color"
@ -512,7 +511,6 @@ func (i *crawlItem) transformMetaDir() {
type actionMeta struct { type actionMeta struct {
oi ObjectInfo oi ObjectInfo
trustOI bool // Set true if oi can be trusted and has been read with quorum. trustOI bool // Set true if oi can be trusted and has been read with quorum.
meta map[string]string
} }
// applyActions will apply lifecycle checks on to a scanned item. // applyActions will apply lifecycle checks on to a scanned item.
@ -528,7 +526,16 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
return size return size
} }
action := i.lifeCycle.ComputeAction(i.objectPath(), meta.meta[xhttp.AmzObjectTagging], meta.oi.ModTime) versionID := meta.oi.VersionID
action := i.lifeCycle.ComputeAction(
lifecycle.ObjectOpts{
Name: i.objectPath(),
UserTags: meta.oi.UserTags,
ModTime: meta.oi.ModTime,
VersionID: meta.oi.VersionID,
DeleteMarker: meta.oi.DeleteMarker,
IsLatest: meta.oi.IsLatest,
})
if i.debug { if i.debug {
logger.Info(color.Green("applyActions:")+" lifecycle: %q, Initial scan: %v", i.objectPath(), action) logger.Info(color.Green("applyActions:")+" lifecycle: %q, Initial scan: %v", i.objectPath(), action)
} }
@ -542,19 +549,42 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
// These (expensive) operations should only run on items we are likely to delete. // These (expensive) operations should only run on items we are likely to delete.
// Load to ensure that we have the correct version and not an unsynced version. // Load to ensure that we have the correct version and not an unsynced version.
if !meta.trustOI { if !meta.trustOI {
obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{}) obj, err := o.GetObjectInfo(ctx, i.bucket, i.objectPath(), ObjectOptions{
VersionID: versionID,
})
if err != nil { if err != nil {
// Do nothing - heal in the future. switch err.(type) {
logger.LogIf(ctx, err) case MethodNotAllowed: // This happens usually for a delete marker
return size if !obj.DeleteMarker { // if this is not a delete marker log and return
// Do nothing - heal in the future.
logger.LogIf(ctx, err)
return size

}
case ObjectNotFound:
// Object not found, return 0.
return 0
default:
// All other errors proceed.
logger.LogIf(ctx, err)
return size
}
} }
size = obj.Size size = obj.Size
// Recalculate action. // Recalculate action.
action = i.lifeCycle.ComputeAction(i.objectPath(), obj.UserTags, obj.ModTime) action = i.lifeCycle.ComputeAction(
lifecycle.ObjectOpts{
Name: i.objectPath(),
UserTags: obj.UserTags,
ModTime: obj.ModTime,
VersionID: obj.VersionID,
DeleteMarker: obj.DeleteMarker,
IsLatest: obj.IsLatest,
})
if i.debug { if i.debug {
logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action) logger.Info(color.Green("applyActions:")+" lifecycle: Secondary scan: %v", action)
} }
versionID = obj.VersionID
switch action { switch action {
case lifecycle.DeleteAction: case lifecycle.DeleteAction:
default: default:
@ -563,7 +593,7 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
} }
} }
err = o.DeleteObject(ctx, i.bucket, i.objectPath()) obj, err := o.DeleteObject(ctx, i.bucket, i.objectPath(), ObjectOptions{VersionID: versionID})
if err != nil { if err != nil {
// Assume it is still there. // Assume it is still there.
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
@ -574,10 +604,8 @@ func (i *crawlItem) applyActions(ctx context.Context, o ObjectLayer, meta action
sendEvent(eventArgs{ sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete, EventName: event.ObjectRemovedDelete,
BucketName: i.bucket, BucketName: i.bucket,
Object: ObjectInfo{ Object: obj,
Name: i.objectPath(), Host: "Internal: [ILM-EXPIRY]",
},
Host: "Internal: [ILM-EXPIRY]",
}) })
return 0 return 0
} }
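The expiry decision above now feeds version metadata into the lifecycle evaluation and tolerates delete markers. A simplified, self-contained sketch of that decision flow; the types and the rule below are stand-ins, not the actual pkg/bucket/lifecycle API:

package main

import (
	"fmt"
	"time"
)

// objectState mirrors the fields applyActions passes to the lifecycle engine:
// name, tags, mod time and version metadata.
type objectState struct {
	Name         string
	UserTags     string
	ModTime      time.Time
	VersionID    string
	DeleteMarker bool
	IsLatest     bool
}

// shouldExpire is a toy rule: expire anything older than maxAge, and always
// let delete markers through so they can be cleaned up as well.
func shouldExpire(obj objectState, maxAge time.Duration) bool {
	if obj.DeleteMarker {
		return true
	}
	return time.Since(obj.ModTime) > maxAge
}

func main() {
	obj := objectState{Name: "logs/2019/app.log", ModTime: time.Now().Add(-40 * 24 * time.Hour), IsLatest: true}
	fmt.Println("expire:", shouldExpire(obj, 30*24*time.Hour)) // true
}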

View File

@ -60,7 +60,7 @@ type CacheChecksumInfoV1 struct {
// Represents the cache metadata struct // Represents the cache metadata struct
type cacheMeta struct { type cacheMeta struct {
Version string `json:"version"` Version string `json:"version"`
Stat statInfo `json:"stat"` // Stat of the current object `cache.json`. Stat StatInfo `json:"stat"` // Stat of the current object `cache.json`.
// checksums of blocks on disk. // checksums of blocks on disk.
Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"` Checksum CacheChecksumInfoV1 `json:"checksum,omitempty"`
@ -553,7 +553,7 @@ func (c *diskCache) bitrotWriteToCache(cachePath, fileName string, reader io.Rea
} }
f, err := os.Create(filePath) f, err := os.Create(filePath)
if err != nil { if err != nil {
return 0, osErrToFSFileErr(err) return 0, osErrToFileErr(err)
} }
defer f.Close() defer f.Close()

View File

@ -187,12 +187,12 @@ func readCacheFileStream(filePath string, offset, length int64) (io.ReadCloser,
fr, err := os.Open(filePath) fr, err := os.Open(filePath)
if err != nil { if err != nil {
return nil, osErrToFSFileErr(err) return nil, osErrToFileErr(err)
} }
// Stat to get the size of the file at path. // Stat to get the size of the file at path.
st, err := fr.Stat() st, err := fr.Stat()
if err != nil { if err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
return nil, err return nil, err
} }
@ -298,9 +298,10 @@ type fileScorer struct {
} }
type queuedFile struct { type queuedFile struct {
name string name string
size uint64 versionID string
score float64 size uint64
score float64
} }
// newFileScorer allows to collect files to save a specific number of bytes. // newFileScorer allows to collect files to save a specific number of bytes.
@ -321,15 +322,33 @@ func newFileScorer(saveBytes uint64, now int64, maxHits int) (*fileScorer, error
return &f, nil return &f, nil
} }
func (f *fileScorer) addFile(name string, lastAccess time.Time, size int64, hits int) { func (f *fileScorer) addFile(name string, accTime time.Time, size int64, hits int) {
f.addFileWithObjInfo(ObjectInfo{
Name: name,
AccTime: accTime,
Size: size,
}, hits)
}
func (f *fileScorer) addFileWithObjInfo(objInfo ObjectInfo, hits int) {
// Calculate how much we want to delete this object. // Calculate how much we want to delete this object.
file := queuedFile{ file := queuedFile{
name: name, name: objInfo.Name,
size: uint64(size), versionID: objInfo.VersionID,
size: uint64(objInfo.Size),
} }
score := float64(f.now - lastAccess.Unix())
var score float64
if objInfo.ModTime.IsZero() {
// Mod time is not available with disk cache, use atime.
score = float64(f.now - objInfo.AccTime.Unix())
} else {
// Otherwise use mod time when it is available.
score = float64(f.now - objInfo.ModTime.Unix())
}
// Size as fraction of how much we want to save, 0->1. // Size as fraction of how much we want to save, 0->1.
szWeight := math.Max(0, (math.Min(1, float64(size)*f.sizeMult))) szWeight := math.Max(0, (math.Min(1, float64(file.size)*f.sizeMult)))
// 0 at f.maxHits, 1 at 0. // 0 at f.maxHits, 1 at 0.
hitsWeight := (1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(f.maxHits)))) hitsWeight := (1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(f.maxHits))))
file.score = score * (1 + 0.25*szWeight + 0.25*hitsWeight) file.score = score * (1 + 0.25*szWeight + 0.25*hitsWeight)
@ -404,6 +423,22 @@ func (f *fileScorer) trimQueue() {
} }
} }
// fileObjInfos returns all queued file object infos
func (f *fileScorer) fileObjInfos() []ObjectInfo {
res := make([]ObjectInfo, 0, f.queue.Len())
e := f.queue.Front()
for e != nil {
qfile := e.Value.(queuedFile)
res = append(res, ObjectInfo{
Name: qfile.name,
Size: int64(qfile.size),
VersionID: qfile.versionID,
})
e = e.Next()
}
return res
}
// fileNames returns all queued file names. // fileNames returns all queued file names.
func (f *fileScorer) fileNames() []string { func (f *fileScorer) fileNames() []string {
res := make([]string, 0, f.queue.Len()) res := make([]string, 0, f.queue.Len())
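The eviction score computed above combines age (mod time when known, otherwise access time) with size and hit-count weights. A standalone sketch of the same arithmetic; the sizeMult and maxHits inputs are assumptions about how the scorer is configured, and the values in main are made up:

package main

import (
	"fmt"
	"math"
	"time"
)

// cacheScore mirrors the file-scorer weighting: older, larger and rarely-hit
// entries score higher and are evicted first.
func cacheScore(now, modTime, accTime time.Time, size uint64, hits, maxHits int, sizeMult float64) float64 {
	var age float64
	if modTime.IsZero() {
		// Mod time is not available for pure disk-cache entries; fall back to atime.
		age = float64(now.Unix() - accTime.Unix())
	} else {
		age = float64(now.Unix() - modTime.Unix())
	}
	szWeight := math.Max(0, math.Min(1, float64(size)*sizeMult))
	hitsWeight := 1.0 - math.Max(0, math.Min(1.0, float64(hits)/float64(maxHits)))
	return age * (1 + 0.25*szWeight + 0.25*hitsWeight)
}

func main() {
	now := time.Now()
	// 64 MiB entry, last accessed two hours ago, one hit out of a 10-hit cap.
	fmt.Printf("%.0f\n", cacheScore(now, time.Time{}, now.Add(-2*time.Hour), 64<<20, 1, 10, 1.0/(1<<30)))
}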

View File

@ -51,8 +51,8 @@ type CacheObjectLayer interface {
// Object operations. // Object operations.
GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObject(ctx context.Context, bucket, object string) error DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error)
DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error)
PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObject(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
// Storage operations. // Storage operations.
@ -78,8 +78,7 @@ type cacheObjects struct {
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObjectFn func(ctx context.Context, bucket, object string) error DeleteObjectFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObjectsFn func(ctx context.Context, bucket string, objects []string) ([]error, error)
PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) PutObjectFn func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error)
CopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) CopyObjectFn func(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error)
} }
@ -120,8 +119,8 @@ func (c *cacheObjects) updateMetadataIfChanged(ctx context.Context, dcache *disk
} }
// DeleteObject clears cache entry if backend delete operation succeeds // DeleteObject clears cache entry if backend delete operation succeeds
func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string) (err error) { func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if err = c.DeleteObjectFn(ctx, bucket, object); err != nil { if objInfo, err = c.DeleteObjectFn(ctx, bucket, object, opts); err != nil {
return return
} }
if c.isCacheExclude(bucket, object) || c.skipCache() { if c.isCacheExclude(bucket, object) || c.skipCache() {
@ -130,19 +129,38 @@ func (c *cacheObjects) DeleteObject(ctx context.Context, bucket, object string)
dcache, cerr := c.getCacheLoc(bucket, object) dcache, cerr := c.getCacheLoc(bucket, object)
if cerr != nil { if cerr != nil {
return return objInfo, cerr
} }
dcache.Delete(ctx, bucket, object) dcache.Delete(ctx, bucket, object)
return return
} }
// DeleteObjects batch deletes objects in slice, and clears any cached entries // DeleteObjects batch deletes objects in slice, and clears any cached entries
func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (c *cacheObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
objInfos := make([]ObjectInfo, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = c.DeleteObject(ctx, bucket, object) opts.VersionID = object.VersionID
objInfos[idx], errs[idx] = c.DeleteObject(ctx, bucket, object.ObjectName, opts)
} }
return errs, nil deletedObjects := make([]DeletedObject, len(objInfos))
for idx := range errs {
if errs[idx] != nil {
continue
}
if objInfos[idx].DeleteMarker {
deletedObjects[idx] = DeletedObject{
DeleteMarker: objInfos[idx].DeleteMarker,
DeleteMarkerVersionID: objInfos[idx].VersionID,
}
continue
}
deletedObjects[idx] = DeletedObject{
ObjectName: objInfos[idx].Name,
VersionID: objInfos[idx].VersionID,
}
}
return deletedObjects, errs
} }
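DeleteObjects now reports per-object results instead of a flat error slice: a successful delete yields either a DeletedObject carrying the removed version ID or, when versioning leaves a delete marker behind, a delete-marker entry. A small sketch of that mapping, using stand-in structs rather than the real cmd types:

package main

import "fmt"

// Stand-ins for the ObjectInfo / DeletedObject shapes used in the mapping.
type objInfo struct {
	Name         string
	VersionID    string
	DeleteMarker bool
}

type deletedObject struct {
	ObjectName            string
	VersionID             string
	DeleteMarker          bool
	DeleteMarkerVersionID string
}

// toDeletedObjects converts per-object delete results, skipping entries that
// failed; the caller reports errs[i] for those indexes instead.
func toDeletedObjects(infos []objInfo, errs []error) []deletedObject {
	out := make([]deletedObject, len(infos))
	for i := range errs {
		if errs[i] != nil {
			continue
		}
		if infos[i].DeleteMarker {
			out[i] = deletedObject{DeleteMarker: true, DeleteMarkerVersionID: infos[i].VersionID}
			continue
		}
		out[i] = deletedObject{ObjectName: infos[i].Name, VersionID: infos[i].VersionID}
	}
	return out
}

func main() {
	fmt.Println(toDeletedObjects([]objInfo{{Name: "a", VersionID: "v1"}}, []error{nil}))
}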
// construct a metadata k-v map // construct a metadata k-v map
@ -649,15 +667,8 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjec
GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { GetObjectNInfoFn: func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) return newObjectLayerFn().GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
}, },
DeleteObjectFn: func(ctx context.Context, bucket, object string) error { DeleteObjectFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return newObjectLayerFn().DeleteObject(ctx, bucket, object) return newObjectLayerFn().DeleteObject(ctx, bucket, object, opts)
},
DeleteObjectsFn: func(ctx context.Context, bucket string, objects []string) ([]error, error) {
errs := make([]error, len(objects))
for idx, object := range objects {
errs[idx] = newObjectLayerFn().DeleteObject(ctx, bucket, object)
}
return errs, nil
}, },
PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { PutObjectFn: func(ctx context.Context, bucket, object string, data *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts) return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)

View File

@ -31,6 +31,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/google/uuid"
"github.com/minio/minio-go/v6/pkg/encrypt" "github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto" "github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
@ -82,7 +83,7 @@ func isEncryptedMultipart(objInfo ObjectInfo) bool {
} }
} }
// Further check if this object is uploaded using multipart mechanism // Further check if this object is uploaded using multipart mechanism
// by the user and it is not about XL internally splitting the // by the user and it is not about Erasure internally splitting the
// object into parts in PutObject() // object into parts in PutObject()
return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32) return !(objInfo.backendType == BackendErasure && len(objInfo.ETag) == 32)
} }
@ -859,6 +860,7 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str
var clientKey [32]byte var clientKey [32]byte
var sse encrypt.ServerSide var sse encrypt.ServerSide
opts = ObjectOptions{UserDefined: metadata}
if copySource { if copySource {
if crypto.SSECopy.IsRequested(header) { if crypto.SSECopy.IsRequested(header) {
clientKey, err = crypto.SSECopy.ParseHTTP(header) clientKey, err = crypto.SSECopy.ParseHTTP(header)
@ -868,7 +870,8 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return return
} }
return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(sse), UserDefined: metadata}, nil opts.ServerSideEncryption = encrypt.SSECopy(sse)
return
} }
return return
} }
@ -881,12 +884,13 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str
if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil {
return return
} }
return ObjectOptions{ServerSideEncryption: sse, UserDefined: metadata}, nil opts.ServerSideEncryption = sse
return
} }
if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) { if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) {
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil opts.ServerSideEncryption = encrypt.NewSSE()
} }
return ObjectOptions{UserDefined: metadata}, nil return
} }
// get ObjectOptions for GET calls from encryption headers // get ObjectOptions for GET calls from encryption headers
@ -908,6 +912,19 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
} }
} }
vid := strings.TrimSpace(r.URL.Query().Get("versionId"))
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
logger.LogIf(ctx, err)
return opts, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: vid,
}
}
}
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
key, err := crypto.SSEC.ParseHTTP(r.Header) key, err := crypto.SSEC.ParseHTTP(r.Header)
if err != nil { if err != nil {
@ -916,7 +933,11 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
derivedKey := deriveClientKey(key, bucket, object) derivedKey := deriveClientKey(key, bucket, object)
encryption, err = encrypt.NewSSEC(derivedKey[:]) encryption, err = encrypt.NewSSEC(derivedKey[:])
logger.CriticalIf(ctx, err) logger.CriticalIf(ctx, err)
return ObjectOptions{ServerSideEncryption: encryption, PartNumber: partNumber}, nil return ObjectOptions{
ServerSideEncryption: encryption,
VersionID: vid,
PartNumber: partNumber,
}, nil
} }
// default case of passing encryption headers to backend // default case of passing encryption headers to backend
@ -925,18 +946,21 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
return opts, err return opts, err
} }
opts.PartNumber = partNumber opts.PartNumber = partNumber
opts.VersionID = vid
return opts, nil return opts, nil
} }
// get ObjectOptions for PUT calls from encryption headers and metadata // get ObjectOptions for PUT calls from encryption headers and metadata
func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) { func putOpts(ctx context.Context, r *http.Request, bucket, object string, metadata map[string]string) (opts ObjectOptions, err error) {
versioned := globalBucketVersioningSys.Enabled(bucket)
// In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it // In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it
// is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls // is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls
if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) { if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata}, nil return ObjectOptions{ServerSideEncryption: encrypt.NewSSE(), UserDefined: metadata, Versioned: versioned}, nil
} }
if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) { if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
opts, err = getOpts(ctx, r, bucket, object) opts, err = getOpts(ctx, r, bucket, object)
opts.Versioned = versioned
opts.UserDefined = metadata opts.UserDefined = metadata
return return
} }
@ -949,10 +973,15 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
if err != nil { if err != nil {
return ObjectOptions{}, err return ObjectOptions{}, err
} }
return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata}, nil return ObjectOptions{ServerSideEncryption: sseKms, UserDefined: metadata, Versioned: versioned}, nil
} }
// default case of passing encryption headers and UserDefined metadata to backend // default case of passing encryption headers and UserDefined metadata to backend
return getDefaultOpts(r.Header, false, metadata) opts, err = getDefaultOpts(r.Header, false, metadata)
if err != nil {
return opts, err
}
opts.Versioned = versioned
return opts, nil
} }
// get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata // get ObjectOptions for Copy calls with encryption headers provided on the target side and source side metadata
@ -981,5 +1010,9 @@ func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (O
} }
// default case of passing encryption headers to backend // default case of passing encryption headers to backend
return getDefaultOpts(r.Header, true, nil) opts, err := getDefaultOpts(r.Header, false, nil)
if err != nil {
return opts, err
}
return opts, nil
} }
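Request-level object options now carry a versionId taken from the query string; anything that is neither empty nor the literal "null" must parse as a UUID, otherwise the request fails with VersionNotFound. A standalone sketch of that validation, assuming github.com/google/uuid (the import added above); the helper and error names are illustrative only:

package main

import (
	"errors"
	"fmt"
	"net/url"

	"github.com/google/uuid"
)

const nullVersionID = "null"

var errVersionNotFound = errors.New("version not found")

// versionIDFromQuery extracts and validates the versionId query parameter.
func versionIDFromQuery(q url.Values) (string, error) {
	vid := q.Get("versionId")
	if vid == "" || vid == nullVersionID {
		return vid, nil
	}
	if _, err := uuid.Parse(vid); err != nil {
		return "", errVersionNotFound
	}
	return vid, nil
}

func main() {
	q, _ := url.ParseQuery("versionId=8c2f0d9e-1b2a-4c3d-9e8f-0a1b2c3d4e5f")
	fmt.Println(versionIDFromQuery(q))
}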

View File

@ -547,9 +547,9 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints") return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints")
} }
// Return XL setup when all endpoints are path style. // Return Erasure setup when all endpoints are path style.
if endpoints[0].Type() == PathEndpointType { if endpoints[0].Type() == PathEndpointType {
setupType = XLSetupType setupType = ErasureSetupType
return endpoints, setupType, nil return endpoints, setupType, nil
} }
@ -614,18 +614,18 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
// All endpoints are pointing to local host // All endpoints are pointing to local host
if len(endpoints) == localEndpointCount { if len(endpoints) == localEndpointCount {
// If all endpoints have same port number, Just treat it as distXL setup // If all endpoints have same port number, Just treat it as distErasure setup
// using URL style endpoints. // using URL style endpoints.
if len(localPortSet) == 1 { if len(localPortSet) == 1 {
if len(localServerHostSet) > 1 { if len(localServerHostSet) > 1 {
return endpoints, setupType, return endpoints, setupType,
config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips") config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips")
} }
return endpoints, DistXLSetupType, nil return endpoints, DistErasureSetupType, nil
} }
// Even though all endpoints are local, but those endpoints use different ports. // Even though all endpoints are local, but those endpoints use different ports.
// This means it is DistXL setup. // This means it is DistErasure setup.
} }
// Add missing port in all endpoints. // Add missing port in all endpoints.
@ -645,7 +645,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
} }
// Error out if we have less than 2 unique servers. // Error out if we have less than 2 unique servers.
if len(uniqueArgs.ToSlice()) < 2 && setupType == DistXLSetupType { if len(uniqueArgs.ToSlice()) < 2 && setupType == DistErasureSetupType {
err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints) err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints)
return endpoints, setupType, err return endpoints, setupType, err
} }
@ -655,7 +655,7 @@ func CreateEndpoints(serverAddr string, foundLocal bool, args ...[]string) (Endp
updateDomainIPs(uniqueArgs) updateDomainIPs(uniqueArgs)
} }
setupType = DistXLSetupType setupType = DistErasureSetupType
return endpoints, setupType, nil return endpoints, setupType, nil
} }
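The classification above boils down to: a single endpoint stays standalone FS, path-style endpoints give a single-node erasure setup, and URL-style endpoints (local or spanning servers) become a distributed erasure setup, with additional checks on ports and unique hosts. A deliberately compressed sketch of that decision, independent of the real Endpoint type and omitting the validation errors:

package main

import "fmt"

type setupType string

const (
	fsSetup          setupType = "FS"
	erasureSetup     setupType = "Erasure"
	distErasureSetup setupType = "DistErasure"
)

// classify is a toy version of the CreateEndpoints decision based only on the
// endpoint count and whether the endpoints are path style.
func classify(endpointCount int, pathStyle bool) setupType {
	switch {
	case endpointCount == 1:
		return fsSetup
	case pathStyle:
		return erasureSetup
	default:
		return distErasureSetup
	}
}

func main() {
	fmt.Println(classify(4, true))  // Erasure
	fmt.Println(classify(4, false)) // DistErasure
}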

View File

@ -232,71 +232,71 @@ func TestCreateEndpoints(t *testing.T) {
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil}, {"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}}, FSSetupType, nil},
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")}, {"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
// XL Setup with PathEndpointType // Erasure Setup with PathEndpointType
{":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234", {":1234", [][]string{{"/d1", "/d2", "/d3", "/d4"}}, ":1234",
Endpoints{ Endpoints{
Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d1")}, IsLocal: true},
Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d2")}, IsLocal: true},
Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d3")}, IsLocal: true},
Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true}, Endpoint{URL: &url.URL{Path: mustAbs("/d4")}, IsLocal: true},
}, XLSetupType, nil}, }, ErasureSetupType, nil},
// DistXL Setup with URLEndpointType // DistErasure Setup with URLEndpointType
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{ {":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d2"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d3"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d4"}, IsLocal: true}, Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d4"}, IsLocal: true},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
// DistXL Setup with URLEndpointType having mixed naming to local host. // DistErasure Setup with URLEndpointType having mixed naming to local host.
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")}, {"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")},
{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")}, {":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")}, {":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},
// DistXL type // DistErasure type
{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ {"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]}, Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]},
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]}, Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]},
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]}, Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]},
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]}, Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{ {"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]}, Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]},
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]}, Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]},
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]}, Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]},
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]}, Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{ {":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]}, Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]},
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]}, Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]},
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]}, Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]},
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]}, Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{ {":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]}, Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]},
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]}, Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]},
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]}, Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]},
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]}, Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{ {":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]}, Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]},
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]}, Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]},
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]}, Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]},
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]}, Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
// DistXL Setup using only local host. // DistErasure Setup using only local host.
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{ {":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]}, Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]},
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]}, Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]},
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]}, Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]},
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]}, Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]},
}, DistXLSetupType, nil}, }, DistErasureSetupType, nil},
} }
for _, testCase := range testCases { for _, testCase := range testCases {

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -22,7 +22,6 @@ import (
"github.com/minio/minio-go/v6/pkg/s3utils" "github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
) )
@ -35,13 +34,13 @@ var bucketMetadataOpIgnoredErrs = append(bucketOpIgnoredErrs, errVolumeNotFound)
/// Bucket operations /// Bucket operations
// MakeBucket - make a bucket. // MakeBucket - make a bucket.
func (xl xlObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { func (er erasureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
// Verify if bucket is valid. // Verify if bucket is valid.
if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil { if err := s3utils.CheckValidBucketNameStrict(bucket); err != nil {
return BucketNameInvalid{Bucket: bucket} return BucketNameInvalid{Bucket: bucket}
} }
storageDisks := xl.getDisks() storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
@ -86,9 +85,9 @@ func undoDeleteBucket(storageDisks []StorageAPI, bucket string) {
} }
// getBucketInfo - returns the BucketInfo from one of the load balanced disks. // getBucketInfo - returns the BucketInfo from one of the load balanced disks.
func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) { func (er erasureObjects) getBucketInfo(ctx context.Context, bucketName string) (bucketInfo BucketInfo, err error) {
var bucketErrs []error var bucketErrs []error
for _, disk := range xl.getLoadBalancedDisks() { for _, disk := range er.getLoadBalancedDisks() {
if disk == nil { if disk == nil {
bucketErrs = append(bucketErrs, errDiskNotFound) bucketErrs = append(bucketErrs, errDiskNotFound)
continue continue
@ -110,13 +109,13 @@ func (xl xlObjects) getBucketInfo(ctx context.Context, bucketName string) (bucke
// reduce to one error based on read quorum. // reduce to one error based on read quorum.
// `nil` is deliberately passed for ignoredErrs // `nil` is deliberately passed for ignoredErrs
// because these errors were already ignored. // because these errors were already ignored.
readQuorum := getReadQuorum(len(xl.getDisks())) readQuorum := getReadQuorum(len(er.getDisks()))
return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum) return BucketInfo{}, reduceReadQuorumErrs(ctx, bucketErrs, nil, readQuorum)
} }
// GetBucketInfo - returns BucketInfo for a bucket. // GetBucketInfo - returns BucketInfo for a bucket.
func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) { func (er erasureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
bucketInfo, err := xl.getBucketInfo(ctx, bucket) bucketInfo, err := er.getBucketInfo(ctx, bucket)
if err != nil { if err != nil {
return bi, toObjectErr(err, bucket) return bi, toObjectErr(err, bucket)
} }
@ -124,8 +123,8 @@ func (xl xlObjects) GetBucketInfo(ctx context.Context, bucket string) (bi Bucket
} }
// listBuckets - returns list of all buckets from a disk picked at random. // listBuckets - returns list of all buckets from a disk picked at random.
func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) { func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo, err error) {
for _, disk := range xl.getLoadBalancedDisks() { for _, disk := range er.getLoadBalancedDisks() {
if disk == nil { if disk == nil {
continue continue
} }
@ -161,8 +160,8 @@ func (xl xlObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketInfo,
} }
// ListBuckets - lists all the buckets, sorted by its name. // ListBuckets - lists all the buckets, sorted by its name.
func (xl xlObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) { func (er erasureObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
bucketInfos, err := xl.listBuckets(ctx) bucketInfos, err := er.listBuckets(ctx)
if err != nil { if err != nil {
return nil, toObjectErr(err) return nil, toObjectErr(err)
} }
@ -196,9 +195,9 @@ func deleteDanglingBucket(ctx context.Context, storageDisks []StorageAPI, dErrs
} }
// DeleteBucket - deletes a bucket. // DeleteBucket - deletes a bucket.
func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { func (er erasureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
// Collect if all disks report volume not found. // Collect if all disks report volume not found.
storageDisks := xl.getDisks() storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
@ -235,7 +234,7 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete
writeQuorum := getWriteQuorum(len(storageDisks)) writeQuorum := getWriteQuorum(len(storageDisks))
err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum) err := reduceWriteQuorumErrs(ctx, dErrs, bucketOpIgnoredErrs, writeQuorum)
if err == errXLWriteQuorum { if err == errErasureWriteQuorum {
undoDeleteBucket(storageDisks, bucket) undoDeleteBucket(storageDisks, bucket)
} }
if err != nil { if err != nil {
@ -251,25 +250,26 @@ func (xl xlObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete
} }
// IsNotificationSupported returns whether bucket notification is applicable for this layer. // IsNotificationSupported returns whether bucket notification is applicable for this layer.
func (xl xlObjects) IsNotificationSupported() bool { func (er erasureObjects) IsNotificationSupported() bool {
return true return true
} }
// IsListenBucketSupported returns whether listen bucket notification is applicable for this layer. // IsListenBucketSupported returns whether listen bucket notification is applicable for this layer.
func (xl xlObjects) IsListenBucketSupported() bool { func (er erasureObjects) IsListenBucketSupported() bool {
return true return true
} }
// IsEncryptionSupported returns whether server side encryption is implemented for this layer. // IsEncryptionSupported returns whether server side encryption is implemented for this layer.
func (xl xlObjects) IsEncryptionSupported() bool { func (er erasureObjects) IsEncryptionSupported() bool {
return true return true
} }
// IsCompressionSupported returns whether compression is applicable for this layer. // IsCompressionSupported returns whether compression is applicable for this layer.
func (xl xlObjects) IsCompressionSupported() bool { func (er erasureObjects) IsCompressionSupported() bool {
return true return true
} }
func (xl xlObjects) IsTaggingSupported() bool { // IsTaggingSupported indicates whether erasureObjects implements tagging support.
func (er erasureObjects) IsTaggingSupported() bool {
return true return true
} }

cmd/erasure-coding.go Normal file (143 lines)
View File

@ -0,0 +1,143 @@
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"sync"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger"
)
// Erasure - erasure encoding details.
type Erasure struct {
encoder func() reedsolomon.Encoder
dataBlocks, parityBlocks int
blockSize int64
}
// NewErasure creates a new ErasureStorage.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) {
// Check the parameters for sanity now.
if dataBlocks <= 0 || parityBlocks <= 0 {
return e, reedsolomon.ErrInvShardNum
}
if dataBlocks+parityBlocks > 256 {
return e, reedsolomon.ErrMaxShardNum
}
e = Erasure{
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
blockSize: blockSize,
}
// Encoder when needed.
var enc reedsolomon.Encoder
var once sync.Once
e.encoder = func() reedsolomon.Encoder {
once.Do(func() {
e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize())))
if err != nil {
// Error conditions should be checked above.
panic(err)
}
enc = e
})
return enc
}
return
}
// EncodeData encodes the given data and returns the erasure-coded data.
// It returns an error if the erasure coding failed.
func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) {
if len(data) == 0 {
return make([][]byte, e.dataBlocks+e.parityBlocks), nil
}
encoded, err := e.encoder().Split(data)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
if err = e.encoder().Encode(encoded); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
return encoded, nil
}
// DecodeDataBlocks decodes the given erasure-coded data.
// It only decodes the data blocks but does not verify them.
// It returns an error if the decoding failed.
func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
var isZero = 0
for _, b := range data[:] {
if len(b) == 0 {
isZero++
break
}
}
if isZero == 0 || isZero == len(data) {
// If all are zero, payload is 0 bytes.
return nil
}
return e.encoder().ReconstructData(data)
}
// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed.
func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
if err := e.encoder().Reconstruct(data); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil
}
// ShardSize - returns actual shard size from erasure blockSize.
func (e *Erasure) ShardSize() int64 {
return ceilFrac(e.blockSize, int64(e.dataBlocks))
}
// ShardFileSize - returns final erasure size from original size.
func (e *Erasure) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.blockSize
lastBlockSize := totalLength % int64(e.blockSize)
lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardFileOffset - returns the effective offset where erasure reading begins.
func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64 {
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + int64(length)) / e.blockSize
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
}
return tillOffset
}
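The shard arithmetic in this file is plain ceiling division: each blockSize-byte erasure block is split across dataBlocks shards, and the per-disk file length is the sum of full shards plus one short shard for the tail. A small worked sketch; ceilDiv is a local helper standing in for the package's ceilFrac:

package main

import "fmt"

func ceilDiv(a, b int64) int64 { return (a + b - 1) / b }

// shardSize: bytes each disk stores per full erasure block.
func shardSize(blockSize int64, dataBlocks int) int64 {
	return ceilDiv(blockSize, int64(dataBlocks))
}

// shardFileSize: per-disk size of an object of totalLength bytes.
func shardFileSize(totalLength, blockSize int64, dataBlocks int) int64 {
	if totalLength <= 0 {
		return totalLength // 0 stays 0, -1 means unknown length
	}
	numBlocks := totalLength / blockSize
	lastBlock := totalLength % blockSize
	return numBlocks*shardSize(blockSize, dataBlocks) + ceilDiv(lastBlock, int64(dataBlocks))
}

func main() {
	// 10 MiB object, 1 MiB blocks, 8 data drives:
	// 10 full blocks of 131072 bytes per drive = 1310720 bytes per shard file.
	fmt.Println(shardFileSize(10<<20, 1<<20, 8))
}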

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -24,8 +24,8 @@ import (
) )
// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice. // getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) { func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
disks := xl.getDisks() disks := er.getDisks()
// Based on the random shuffling return back randomized disks. // Based on the random shuffling return back randomized disks.
for _, i := range hashOrder(UTCNow().String(), len(disks)) { for _, i := range hashOrder(UTCNow().String(), len(disks)) {
newDisks = append(newDisks, disks[i-1]) newDisks = append(newDisks, disks[i-1])
@ -36,13 +36,13 @@ func (xl xlObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
// This function does the following check, suppose // This function does the following check, suppose
// object is "a/b/c/d", stat makes sure that objects ""a/b/c"" // object is "a/b/c/d", stat makes sure that objects ""a/b/c""
// "a/b" and "a" do not exist. // "a/b" and "a" do not exist.
func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool { func (er erasureObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
var isParentDirObject func(string) bool var isParentDirObject func(string) bool
isParentDirObject = func(p string) bool { isParentDirObject = func(p string) bool {
if p == "." || p == SlashSeparator { if p == "." || p == SlashSeparator {
return false return false
} }
if xl.isObject(bucket, p) { if er.isObject(ctx, bucket, p) {
// If there is already a file at prefix "p", return true. // If there is already a file at prefix "p", return true.
return true return true
} }
@ -53,9 +53,9 @@ func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string
} }
// isObject - returns `true` if the prefix is an object i.e if // isObject - returns `true` if the prefix is an object i.e if
// `xl.json` exists at the leaf, false otherwise. // `xl.meta` exists at the leaf, false otherwise.
func (xl xlObjects) isObject(bucket, prefix string) (ok bool) { func (er erasureObjects) isObject(ctx context.Context, bucket, prefix string) (ok bool) {
storageDisks := xl.getDisks() storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
@ -66,22 +66,15 @@ func (xl xlObjects) isObject(bucket, prefix string) (ok bool) {
return errDiskNotFound return errDiskNotFound
} }
// Check if 'prefix' is an object on this 'disk', else continue the check the next disk // Check if 'prefix' is an object on this 'disk', else continue the check the next disk
fi, err := storageDisks[index].StatFile(bucket, pathJoin(prefix, xlMetaJSONFile)) return storageDisks[index].CheckFile(bucket, prefix)
if err != nil {
return err
}
if fi.Size == 0 {
return errCorruptedFormat
}
return nil
}, index) }, index)
} }
// NOTE: Observe we are not trying to read `xl.json` and figure out the actual // NOTE: Observe we are not trying to read `xl.meta` and figure out the actual
// quorum intentionally, but rely on the default case scenario. Actual quorum // quorum intentionally, but rely on the default case scenario. Actual quorum
// verification will happen by top layer by using getObjectInfo() and will be // verification will happen by top layer by using getObjectInfo() and will be
// ignored if necessary. // ignored if necessary.
readQuorum := getReadQuorum(len(storageDisks)) readQuorum := getReadQuorum(len(storageDisks))
return reduceReadQuorumErrs(GlobalContext, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil return reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum) == nil
} }
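isObject now asks every disk to CheckFile the prefix and reduces the per-disk errors against the read quorum: the prefix counts as an object if at least a quorum of disks returned no error. A self-contained sketch of that reduction; the real reduceReadQuorumErrs also classifies and prioritizes error types, which is omitted here:

package main

import (
	"errors"
	"fmt"
)

var errReadQuorum = errors.New("read failed, insufficient number of disks online")

// reduceQuorumErrs returns nil when at least quorum entries in errs are nil,
// otherwise it reports a quorum failure.
func reduceQuorumErrs(errs []error, quorum int) error {
	ok := 0
	for _, err := range errs {
		if err == nil {
			ok++
		}
	}
	if ok >= quorum {
		return nil
	}
	return errReadQuorum
}

func main() {
	errs := []error{nil, nil, errors.New("disk not found"), nil}
	// Read quorum for 4 disks is 2, so the prefix is reported as an object.
	fmt.Println(reduceQuorumErrs(errs, 2) == nil)
}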

View File

@ -24,13 +24,13 @@ import (
) )
// Tests for if parent directory is object // Tests for if parent directory is object
func TestXLParentDirIsObject(t *testing.T) { func TestErasureParentDirIsObject(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
obj, fsDisks, err := prepareXL16(ctx) obj, fsDisks, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatalf("Unable to initialize 'XL' object layer.") t.Fatalf("Unable to initialize 'Erasure' object layer.")
} }
// Remove all disks. // Remove all disks.
@ -41,7 +41,7 @@ func TestXLParentDirIsObject(t *testing.T) {
bucketName := "testbucket" bucketName := "testbucket"
objectName := "object" objectName := "object"
if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
objectContent := "12345" objectContent := "12345"
@ -54,7 +54,7 @@ func TestXLParentDirIsObject(t *testing.T) {
t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName) t.Fatalf("Unexpected object name returned got %s, expected %s", objInfo.Name, objectName)
} }
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
testCases := []struct { testCases := []struct {
parentIsObject bool parentIsObject bool

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -191,7 +191,7 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
return newBuf, nil return newBuf, nil
} }
return nil, errXLReadQuorum return nil, errErasureReadQuorum
} }
type errDecodeHealRequired struct { type errDecodeHealRequired struct {

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -132,7 +132,7 @@ func TestErasureDecode(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
} }
@ -163,7 +163,7 @@ func TestErasureDecode(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
tillOffset := erasure.ShardFileTillOffset(test.offset, test.length, test.data) tillOffset := erasure.ShardFileOffset(test.offset, test.length, test.data)
bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize()) bitrotReaders[index] = newBitrotReader(disk, "testbucket", "object", tillOffset, writeAlgorithm, bitrotWriterSum(writers[index]), erasure.ShardSize())
} }
for j := range disks[:test.offDisks] { for j := range disks[:test.offDisks] {
@ -268,7 +268,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
tillOffset := erasure.ShardFileTillOffset(offset, readLen, length) tillOffset := erasure.ShardFileOffset(offset, readLen, length)
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
} }
err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil) err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil)
@ -330,7 +330,7 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
if writers[index] == nil { if writers[index] == nil {
continue continue
} }
tillOffset := erasure.ShardFileTillOffset(0, size, size) tillOffset := erasure.ShardFileOffset(0, size, size)
bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) bitrotReaders[index] = newStreamingBitrotReader(disk, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
} }
if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size, nil); err != nil { if err = erasure.Decode(context.Background(), bytes.NewBuffer(content[:0]), bitrotReaders, 0, size, size, nil); err != nil {

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

View File

@ -18,11 +18,11 @@ package cmd
import "errors" import "errors"
// errXLReadQuorum - did not meet read quorum. // errErasureReadQuorum - did not meet read quorum.
var errXLReadQuorum = errors.New("Read failed. Insufficient number of disks online") var errErasureReadQuorum = errors.New("Read failed. Insufficient number of disks online")
// errXLWriteQuorum - did not meet write quorum. // errErasureWriteQuorum - did not meet write quorum.
var errXLWriteQuorum = errors.New("Write failed. Insufficient number of disks online") var errErasureWriteQuorum = errors.New("Write failed. Insufficient number of disks online")
// errNoHealRequired - returned when healing is attempted on previously healed disks. // errNoHealRequired - returned when healing is attempted on previously healed disks.
var errNoHealRequired = errors.New("No healing is required") var errNoHealRequired = errors.New("No healing is required")
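The XL-prefixed quorum sentinels are renamed to their Erasure-prefixed equivalents. These are the errors that per-disk error slices collapse into when not enough disks respond; the read-side reduction (reduceReadQuorumErrs) appears further down in this diff. Below is a minimal illustrative sketch of that pattern; the helper name and signature are stand-ins, not the package's actual reducers.

// reduceQuorumErrs (illustrative): find the most common per-disk error,
// return it if it reaches quorum (nil meaning enough disks succeeded),
// otherwise collapse to the supplied quorum error.
func reduceQuorumErrs(errs []error, ignored []error, quorum int, quorumErr error) error {
	counts := make(map[error]int)
	for _, err := range errs {
		skip := false
		for _, ig := range ignored {
			if err == ig {
				skip = true
				break
			}
		}
		if !skip {
			counts[err]++
		}
	}
	var maxErr error
	maxCount := 0
	for err, n := range counts {
		if n > maxCount {
			maxCount, maxErr = n, err
		}
	}
	if maxCount >= quorum {
		return maxErr // nil when a quorum of disks succeeded
	}
	return quorumErr // e.g. errErasureReadQuorum or errErasureWriteQuorum
}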

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -71,7 +71,7 @@ func TestErasureHeal(t *testing.T) {
// create some test data // create some test data
setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) setup, err := newErasureTestSetup(test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)
if err != nil { if err != nil {
t.Fatalf("Test %d: failed to setup XL environment: %v", i, err) t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err)
} }
disks := setup.disks disks := setup.disks
erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize)

View File

@ -18,10 +18,8 @@ package cmd
import ( import (
"context" "context"
"fmt"
"time" "time"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
) )
@ -31,7 +29,7 @@ func commonTime(modTimes []time.Time) (modTime time.Time, count int) {
timeOccurenceMap := make(map[time.Time]int) timeOccurenceMap := make(map[time.Time]int)
// Ignore the uuid sentinel and count the rest. // Ignore the uuid sentinel and count the rest.
for _, time := range modTimes { for _, time := range modTimes {
if time == timeSentinel { if time.Equal(timeSentinel) {
continue continue
} }
timeOccurenceMap[time]++ timeOccurenceMap[time]++
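Note the switch from == to time.Equal when comparing against timeSentinel (and, below, against the quorum modTime): two time.Time values can denote the same instant yet compare unequal with == because their location or monotonic-clock components differ. A self-contained illustration, with arbitrary example values:

package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Date(2020, 6, 12, 0, 0, 0, 0, time.UTC)
	t2 := t1.In(time.FixedZone("PDT", -7*3600)) // same instant, different location
	fmt.Println(t1 == t2)     // false: struct representation differs
	fmt.Println(t1.Equal(t2)) // true: same instant in time
}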
@ -61,45 +59,45 @@ func bootModtimes(diskCount int) []time.Time {
return modTimes return modTimes
} }
// Extracts list of times from xlMetaV1 slice and returns, skips // Extracts list of times from FileInfo slice and returns, skips
// slice elements which have errors. // slice elements which have errors.
func listObjectModtimes(partsMetadata []xlMetaV1, errs []error) (modTimes []time.Time) { func listObjectModtimes(partsMetadata []FileInfo, errs []error) (modTimes []time.Time) {
modTimes = bootModtimes(len(partsMetadata)) modTimes = bootModtimes(len(partsMetadata))
for index, metadata := range partsMetadata { for index, metadata := range partsMetadata {
if errs[index] != nil { if errs[index] != nil {
continue continue
} }
// Once the file is found, save the uuid saved on disk. // Once the file is found, save the uuid saved on disk.
modTimes[index] = metadata.Stat.ModTime modTimes[index] = metadata.ModTime
} }
return modTimes return modTimes
} }
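Condensed, the selection logic built on listObjectModtimes and commonTime keeps only the disks whose metadata carries the maximally occurring ModTime. The following standalone sketch shows that idea with simplified types and assumes `import "time"`; the real listOnlineDisks below operates on []StorageAPI and []FileInfo.

// selectByCommonModTime is an illustrative reduction of the helpers above.
func selectByCommonModTime(disks []string, modTimes []time.Time) (online []string, quorumModTime time.Time) {
	counts := make(map[time.Time]int)
	for _, t := range modTimes {
		counts[t]++
	}
	best := 0
	for t, n := range counts {
		if n > best {
			best, quorumModTime = n, t
		}
	}
	online = make([]string, len(disks))
	for i, t := range modTimes {
		if t.Equal(quorumModTime) {
			online[i] = disks[i] // keep; the zero value marks an excluded disk
		}
	}
	return online, quorumModTime
}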
// Notes: // Notes:
// There are 5 possible states a disk could be in, // There are 5 possible states a disk could be in,
// 1. __online__ - has the latest copy of xl.json - returned by listOnlineDisks // 1. __online__ - has the latest copy of xl.meta - returned by listOnlineDisks
// //
// 2. __offline__ - err == errDiskNotFound // 2. __offline__ - err == errDiskNotFound
// //
// 3. __availableWithParts__ - has the latest copy of xl.json and has all // 3. __availableWithParts__ - has the latest copy of xl.meta and has all
// parts with checksums matching; returned by disksWithAllParts // parts with checksums matching; returned by disksWithAllParts
// //
// 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI // 4. __outdated__ - returned by outDatedDisk, provided []StorageAPI
// returned by diskWithAllParts is passed for latestDisks. // returned by diskWithAllParts is passed for latestDisks.
// - has an old copy of xl.json // - has an old copy of xl.meta
// - doesn't have xl.json (errFileNotFound) // - doesn't have xl.meta (errFileNotFound)
// - has the latest xl.json but one or more parts are corrupt // - has the latest xl.meta but one or more parts are corrupt
// //
// 5. __missingParts__ - has the latest copy of xl.json but has some parts // 5. __missingParts__ - has the latest copy of xl.meta but has some parts
// missing. This is identified separately since this may need manual // missing. This is identified separately since this may need manual
// inspection to understand the root cause. E.g., this could be due to // inspection to understand the root cause. E.g., this could be due to
// backend filesystem corruption. // backend filesystem corruption.
// listOnlineDisks - returns // listOnlineDisks - returns
// - a slice of disks where disk having 'older' xl.json (or nothing) // - a slice of disks where disk having 'older' xl.meta (or nothing)
// are set to nil. // are set to nil.
// - latest (in time) of the maximally occurring modTime(s). // - latest (in time) of the maximally occurring modTime(s).
func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error) (onlineDisks []StorageAPI, modTime time.Time) { func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error) (onlineDisks []StorageAPI, modTime time.Time) {
onlineDisks = make([]StorageAPI, len(disks)) onlineDisks = make([]StorageAPI, len(disks))
// List all the file commit ids from parts metadata. // List all the file commit ids from parts metadata.
@ -110,7 +108,7 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error)
// Create a new online disks slice, which have common uuid. // Create a new online disks slice, which have common uuid.
for index, t := range modTimes { for index, t := range modTimes {
if t == modTime { if t.Equal(modTime) {
onlineDisks[index] = disks[index] onlineDisks[index] = disks[index]
} else { } else {
onlineDisks[index] = nil onlineDisks[index] = nil
@ -119,89 +117,67 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []xlMetaV1, errs []error)
return onlineDisks, modTime return onlineDisks, modTime
} }
// Returns the latest updated xlMeta files and error in case of failure. // Returns the latest updated FileInfo files and error in case of failure.
func getLatestXLMeta(ctx context.Context, partsMetadata []xlMetaV1, errs []error) (xlMetaV1, error) { func getLatestFileInfo(ctx context.Context, partsMetadata []FileInfo, errs []error) (FileInfo, error) {
// There should be at least half correct entries, if not, return failure // There should be at least half correct entries, if not, return failure
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2); reducedErr != nil { if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, len(partsMetadata)/2); reducedErr != nil {
return xlMetaV1{}, reducedErr return FileInfo{}, reducedErr
} }
// List all the file commit ids from parts metadata. // List all the file commit ids from parts metadata.
modTimes := listObjectModtimes(partsMetadata, errs) modTimes := listObjectModtimes(partsMetadata, errs)
// Count all latest updated xlMeta values // Count all latest updated FileInfo values
var count int var count int
var latestXLMeta xlMetaV1 var latestFileInfo FileInfo
// Reduce list of UUIDs to a single common value - i.e. the last updated Time // Reduce list of UUIDs to a single common value - i.e. the last updated Time
modTime, _ := commonTime(modTimes) modTime, _ := commonTime(modTimes)
// Iterate through all the modTimes and count the xlMeta(s) with latest time. // Iterate through all the modTimes and count the FileInfo(s) with latest time.
for index, t := range modTimes { for index, t := range modTimes {
if t == modTime && partsMetadata[index].IsValid() { if t.Equal(modTime) && partsMetadata[index].IsValid() {
latestXLMeta = partsMetadata[index] latestFileInfo = partsMetadata[index]
count++ count++
} }
} }
if count < len(partsMetadata)/2 { if count < len(partsMetadata)/2 {
return xlMetaV1{}, errXLReadQuorum return FileInfo{}, errErasureReadQuorum
} }
return latestXLMeta, nil return latestFileInfo, nil
} }
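To make the quorum arithmetic concrete (the numbers are illustrative): with 16 disks, the preceding reduceReadQuorumErrs call rejects the read when failures dominate across half the disks; getLatestFileInfo then additionally requires that at least half of the FileInfo entries (8 of 16) are valid and share the winning ModTime, otherwise it returns errErasureReadQuorum.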
// disksWithAllParts - This function needs to be called with // disksWithAllParts - This function needs to be called with
// []StorageAPI returned by listOnlineDisks. Returns, // []StorageAPI returned by listOnlineDisks. Returns,
// //
// - disks which have all parts specified in the latest xl.json. // - disks which have all parts specified in the latest xl.meta.
// //
// - slice of errors about the state of data files on disk - can have // - slice of errors about the state of data files on disk - can have
// a not-found error or a hash-mismatch error. // a not-found error or a hash-mismatch error.
func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []xlMetaV1, errs []error, bucket, func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, errs []error, bucket,
object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) { object string, scanMode madmin.HealScanMode) ([]StorageAPI, []error) {
availableDisks := make([]StorageAPI, len(onlineDisks)) availableDisks := make([]StorageAPI, len(onlineDisks))
dataErrs := make([]error, len(onlineDisks)) dataErrs := make([]error, len(onlineDisks))
for i, onlineDisk := range onlineDisks { for i, onlineDisk := range onlineDisks {
if onlineDisk == nil { if errs[i] != nil {
dataErrs[i] = errs[i] dataErrs[i] = errs[i]
continue continue
} }
if onlineDisk == nil {
dataErrs[i] = errDiskNotFound
continue
}
switch scanMode { switch scanMode {
case madmin.HealDeepScan: case madmin.HealDeepScan:
erasure := partsMetadata[i].Erasure // disk has a valid xl.meta but may not have all the
// disk has a valid xl.json but may not have all the
// parts. This is considered an outdated disk, since // parts. This is considered an outdated disk, since
// it needs healing too. // it needs healing too.
for _, part := range partsMetadata[i].Parts { dataErrs[i] = onlineDisk.VerifyFile(bucket, object, partsMetadata[i])
checksumInfo := erasure.GetChecksumInfo(part.Number)
partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number))
err := onlineDisk.VerifyFile(bucket, partPath, erasure.ShardFileSize(part.Size), checksumInfo.Algorithm, checksumInfo.Hash, erasure.ShardSize())
if err != nil {
if !IsErr(err, []error{
errFileNotFound,
errVolumeNotFound,
errFileCorrupt,
}...) {
logger.GetReqInfo(ctx).AppendTags("disk", onlineDisk.String())
logger.LogIf(ctx, err)
}
dataErrs[i] = err
break
}
}
case madmin.HealNormalScan: case madmin.HealNormalScan:
for _, part := range partsMetadata[i].Parts { dataErrs[i] = onlineDisk.CheckParts(bucket, object, partsMetadata[i])
partPath := pathJoin(object, fmt.Sprintf("part.%d", part.Number))
_, err := onlineDisk.StatFile(bucket, partPath)
if err != nil {
dataErrs[i] = err
break
}
}
} }
if dataErrs[i] == nil { if dataErrs[i] == nil {
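The per-part verification loops are replaced by single storage-layer calls: a deep scan bitrot-verifies every part via VerifyFile, while a normal scan only checks part presence via CheckParts, with the FileInfo telling the disk where the parts live (DataDir). A hedged sketch of that dispatch; the wrapper itself is hypothetical, only the two StorageAPI methods come from the diff above.

// verifyOnDisk (illustrative) picks the verification depth for one disk.
func verifyOnDisk(disk StorageAPI, bucket, object string, fi FileInfo, deep bool) error {
	if deep {
		// Reads the data and validates bitrot checksums for every part.
		return disk.VerifyFile(bucket, object, fi)
	}
	// Only checks that the part files are present; no data is read.
	return disk.CheckParts(bucket, object, fi)
}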

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -95,9 +95,9 @@ func TestListOnlineDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
obj, disks, err := prepareXL16(ctx) obj, disks, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatalf("Prepare XL backend failed - %v", err) t.Fatalf("Prepare Erasure backend failed - %v", err)
} }
defer removeRoots(disks) defer removeRoots(disks)
@ -141,9 +141,9 @@ func TestListOnlineDisks(t *testing.T) {
modTimes: modTimesThreeNone, modTimes: modTimesThreeNone,
expectedTime: threeNanoSecs, expectedTime: threeNanoSecs,
errs: []error{ errs: []error{
// Disks that have a valid xl.json. // Disks that have a valid xl.meta.
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
// Majority of disks don't have xl.json. // Majority of disks don't have xl.meta.
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound,
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound,
errFileNotFound, errDiskAccessDenied, errFileNotFound, errDiskAccessDenied,
@ -156,9 +156,9 @@ func TestListOnlineDisks(t *testing.T) {
modTimes: modTimesThreeNone, modTimes: modTimesThreeNone,
expectedTime: threeNanoSecs, expectedTime: threeNanoSecs,
errs: []error{ errs: []error{
// Disks that have a valid xl.json. // Disks that have a valid xl.meta.
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
// Majority of disks don't have xl.json. // Majority of disks don't have xl.meta.
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound,
errFileNotFound, errFileNotFound, errFileNotFound, errFileNotFound,
errFileNotFound, errDiskAccessDenied, errFileNotFound, errDiskAccessDenied,
@ -170,27 +170,34 @@ func TestListOnlineDisks(t *testing.T) {
} }
bucket := "bucket" bucket := "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatalf("Failed to make a bucket %v", err)
}
object := "object" object := "object"
data := bytes.Repeat([]byte("a"), 1024) data := bytes.Repeat([]byte("a"), 1024)
z := obj.(*xlZones) z := obj.(*erasureZones)
xlDisks := z.zones[0].sets[0].getDisks() erasureDisks := z.zones[0].sets[0].getDisks()
for i, test := range testCases { for i, test := range testCases {
// Prepare bucket/object backend for the tests below. _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
// Cleanup from previous test.
obj.DeleteObject(GlobalContext, bucket, object)
obj.DeleteBucket(GlobalContext, bucket, false)
err = obj.MakeBucketWithLocation(GlobalContext, "bucket", "", false)
if err != nil {
t.Fatalf("Failed to make a bucket %v", err)
}
_, err = obj.PutObject(GlobalContext, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
fi, err := getLatestFileInfo(ctx, partsMetadata, errs)
if err != nil {
t.Fatalf("Failed to getLatestFileInfo %v", err)
}
for j := range partsMetadata {
if errs[j] != nil {
t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[j])
}
partsMetadata[j].ModTime = test.modTimes[j]
}
tamperedIndex := -1 tamperedIndex := -1
switch test._tamperBackend { switch test._tamperBackend {
case deletePart: case deletePart:
@ -199,11 +206,11 @@ func TestListOnlineDisks(t *testing.T) {
continue continue
} }
// Remove a part from a disk // Remove a part from a disk
// which has a valid xl.json, // which has a valid xl.meta,
// and check if that disk // and check if that disk
// appears in outDatedDisks. // appears in outDatedDisks.
tamperedIndex = index tamperedIndex = index
dErr := xlDisks[index].DeleteFile(bucket, filepath.Join(object, "part.1")) dErr := erasureDisks[index].DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1"))
if dErr != nil { if dErr != nil {
t.Fatalf("Test %d: Failed to delete %s - %v", i+1, t.Fatalf("Test %d: Failed to delete %s - %v", i+1,
filepath.Join(object, "part.1"), dErr) filepath.Join(object, "part.1"), dErr)
@ -216,11 +223,11 @@ func TestListOnlineDisks(t *testing.T) {
continue continue
} }
// Corrupt a part from a disk // Corrupt a part from a disk
// which has a valid xl.json, // which has a valid xl.meta,
// and check if that disk // and check if that disk
// appears in outDatedDisks. // appears in outDatedDisks.
tamperedIndex = index tamperedIndex = index
filePath := pathJoin(xlDisks[index].String(), bucket, object, "part.1") filePath := pathJoin(erasureDisks[index].String(), bucket, object, fi.DataDir, "part.1")
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
if err != nil { if err != nil {
t.Fatalf("Failed to open %s: %s\n", filePath, err) t.Fatalf("Failed to open %s: %s\n", filePath, err)
@ -232,27 +239,19 @@ func TestListOnlineDisks(t *testing.T) {
} }
partsMetadata, errs := readAllXLMetadata(GlobalContext, xlDisks, bucket, object) onlineDisks, modTime := listOnlineDisks(erasureDisks, partsMetadata, test.errs)
for i := range partsMetadata {
if errs[i] != nil {
t.Fatalf("Test %d: expected error to be nil: %s", i+1, errs[i].Error())
}
partsMetadata[i].Stat.ModTime = test.modTimes[i]
}
onlineDisks, modTime := listOnlineDisks(xlDisks, partsMetadata, test.errs)
if !modTime.Equal(test.expectedTime) { if !modTime.Equal(test.expectedTime) {
t.Fatalf("Test %d: Expected modTime to be equal to %v but was found to be %v", t.Fatalf("Test %d: Expected modTime to be equal to %v but was found to be %v",
i+1, test.expectedTime, modTime) i+1, test.expectedTime, modTime)
} }
availableDisks, newErrs := disksWithAllParts(GlobalContext, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan) availableDisks, newErrs := disksWithAllParts(ctx, onlineDisks, partsMetadata, test.errs, bucket, object, madmin.HealDeepScan)
test.errs = newErrs test.errs = newErrs
if test._tamperBackend != noTamper { if test._tamperBackend != noTamper {
if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil { if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil {
t.Fatalf("Test %d: disk (%v) with part.1 missing is not a disk with available data", t.Fatalf("Test %d: disk (%v) with part.1 missing is not a disk with available data",
i+1, xlDisks[tamperedIndex]) i+1, erasureDisks[tamperedIndex])
} }
} }
@ -262,9 +261,9 @@ func TestListOnlineDisks(t *testing.T) {
func TestDisksWithAllParts(t *testing.T) { func TestDisksWithAllParts(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
obj, disks, err := prepareXL16(ctx) obj, disks, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatalf("Prepare XL backend failed - %v", err) t.Fatalf("Prepare Erasure backend failed - %v", err)
} }
defer removeRoots(disks) defer removeRoots(disks)
@ -273,10 +272,10 @@ func TestDisksWithAllParts(t *testing.T) {
// make data with more than one part // make data with more than one part
partCount := 3 partCount := 3
data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] s := z.zones[0].sets[0]
xlDisks := xl.getDisks() erasureDisks := s.getDisks()
err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket %v", err) t.Fatalf("Failed to make a bucket %v", err)
} }
@ -286,22 +285,22 @@ func TestDisksWithAllParts(t *testing.T) {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
_, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) _, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
readQuorum := len(xlDisks) / 2 readQuorum := len(erasureDisks) / 2
if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil { if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
t.Fatalf("Failed to read xl meta data %v", reducedErr) t.Fatalf("Failed to read xl meta data %v", reducedErr)
} }
// Test that all disks are returned without any failures with // Test that all disks are returned without any failures with
// unmodified meta data // unmodified meta data
partsMetadata, errs := readAllXLMetadata(ctx, xlDisks, bucket, object) partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
if err != nil { if err != nil {
t.Fatalf("Failed to read xl meta data %v", err) t.Fatalf("Failed to read xl meta data %v", err)
} }
filteredDisks, errs := disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) filteredDisks, errs := disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan)
if len(filteredDisks) != len(xlDisks) { if len(filteredDisks) != len(erasureDisks) {
t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
} }
@ -324,7 +323,7 @@ func TestDisksWithAllParts(t *testing.T) {
for diskIndex, partName := range diskFailures { for diskIndex, partName := range diskFailures {
for i := range partsMetadata[diskIndex].Erasure.Checksums { for i := range partsMetadata[diskIndex].Erasure.Checksums {
if fmt.Sprintf("part.%d", i+1) == partName { if fmt.Sprintf("part.%d", i+1) == partName {
filePath := pathJoin(xlDisks[diskIndex].String(), bucket, object, partName) filePath := pathJoin(erasureDisks[diskIndex].String(), bucket, object, partsMetadata[diskIndex].DataDir, partName)
f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0) f, err := os.OpenFile(filePath, os.O_WRONLY|os.O_SYNC, 0)
if err != nil { if err != nil {
t.Fatalf("Failed to open %s: %s\n", filePath, err) t.Fatalf("Failed to open %s: %s\n", filePath, err)
@ -335,10 +334,10 @@ func TestDisksWithAllParts(t *testing.T) {
} }
} }
errs = make([]error, len(xlDisks)) errs = make([]error, len(erasureDisks))
filteredDisks, errs = disksWithAllParts(ctx, xlDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan) filteredDisks, errs = disksWithAllParts(ctx, erasureDisks, partsMetadata, errs, bucket, object, madmin.HealDeepScan)
if len(filteredDisks) != len(xlDisks) { if len(filteredDisks) != len(erasureDisks) {
t.Errorf("Unexpected number of disks: %d", len(filteredDisks)) t.Errorf("Unexpected number of disks: %d", len(filteredDisks))
} }

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"sync"
"time" "time"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
@ -27,12 +28,12 @@ import (
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
) )
func (xl xlObjects) ReloadFormat(ctx context.Context, dryRun bool) error { func (er erasureObjects) ReloadFormat(ctx context.Context, dryRun bool) error {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }
func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { func (er erasureObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{} return madmin.HealResultItem{}, NotImplemented{}
} }
@ -40,14 +41,14 @@ func (xl xlObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealRes
// Heals a bucket if it doesn't exist on one of the disks, additionally // Heals a bucket if it doesn't exist on one of the disks, additionally
// also heals the missing entries for bucket metadata files // also heals the missing entries for bucket metadata files
// `policy.json, notification.xml, listeners.json`. // `policy.json, notification.xml, listeners.json`.
func (xl xlObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) ( func (er erasureObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (
result madmin.HealResultItem, err error) { result madmin.HealResultItem, err error) {
if !dryRun { if !dryRun {
defer ObjectPathUpdated(bucket) defer ObjectPathUpdated(bucket)
} }
storageDisks := xl.getDisks() storageDisks := er.getDisks()
storageEndpoints := xl.getEndpoints() storageEndpoints := er.getEndpoints()
// get write quorum for an object // get write quorum for an object
writeQuorum := getWriteQuorum(len(storageDisks)) writeQuorum := getWriteQuorum(len(storageDisks))
@ -158,7 +159,6 @@ func healBucket(ctx context.Context, storageDisks []StorageAPI, storageEndpoints
State: afterState[i], State: afterState[i],
}) })
} }
return res, nil return res, nil
} }
@ -196,22 +196,22 @@ func listAllBuckets(storageDisks []StorageAPI, healBuckets map[string]VolInfo) (
// Only heal on disks where we are sure that healing is needed. We can expand // Only heal on disks where we are sure that healing is needed. We can expand
// this list as and when we figure out more errors can be added to this list safely. // this list as and when we figure out more errors can be added to this list safely.
func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime time.Time) bool { func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, quorumModTime time.Time) bool {
switch xlErr { switch erErr {
case errFileNotFound: case errFileNotFound:
return true return true
case errCorruptedFormat: case errCorruptedFormat:
return true return true
} }
if xlErr == nil { if erErr == nil {
// If xl.json was read fine but there may be problem with the part.N files. // If er.meta was read fine but there may be problem with the part.N files.
if IsErr(dataErr, []error{ if IsErr(dataErr, []error{
errFileNotFound, errFileNotFound,
errFileCorrupt, errFileCorrupt,
}...) { }...) {
return true return true
} }
if !quorumModTime.Equal(meta.Stat.ModTime) { if !quorumModTime.Equal(meta.ModTime) {
return true return true
} }
} }
@ -219,20 +219,20 @@ func shouldHealObjectOnDisk(xlErr, dataErr error, meta xlMetaV1, quorumModTime t
} }
// Heals an object by re-writing corrupt/missing erasure blocks. // Heals an object by re-writing corrupt/missing erasure blocks.
func (xl xlObjects) healObject(ctx context.Context, bucket string, object string, func (er erasureObjects) healObject(ctx context.Context, bucket string, object string,
partsMetadata []xlMetaV1, errs []error, latestXLMeta xlMetaV1, partsMetadata []FileInfo, errs []error, latestFileInfo FileInfo,
dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) { dryRun bool, remove bool, scanMode madmin.HealScanMode) (result madmin.HealResultItem, err error) {
dataBlocks := latestXLMeta.Erasure.DataBlocks dataBlocks := latestFileInfo.Erasure.DataBlocks
storageDisks := xl.getDisks() storageDisks := er.getDisks()
storageEndpoints := xl.getEndpoints() storageEndpoints := er.getEndpoints()
// List of disks having latest version of the object xl.json // List of disks having latest version of the object er.meta
// (by modtime). // (by modtime).
latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) latestDisks, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// List of disks having all parts as per latest xl.json. // List of disks having all parts as per latest er.meta.
availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode) availableDisks, dataErrs := disksWithAllParts(ctx, latestDisks, partsMetadata, errs, bucket, object, scanMode)
// Initialize heal result object // Initialize heal result object
@ -241,8 +241,8 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
DiskCount: len(storageDisks), DiskCount: len(storageDisks),
ParityBlocks: latestXLMeta.Erasure.ParityBlocks, ParityBlocks: latestFileInfo.Erasure.ParityBlocks,
DataBlocks: latestXLMeta.Erasure.DataBlocks, DataBlocks: latestFileInfo.Erasure.DataBlocks,
// Initialize object size to -1, so we can detect if we are // Initialize object size to -1, so we can detect if we are
// unable to reliably find the object size. // unable to reliably find the object size.
@ -263,7 +263,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
numAvailableDisks++ numAvailableDisks++
// If data is sane on any one disk, we can // If data is sane on any one disk, we can
// extract the correct object size. // extract the correct object size.
result.ObjectSize = partsMetadata[i].Stat.Size result.ObjectSize = partsMetadata[i].Size
result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks result.ParityBlocks = partsMetadata[i].Erasure.ParityBlocks
result.DataBlocks = partsMetadata[i].Erasure.DataBlocks result.DataBlocks = partsMetadata[i].Erasure.DataBlocks
case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound: case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
@ -307,18 +307,18 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
// If less than read quorum number of disks have all the parts // If less than read quorum number of disks have all the parts
// of the data, we can't reconstruct the erasure-coded data. // of the data, we can't reconstruct the erasure-coded data.
if numAvailableDisks < dataBlocks { if numAvailableDisks < dataBlocks {
// Check if xl.json, and corresponding parts are also missing. // Check if er.meta, and corresponding parts are also missing.
if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok { if m, ok := isObjectDangling(partsMetadata, errs, dataErrs); ok {
writeQuorum := m.Erasure.DataBlocks + 1 writeQuorum := m.Erasure.DataBlocks + 1
if m.Erasure.DataBlocks == 0 { if m.Erasure.DataBlocks == 0 {
writeQuorum = getWriteQuorum(len(storageDisks)) writeQuorum = getWriteQuorum(len(storageDisks))
} }
if !dryRun && remove { if !dryRun && remove {
err = xl.deleteObject(ctx, bucket, object, writeQuorum, false) err = er.deleteObject(ctx, bucket, object, writeQuorum)
} }
return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), err return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), err
} }
return result, toObjectErr(errXLReadQuorum, bucket, object) return result, toObjectErr(errErasureReadQuorum, bucket, object)
} }
if disksToHealCount == 0 { if disksToHealCount == 0 {
@ -332,32 +332,19 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
return result, nil return result, nil
} }
// Latest xlMetaV1 for reference. If a valid metadata is not // Latest FileInfo for reference. If a valid metadata is not
// present, it is as good as object not found. // present, it is as good as object not found.
latestMeta, pErr := pickValidXLMeta(ctx, partsMetadata, modTime, dataBlocks) latestMeta, pErr := pickValidFileInfo(ctx, partsMetadata, modTime, dataBlocks)
if pErr != nil { if pErr != nil {
return result, toObjectErr(pErr, bucket, object) return result, toObjectErr(pErr, bucket, object)
} }
// Clear data files of the object on outdated disks cleanFileInfo := func(fi FileInfo) FileInfo {
for _, disk := range outDatedDisks { // Returns a copy of the 'fi' with checksums and parts nil'ed.
// Before healing outdated disks, we need to remove nfi := fi
// xl.json and part files from "bucket/object/" so nfi.Erasure.Checksums = nil
// that rename(minioMetaBucket, "tmp/tmpuuid/", nfi.Parts = nil
// "bucket", "object/") succeeds. return nfi
if disk == nil {
// Not an outdated disk.
continue
}
// List and delete the object directory,
files, derr := disk.ListDir(bucket, object, -1, "")
if derr == nil {
for _, entry := range files {
_ = disk.DeleteFile(bucket,
pathJoin(object, entry))
}
}
} }
// Reorder so that we have data disks first and parity disks next. // Reorder so that we have data disks first and parity disks next.
@ -368,7 +355,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
if outDatedDisks[i] == nil { if outDatedDisks[i] == nil {
continue continue
} }
partsMetadata[i] = newXLMetaFromXLMeta(latestMeta) partsMetadata[i] = cleanFileInfo(latestMeta)
} }
// We write at temporary location and then rename to final location. // We write at temporary location and then rename to final location.
@ -388,7 +375,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
partSize := latestMeta.Parts[partIndex].Size partSize := latestMeta.Parts[partIndex].Size
partActualSize := latestMeta.Parts[partIndex].ActualSize partActualSize := latestMeta.Parts[partIndex].ActualSize
partNumber := latestMeta.Parts[partIndex].Number partNumber := latestMeta.Parts[partIndex].Number
tillOffset := erasure.ShardFileTillOffset(0, partSize, partSize) tillOffset := erasure.ShardFileOffset(0, partSize, partSize)
readers := make([]io.ReaderAt, len(latestDisks)) readers := make([]io.ReaderAt, len(latestDisks))
checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm checksumAlgo := erasureInfo.GetChecksumInfo(partNumber).Algorithm
for i, disk := range latestDisks { for i, disk := range latestDisks {
@ -396,7 +383,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
continue continue
} }
checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partNumber) checksumInfo := partsMetadata[i].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, fmt.Sprintf("part.%d", partNumber)) partPath := pathJoin(object, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber))
readers[i] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) readers[i] = newBitrotReader(disk, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
} }
writers := make([]io.Writer, len(outDatedDisks)) writers := make([]io.Writer, len(outDatedDisks))
@ -404,21 +391,22 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
partPath := pathJoin(tmpID, fmt.Sprintf("part.%d", partNumber)) partPath := pathJoin(tmpID, latestMeta.DataDir, fmt.Sprintf("part.%d", partNumber))
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, checksumAlgo, erasure.ShardSize()) writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath, tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
} }
hErr := erasure.Heal(ctx, readers, writers, partSize) err = erasure.Heal(ctx, readers, writers, partSize)
closeBitrotReaders(readers) closeBitrotReaders(readers)
closeBitrotWriters(writers) closeBitrotWriters(writers)
if hErr != nil { if err != nil {
return result, toObjectErr(hErr, bucket, object) return result, toObjectErr(err, bucket, object)
} }
// outDatedDisks that had write errors should not be // outDatedDisks that had write errors should not be
// written to for remaining parts, so we nil it out. // written to for remaining parts, so we nil it out.
for i, disk := range outDatedDisks { for i, disk := range outDatedDisks {
if disk == nil { if disk == OfflineDisk {
continue continue
} }
// A non-nil stale disk which did not receive // A non-nil stale disk which did not receive
// a healed part checksum had a write error. // a healed part checksum had a write error.
if writers[i] == nil { if writers[i] == nil {
@ -426,6 +414,7 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
disksToHealCount-- disksToHealCount--
continue continue
} }
partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize) partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize)
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: partNumber, PartNumber: partNumber,
@ -436,33 +425,31 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
// If all disks are having errors, we give up. // If all disks are having errors, we give up.
if disksToHealCount == 0 { if disksToHealCount == 0 {
return result, fmt.Errorf("all disks without up-to-date data had write errors") return result, fmt.Errorf("all disks had write errors, unable to heal")
} }
} }
// Cleanup in case of xl.json writing failure // Cleanup in case of er.meta writing failure
writeQuorum := latestMeta.Erasure.DataBlocks + 1 writeQuorum := latestMeta.Erasure.DataBlocks + 1
defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum, false) defer er.deleteObject(ctx, minioMetaTmpBucket, tmpID, writeQuorum)
// Generate and write `xl.json` generated from other disks. // Generate and write `xl.meta` generated from other disks.
outDatedDisks, aErr := writeUniqueXLMetadata(ctx, outDatedDisks, minioMetaTmpBucket, tmpID, outDatedDisks, err = writeUniqueFileInfo(ctx, outDatedDisks, minioMetaTmpBucket, tmpID,
partsMetadata, diskCount(outDatedDisks)) partsMetadata, diskCount(outDatedDisks))
if aErr != nil { if err != nil {
return result, toObjectErr(aErr, bucket, object) return result, toObjectErr(err, bucket, object)
} }
// Rename from tmp location to the actual location. // Rename from tmp location to the actual location.
for _, disk := range outDatedDisks { for _, disk := range outDatedDisks {
if disk == nil { if disk == OfflineDisk {
continue continue
} }
// Attempt a rename now from healed data to final location. // Attempt a rename now from healed data to final location.
aErr = disk.RenameFile(minioMetaTmpBucket, retainSlash(tmpID), bucket, if err = disk.RenameData(minioMetaTmpBucket, tmpID, latestMeta.DataDir, bucket, object); err != nil {
retainSlash(object)) logger.LogIf(ctx, err)
if aErr != nil { return result, toObjectErr(err, bucket, object)
logger.LogIf(ctx, aErr)
return result, toObjectErr(aErr, bucket, object)
} }
for i, v := range result.Before.Drives { for i, v := range result.Before.Drives {
@ -473,16 +460,16 @@ func (xl xlObjects) healObject(ctx context.Context, bucket string, object string
} }
// Set the size of the object in the heal result // Set the size of the object in the heal result
result.ObjectSize = latestMeta.Stat.Size result.ObjectSize = latestMeta.Size
return result, nil return result, nil
} }
// healObjectDir - heals object directory specifically, this special call // healObjectDir - heals object directory specifically, this special call
// is needed since we do not have a special backend format for directories. // is needed since we do not have a special backend format for directories.
func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) { func (er erasureObjects) healObjectDir(ctx context.Context, bucket, object string, dryRun bool, remove bool) (hr madmin.HealResultItem, err error) {
storageDisks := xl.getDisks() storageDisks := er.getDisks()
storageEndpoints := xl.getEndpoints() storageEndpoints := er.getEndpoints()
// Initialize heal result object // Initialize heal result object
hr = madmin.HealResultItem{ hr = madmin.HealResultItem{
@ -502,7 +489,19 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr
danglingObject := isObjectDirDangling(errs) danglingObject := isObjectDirDangling(errs)
if danglingObject { if danglingObject {
if !dryRun && remove { if !dryRun && remove {
xl.deleteObject(ctx, bucket, object, hr.DataBlocks+1, true) var wg sync.WaitGroup
// Remove versions in bulk for each disk
for index, disk := range storageDisks {
if disk == nil {
continue
}
wg.Add(1)
go func(index int, disk StorageAPI) {
defer wg.Done()
_ = disk.DeleteFile(bucket, object)
}(index, disk)
}
wg.Wait()
} }
} }
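For a dangling object directory, the heal path now fans the delete out to every disk with a WaitGroup and deliberately ignores individual errors, since this is best-effort cleanup. The same fan-out could also be written with the errgroup helper already imported in this file so that per-disk errors are collected; a hedged sketch follows, treating the exact errgroup API as an assumption.

// Illustrative alternative to the WaitGroup fan-out above.
g := errgroup.WithNErrs(len(storageDisks))
for index, disk := range storageDisks {
	if disk == nil {
		continue
	}
	index, disk := index, disk
	g.Go(func() error {
		return disk.DeleteFile(bucket, object)
	}, index)
}
_ = g.Wait() // per-disk errors, ignored here just like the WaitGroup version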
@ -548,7 +547,7 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr
// Populates default heal result item entries with possible values when we are returning prematurely. // Populates default heal result item entries with possible values when we are returning prematurely.
// This is to ensure that in any circumstance we are not returning empty arrays with wrong values. // This is to ensure that in any circumstance we are not returning empty arrays with wrong values.
func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem { func defaultHealResult(latestFileInfo FileInfo, storageDisks []StorageAPI, storageEndpoints []string, errs []error, bucket, object string) madmin.HealResultItem {
// Initialize heal result object // Initialize heal result object
result := madmin.HealResultItem{ result := madmin.HealResultItem{
Type: madmin.HealItemObject, Type: madmin.HealItemObject,
@ -560,8 +559,8 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage
// unable to reliably find the object size. // unable to reliably find the object size.
ObjectSize: -1, ObjectSize: -1,
} }
if latestXLMeta.IsValid() { if latestFileInfo.IsValid() {
result.ObjectSize = latestXLMeta.Stat.Size result.ObjectSize = latestFileInfo.Size
} }
for index, disk := range storageDisks { for index, disk := range storageDisks {
@ -595,13 +594,13 @@ func defaultHealResult(latestXLMeta xlMetaV1, storageDisks []StorageAPI, storage
}) })
} }
if !latestXLMeta.IsValid() { if !latestFileInfo.IsValid() {
// Default to most common configuration for erasure blocks. // Default to most common configuration for erasure blocks.
result.ParityBlocks = getDefaultParityBlocks(len(storageDisks)) result.ParityBlocks = getDefaultParityBlocks(len(storageDisks))
result.DataBlocks = getDefaultDataBlocks(len(storageDisks)) result.DataBlocks = getDefaultDataBlocks(len(storageDisks))
} else { } else {
result.ParityBlocks = latestXLMeta.Erasure.ParityBlocks result.ParityBlocks = latestFileInfo.Erasure.ParityBlocks
result.DataBlocks = latestXLMeta.Erasure.DataBlocks result.DataBlocks = latestFileInfo.Erasure.DataBlocks
} }
return result return result
@ -616,7 +615,7 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix
} }
index := index index := index
g.Go(func() error { g.Go(func() error {
entries, err := storageDisks[index].ListDir(bucket, prefix, 1, "") entries, err := storageDisks[index].ListDir(bucket, prefix, 1)
if err != nil { if err != nil {
return err return err
} }
@ -655,23 +654,23 @@ func isObjectDirDangling(errs []error) (ok bool) {
// Object is considered dangling/corrupted if and only // Object is considered dangling/corrupted if and only
// if total disks - a combination of corrupted and missing // if total disks - a combination of corrupted and missing
// files is less than the number of data blocks. // files is less than the number of data blocks.
func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (validMeta xlMetaV1, ok bool) { func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) {
// We can consider an object data not reliable // We can consider an object data not reliable
// when xl.json is not found in read quorum disks. // when er.meta is not found in read quorum disks.
// or when xl.json is not readable in read quorum disks. // or when er.meta is not readable in read quorum disks.
var notFoundXLJSON, corruptedXLJSON int var notFoundErasureJSON, corruptedErasureJSON int
for _, readErr := range errs { for _, readErr := range errs {
if readErr == errFileNotFound { if readErr == errFileNotFound {
notFoundXLJSON++ notFoundErasureJSON++
} else if readErr == errCorruptedFormat { } else if readErr == errCorruptedFormat {
corruptedXLJSON++ corruptedErasureJSON++
} }
} }
var notFoundParts int var notFoundParts int
for i := range dataErrs { for i := range dataErrs {
// Only count part errors, if the error is not // Only count part errors, if the error is not
// same as xl.json error. This is to avoid // same as er.meta error. This is to avoid
// double counting when both parts and xl.json // double counting when both parts and er.meta
// are not available. // are not available.
if errs[i] != dataErrs[i] { if errs[i] != dataErrs[i] {
if dataErrs[i] == errFileNotFound { if dataErrs[i] == errFileNotFound {
@ -694,11 +693,11 @@ func isObjectDangling(metaArr []xlMetaV1, errs []error, dataErrs []error) (valid
} }
// We have valid meta, now verify if we have enough files with parity blocks. // We have valid meta, now verify if we have enough files with parity blocks.
return validMeta, corruptedXLJSON+notFoundXLJSON+notFoundParts > validMeta.Erasure.ParityBlocks return validMeta, corruptedErasureJSON+notFoundErasureJSON+notFoundParts > validMeta.Erasure.ParityBlocks
} }
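A worked example of the dangling condition with assumed numbers: on an 8-disk set (4 data / 4 parity blocks), if 3 disks report errFileNotFound for the metadata, 1 reports errCorruptedFormat, and 1 more disk with readable metadata is missing a part file, the total is 3 + 1 + 1 = 5 > 4 parity blocks, so the object is treated as dangling and may be deleted when heal is invoked with remove.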
// HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true. // HealObject - heal the given object, automatically deletes the object if stale/corrupted if `remove` is true.
func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) { func (er erasureObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (hr madmin.HealResultItem, err error) {
// Create context that also contains information about the object and bucket. // Create context that also contains information about the object and bucket.
// The top level handler might not have this information. // The top level handler might not have this information.
reqInfo := logger.GetReqInfo(ctx) reqInfo := logger.GetReqInfo(ctx)
@ -712,14 +711,14 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
// Healing directories handle it separately. // Healing directories handle it separately.
if HasSuffix(object, SlashSeparator) { if HasSuffix(object, SlashSeparator) {
return xl.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove) return er.healObjectDir(healCtx, bucket, object, opts.DryRun, opts.Remove)
} }
storageDisks := xl.getDisks() storageDisks := er.getDisks()
storageEndpoints := xl.getEndpoints() storageEndpoints := er.getEndpoints()
// Read metadata files from all the disks // Read metadata files from all the disks
partsMetadata, errs := readAllXLMetadata(healCtx, storageDisks, bucket, object) partsMetadata, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID)
// Check if the object is dangling, if yes and user requested // Check if the object is dangling, if yes and user requested
// remove we simply delete it from namespace. // remove we simply delete it from namespace.
@ -729,15 +728,15 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
writeQuorum = getWriteQuorum(len(storageDisks)) writeQuorum = getWriteQuorum(len(storageDisks))
} }
if !opts.DryRun && opts.Remove { if !opts.DryRun && opts.Remove {
xl.deleteObject(healCtx, bucket, object, writeQuorum, false) er.deleteObject(healCtx, bucket, object, writeQuorum)
} }
err = reduceReadQuorumErrs(ctx, errs, nil, writeQuorum-1) err = reduceReadQuorumErrs(ctx, errs, nil, writeQuorum-1)
return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
} }
latestXLMeta, err := getLatestXLMeta(healCtx, partsMetadata, errs) latestFileInfo, err := getLatestFileInfo(healCtx, partsMetadata, errs)
if err != nil { if err != nil {
return defaultHealResult(xlMetaV1{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) return defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
} }
errCount := 0 errCount := 0
@ -751,20 +750,20 @@ func (xl xlObjects) HealObject(ctx context.Context, bucket, object string, opts
// Only if we get errors from all the disks we return error. Else we need to // Only if we get errors from all the disks we return error. Else we need to
// continue to return filled madmin.HealResultItem struct which includes info // continue to return filled madmin.HealResultItem struct which includes info
// on what disks the file is available etc. // on what disks the file is available etc.
if err = reduceReadQuorumErrs(ctx, errs, nil, latestXLMeta.Erasure.DataBlocks); err != nil { if err = reduceReadQuorumErrs(ctx, errs, nil, latestFileInfo.Erasure.DataBlocks); err != nil {
if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok { if m, ok := isObjectDangling(partsMetadata, errs, []error{}); ok {
writeQuorum := m.Erasure.DataBlocks + 1 writeQuorum := m.Erasure.DataBlocks + 1
if m.Erasure.DataBlocks == 0 { if m.Erasure.DataBlocks == 0 {
writeQuorum = getWriteQuorum(len(storageDisks)) writeQuorum = getWriteQuorum(len(storageDisks))
} }
if !opts.DryRun && opts.Remove { if !opts.DryRun && opts.Remove {
xl.deleteObject(ctx, bucket, object, writeQuorum, false) er.deleteObject(ctx, bucket, object, writeQuorum)
} }
} }
return defaultHealResult(latestXLMeta, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object) return defaultHealResult(latestFileInfo, storageDisks, storageEndpoints, errs, bucket, object), toObjectErr(err, bucket, object)
} }
} }
// Heal the object. // Heal the object.
return xl.healObject(healCtx, bucket, object, partsMetadata, errs, latestXLMeta, opts.DryRun, opts.Remove, opts.ScanMode) return er.healObject(healCtx, bucket, object, partsMetadata, errs, latestFileInfo, opts.DryRun, opts.Remove, opts.ScanMode)
} }
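HealObject now takes a versionID, so a specific object version can be healed (the tests below pass an empty string for non-versioned objects). A hedged usage sketch from an object-layer caller; the bucket and object names are placeholders and the logging is illustrative.

// res is a madmin.HealResultItem describing the before/after drive states.
res, err := objLayer.HealObject(ctx, "mybucket", "myobject", "", madmin.HealOpts{
	DryRun:   false,
	Remove:   true, // delete the object if it turns out to be dangling
	ScanMode: madmin.HealDeepScan,
})
if err != nil {
	log.Printf("heal failed: %v", err)
	return
}
log.Printf("healed object of size %d across %d disks", res.ObjectSize, res.DiskCount)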

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,12 +19,127 @@ package cmd
import ( import (
"bytes" "bytes"
"context" "context"
"path/filepath" "crypto/rand"
"os"
"path"
"reflect"
"testing" "testing"
"time"
"github.com/dustin/go-humanize"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
) )
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal(err)
}
defer removeRoots(fsDirs)
z := obj.(*erasureZones)
er := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
data := make([]byte, 1*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{})
if err != nil {
t.Fatal(err)
}
disk := er.getDisks()[0]
fileInfoPreHeal, err := disk.ReadVersion(bucket, object, "")
if err != nil {
t.Fatal(err)
}
// Remove the object - to simulate the case where the disk was down when the object
// was created.
err = removeAll(pathJoin(disk.String(), bucket, object))
if err != nil {
t.Fatal(err)
}
_, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatal(err)
}
fileInfoPostHeal, err := disk.ReadVersion(bucket, object, "")
if err != nil {
t.Fatal(err)
}
// After heal the meta file should be as expected.
if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) {
t.Fatal("HealObject failed")
}
err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "er.meta"))
if err != nil {
t.Fatal(err)
}
// Write er.meta with different modtime to simulate the case where a disk had
// gone down when an object was replaced by a new object.
fileInfoOutDated := fileInfoPreHeal
fileInfoOutDated.ModTime = time.Now()
err = disk.WriteMetadata(bucket, object, fileInfoOutDated)
if err != nil {
t.Fatal(err)
}
_, err = er.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan})
if err != nil {
t.Fatal(err)
}
fileInfoPostHeal, err = disk.ReadVersion(bucket, object, "")
if err != nil {
t.Fatal(err)
}
// After heal the meta file should be as expected.
if !reflect.DeepEqual(fileInfoPreHeal, fileInfoPostHeal) {
t.Fatal("HealObject failed")
}
// Remove the bucket - to simulate the case where bucket was
// created when the disk was down.
err = os.RemoveAll(path.Join(fsDirs[0], bucket))
if err != nil {
t.Fatal(err)
}
// This would create the bucket.
_, err = er.HealBucket(ctx, bucket, false, false)
if err != nil {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = er.getDisks()[0].StatVol(bucket)
if err != nil {
t.Fatal(err)
}
}
func TestHealObjectCorrupted(t *testing.T) { func TestHealObjectCorrupted(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -51,7 +166,7 @@ func TestHealObjectCorrupted(t *testing.T) {
data := bytes.Repeat([]byte("a"), 5*1024*1024) data := bytes.Repeat([]byte("a"), 5*1024*1024)
var opts ObjectOptions var opts ObjectOptions
err = objLayer.MakeBucketWithLocation(ctx, bucket, "", false) err = objLayer.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket - %v", err) t.Fatalf("Failed to make a bucket - %v", err)
} }
@ -81,91 +196,96 @@ func TestHealObjectCorrupted(t *testing.T) {
} }
// Test 1: Remove the object backend files from the first disk. // Test 1: Remove the object backend files from the first disk.
z := objLayer.(*xlZones) z := objLayer.(*erasureZones)
xl := z.zones[0].sets[0] er := z.zones[0].sets[0]
firstDisk := xl.getDisks()[0] erasureDisks := er.getDisks()
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile)) firstDisk := erasureDisks[0]
err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile))
if err != nil { if err != nil {
t.Fatalf("Failed to delete a file - %v", err) t.Fatalf("Failed to delete a file - %v", err)
} }
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil { if err != nil {
t.Fatalf("Failed to heal object - %v", err) t.Fatalf("Failed to heal object - %v", err)
} }
_, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "")
fi, err := getLatestFileInfo(ctx, fileInfos, errs)
if err != nil { if err != nil {
t.Errorf("Expected xl.json file to be present but stat failed - %v", err) t.Fatalf("Failed to getLatestFileInfo - %v", err)
} }
// Test 2: Heal when part.1 is empty if err = firstDisk.CheckFile(bucket, object); err != nil {
partSt1, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) t.Errorf("Expected er.meta file to be present but stat failed - %v", err)
if err != nil {
t.Errorf("Expected part.1 file to be present but stat failed - %v", err)
} }
err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1"))
err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil { if err != nil {
t.Errorf("Failure during deleting part.1 - %v", err) t.Errorf("Failure during deleting part.1 - %v", err)
} }
err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader([]byte{}))
err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader([]byte{}))
if err != nil { if err != nil {
t.Errorf("Failure during creating part.1 - %v", err) t.Errorf("Failure during creating part.1 - %v", err)
} }
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil { if err != nil {
t.Errorf("Expected nil but received %v", err) t.Errorf("Expected nil but received %v", err)
} }
partSt2, err := firstDisk.StatFile(bucket, filepath.Join(object, "part.1"))
fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "")
nfi, err := getLatestFileInfo(ctx, fileInfos, errs)
if err != nil { if err != nil {
t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
if partSt1.Size != partSt2.Size {
t.Errorf("part.1 file size is not the same before and after heal")
} }
// Test 3: Heal when part.1 is correct in size but corrupted if !reflect.DeepEqual(fi, nfi) {
partSt1, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1")) t.Fatalf("FileInfo not equal after healing")
if err != nil {
t.Errorf("Expected part.1 file to be present but stat failed - %v", err)
} }
err = firstDisk.DeleteFile(bucket, filepath.Join(object, "part.1"))
err = firstDisk.DeleteFile(bucket, pathJoin(object, fi.DataDir, "part.1"))
if err != nil { if err != nil {
t.Errorf("Failure during deleting part.1 - %v", err) t.Errorf("Failure during deleting part.1 - %v", err)
} }
bdata := bytes.Repeat([]byte("b"), int(partSt1.Size))
err = firstDisk.WriteAll(bucket, filepath.Join(object, "part.1"), bytes.NewReader(bdata)) bdata := bytes.Repeat([]byte("b"), int(nfi.Size))
err = firstDisk.WriteAll(bucket, pathJoin(object, fi.DataDir, "part.1"), bytes.NewReader(bdata))
if err != nil { if err != nil {
t.Errorf("Failure during creating part.1 - %v", err) t.Errorf("Failure during creating part.1 - %v", err)
} }
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
_, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil { if err != nil {
t.Errorf("Expected nil but received %v", err) t.Errorf("Expected nil but received %v", err)
} }
partSt2, err = firstDisk.StatFile(bucket, filepath.Join(object, "part.1"))
fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "")
nfi, err = getLatestFileInfo(ctx, fileInfos, errs)
if err != nil { if err != nil {
t.Errorf("Expected from part.1 file to be present but stat failed - %v", err) t.Fatalf("Failed to getLatestFileInfo - %v", err)
}
if partSt1.Size != partSt2.Size {
t.Errorf("part.1 file size is not the same before and after heal")
} }
// Test 4: checks if HealObject returns an error when xl.json is not found if !reflect.DeepEqual(fi, nfi) {
t.Fatalf("FileInfo not equal after healing")
}
// Test 4: checks if HealObject returns an error when xl.meta is not found
// in more than read quorum number of disks, to create a corrupted situation. // in more than read quorum number of disks, to create a corrupted situation.
for i := 0; i <= len(er.getDisks())/2; i++ {
for i := 0; i <= len(xl.getDisks())/2; i++ { er.getDisks()[i].DeleteFile(bucket, pathJoin(object, xlStorageFormatFile))
xl.getDisks()[i].DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
} }
// Try healing now, expect to receive errFileNotFound. // Try healing now, expect to receive errFileNotFound.
_, err = objLayer.HealObject(ctx, bucket, object, madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan}) _, err = objLayer.HealObject(ctx, bucket, object, "", madmin.HealOpts{DryRun: false, Remove: true, ScanMode: madmin.HealDeepScan})
if err != nil { if err != nil {
if _, ok := err.(ObjectNotFound); !ok { if _, ok := err.(ObjectNotFound); !ok {
t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
} }
} }
// since majority of xl.jsons are not available, object should be successfully deleted. // since majority of xl.meta's are not available, object should be successfully deleted.
_, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) _, err = objLayer.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if _, ok := err.(ObjectNotFound); !ok { if _, ok := err.(ObjectNotFound); !ok {
t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err) t.Errorf("Expect %v but received %v", ObjectNotFound{Bucket: bucket, Object: object}, err)
@ -173,7 +293,7 @@ func TestHealObjectCorrupted(t *testing.T) {
} }
// Tests healing of object. // Tests healing of object.
func TestHealObjectXL(t *testing.T) { func TestHealObjectErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -196,7 +316,7 @@ func TestHealObjectXL(t *testing.T) {
data := bytes.Repeat([]byte("a"), 5*1024*1024) data := bytes.Repeat([]byte("a"), 5*1024*1024)
var opts ObjectOptions var opts ObjectOptions
err = obj.MakeBucketWithLocation(ctx, bucket, "", false) err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket - %v", err) t.Fatalf("Failed to make a bucket - %v", err)
} }
@ -220,51 +340,51 @@ func TestHealObjectXL(t *testing.T) {
}) })
} }
// Remove the object backend files from the first disk.
z := obj.(*erasureZones)
er := z.zones[0].sets[0]
firstDisk := er.getDisks()[0]
_, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{}) _, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, ObjectOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to complete multipart upload - %v", err) t.Fatalf("Failed to complete multipart upload - %v", err)
} }
// Remove the object backend files from the first disk. err = firstDisk.DeleteFile(bucket, pathJoin(object, xlStorageFormatFile))
z := obj.(*xlZones)
xl := z.zones[0].sets[0]
firstDisk := xl.getDisks()[0]
err = firstDisk.DeleteFile(bucket, filepath.Join(object, xlMetaJSONFile))
if err != nil { if err != nil {
t.Fatalf("Failed to delete a file - %v", err) t.Fatalf("Failed to delete a file - %v", err)
} }
_, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil { if err != nil {
t.Fatalf("Failed to heal object - %v", err) t.Fatalf("Failed to heal object - %v", err)
} }
_, err = firstDisk.StatFile(bucket, filepath.Join(object, xlMetaJSONFile)) if err = firstDisk.CheckFile(bucket, object); err != nil {
if err != nil { t.Errorf("Expected er.meta file to be present but stat failed - %v", err)
t.Errorf("Expected xl.json file to be present but stat failed - %v", err)
} }
xlDisks := xl.getDisks() erasureDisks := er.getDisks()
z.zones[0].xlDisksMu.Lock() z.zones[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI { er.getDisks = func() []StorageAPI {
// Nil more than half the disks, to remove write quorum. // Nil more than half the disks, to remove write quorum.
for i := 0; i <= len(xlDisks)/2; i++ { for i := 0; i <= len(erasureDisks)/2; i++ {
xlDisks[i] = nil erasureDisks[i] = nil
} }
return xlDisks return erasureDisks
} }
z.zones[0].xlDisksMu.Unlock() z.zones[0].erasureDisksMu.Unlock()
// Try healing now, expect to receive errDiskNotFound. // Try healing now, expect to receive errDiskNotFound.
_, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan}) _, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealDeepScan})
// since majority of xl.jsons are not available, object quorum can't be read properly and error will be errXLReadQuorum // since majority of er.meta's are not available, object quorum can't be read properly and error will be errErasureReadQuorum
if _, ok := err.(InsufficientReadQuorum); !ok { if _, ok := err.(InsufficientReadQuorum); !ok {
t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err) t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err)
} }
} }
// Tests healing of empty directories // Tests healing of empty directories
func TestHealEmptyDirectoryXL(t *testing.T) { func TestHealEmptyDirectoryErasure(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
@ -285,7 +405,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
object := "empty-dir/" object := "empty-dir/"
var opts ObjectOptions var opts ObjectOptions
err = obj.MakeBucketWithLocation(ctx, bucket, "", false) err = obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket - %v", err) t.Fatalf("Failed to make a bucket - %v", err)
} }
@ -298,16 +418,16 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
} }
// Remove the object backend files from the first disk. // Remove the object backend files from the first disk.
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] er := z.zones[0].sets[0]
firstDisk := xl.getDisks()[0] firstDisk := er.getDisks()[0]
err = firstDisk.DeleteFile(bucket, object) err = firstDisk.DeleteFile(bucket, object)
if err != nil { if err != nil {
t.Fatalf("Failed to delete a file - %v", err) t.Fatalf("Failed to delete a file - %v", err)
} }
// Heal the object // Heal the object
hr, err := obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) hr, err := obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil { if err != nil {
t.Fatalf("Failed to heal object - %v", err) t.Fatalf("Failed to heal object - %v", err)
} }
@ -331,7 +451,7 @@ func TestHealEmptyDirectoryXL(t *testing.T) {
} }
// Heal the same object again // Heal the same object again
hr, err = obj.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan}) hr, err = obj.HealObject(ctx, bucket, object, "", madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil { if err != nil {
t.Fatalf("Failed to heal object - %v", err) t.Fatalf("Failed to heal object - %v", err)
} }
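Worth calling out for the heal tests above: with versioning support, HealObject gains a version ID argument between the object name and the heal options, and an empty string heals the latest version. A minimal sketch of the new call shape (the bucket and object names here are hypothetical):
res, err := objLayer.HealObject(ctx, "mybucket", "myobject", "", madmin.HealOpts{
	ScanMode: madmin.HealNormalScan, // use madmin.HealDeepScan to also verify part contents against bitrot
})
if err != nil {
	t.Fatalf("Failed to heal object - %v", err)
}
_ = res // heal result describing the object's state before and after healing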


@ -0,0 +1,58 @@
/*
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"github.com/minio/minio/pkg/madmin"
)
// ListObjectVersions - This is not implemented, look for erasure-zones.ListObjectVersions()
func (er erasureObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) {
return loi, NotImplemented{}
}
// ListObjectsV2 - This is not implemented/needed anymore, look for erasure-zones.ListObjectsV2()
func (er erasureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi ListObjectsV2Info, e error) {
return loi, NotImplemented{}
}
// ListObjects - This is not implemented/needed anymore, look for erasure-zones.ListObjects()
func (er erasureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, NotImplemented{}
}
// ListBucketsHeal - This is not implemented/needed anymore, look for erasure-zones.ListBucketHeal()
func (er erasureObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
return nil, NotImplemented{}
}
// ListObjectsHeal - This is not implemented, look for erasure-zones.ListObjectsHeal()
func (er erasureObjects) ListObjectsHeal(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
return ListObjectsInfo{}, NotImplemented{}
}
// HealObjects - This is not implemented/needed anymore, look for erasure-zones.HealObjects()
func (er erasureObjects) HealObjects(ctx context.Context, bucket, prefix string, _ madmin.HealOpts, _ HealObjectFn) (e error) {
return NotImplemented{}
}
// Walk - This is not implemented/needed anymore, look for erasure-zones.Walk()
func (er erasureObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error {
return NotImplemented{}
}


@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.


@ -20,9 +20,7 @@ import (
"context" "context"
"errors" "errors"
"hash/crc32" "hash/crc32"
"path"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
) )
@ -72,13 +70,13 @@ func reduceQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, qu
// reduceReadQuorumErrs behaves like reduceErrs but only for returning // reduceReadQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against readQuorum. // values of maximally occurring errors validated against readQuorum.
func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) { func reduceReadQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, readQuorum int) (maxErr error) {
return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errXLReadQuorum) return reduceQuorumErrs(ctx, errs, ignoredErrs, readQuorum, errErasureReadQuorum)
} }
// reduceWriteQuorumErrs behaves like reduceErrs but only for returning // reduceWriteQuorumErrs behaves like reduceErrs but only for returning
// values of maximally occurring errors validated against writeQuorum. // values of maximally occurring errors validated against writeQuorum.
func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) { func reduceWriteQuorumErrs(ctx context.Context, errs []error, ignoredErrs []error, writeQuorum int) (maxErr error) {
return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errXLWriteQuorum) return reduceQuorumErrs(ctx, errs, ignoredErrs, writeQuorum, errErasureWriteQuorum)
} }
// Similar to 'len(slice)' but returns the actual elements count // Similar to 'len(slice)' but returns the actual elements count
@ -115,44 +113,26 @@ func hashOrder(key string, cardinality int) []int {
return nums return nums
} }
// Constructs xlMetaV1 using `jsoniter` lib. // Reads all `xl.meta` metadata as a FileInfo slice.
func xlMetaV1UnmarshalJSON(ctx context.Context, xlMetaBuf []byte) (xlMeta xlMetaV1, err error) {
var json = jsoniter.ConfigCompatibleWithStandardLibrary
err = json.Unmarshal(xlMetaBuf, &xlMeta)
return xlMeta, err
}
// readXLMeta reads `xl.json` and returns back XL metadata structure.
func readXLMeta(ctx context.Context, disk StorageAPI, bucket string, object string) (xlMeta xlMetaV1, err error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
if err != errFileNotFound && err != errVolumeNotFound {
logger.GetReqInfo(ctx).AppendTags("disk", disk.String())
logger.LogIf(ctx, err)
}
return xlMetaV1{}, err
}
if len(xlMetaBuf) == 0 {
return xlMetaV1{}, errFileNotFound
}
return xlMetaV1UnmarshalJSON(ctx, xlMetaBuf)
}
// Reads all `xl.json` metadata as a xlMetaV1 slice.
// Returns error slice indicating the failed metadata reads. // Returns error slice indicating the failed metadata reads.
func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object string) ([]xlMetaV1, []error) { func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, versionID string) ([]FileInfo, []error) {
metadataArray := make([]xlMetaV1, len(disks)) metadataArray := make([]FileInfo, len(disks))
g := errgroup.WithNErrs(len(disks)) g := errgroup.WithNErrs(len(disks))
// Read `xl.json` in parallel across disks. // Read `xl.meta` in parallel across disks.
for index := range disks { for index := range disks {
index := index index := index
g.Go(func() (err error) { g.Go(func() (err error) {
if disks[index] == nil { if disks[index] == nil {
return errDiskNotFound return errDiskNotFound
} }
metadataArray[index], err = readXLMeta(ctx, disks[index], bucket, object) metadataArray[index], err = disks[index].ReadVersion(bucket, object, versionID)
if err != nil {
if err != errFileNotFound && err != errVolumeNotFound && err != errFileVersionNotFound {
logger.GetReqInfo(ctx).AppendTags("disk", disks[index].String())
logger.LogIf(ctx, err)
}
}
return err return err
}, index) }, index)
} }
@ -162,11 +142,11 @@ func readAllXLMetadata(ctx context.Context, disks []StorageAPI, bucket, object s
} }
// Return shuffled partsMetadata depending on distribution. // Return shuffled partsMetadata depending on distribution.
func shufflePartsMetadata(partsMetadata []xlMetaV1, distribution []int) (shuffledPartsMetadata []xlMetaV1) { func shufflePartsMetadata(partsMetadata []FileInfo, distribution []int) (shuffledPartsMetadata []FileInfo) {
if distribution == nil { if distribution == nil {
return partsMetadata return partsMetadata
} }
shuffledPartsMetadata = make([]xlMetaV1, len(partsMetadata)) shuffledPartsMetadata = make([]FileInfo, len(partsMetadata))
// Shuffle slice xl metadata for expected distribution. // Shuffle slice xl metadata for expected distribution.
for index := range partsMetadata { for index := range partsMetadata {
blockIndex := distribution[index] blockIndex := distribution[index]


@ -0,0 +1,201 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"reflect"
"testing"
)
// Tests calculating disk count.
func TestDiskCount(t *testing.T) {
testCases := []struct {
disks []StorageAPI
diskCount int
}{
// Test case - 1
{
disks: []StorageAPI{&xlStorage{}, &xlStorage{}, &xlStorage{}, &xlStorage{}},
diskCount: 4,
},
// Test case - 2
{
disks: []StorageAPI{nil, &xlStorage{}, &xlStorage{}, &xlStorage{}},
diskCount: 3,
},
}
for i, testCase := range testCases {
cdiskCount := diskCount(testCase.disks)
if cdiskCount != testCase.diskCount {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.diskCount, cdiskCount)
}
}
}
// Test for reduceErrs, reduceErrs reduces a collection
// of errors into a single maximal error within the list.
func TestReduceErrs(t *testing.T) {
// List of all test cases to validate various cases of reduce errors.
testCases := []struct {
errs []error
ignoredErrs []error
err error
}{
// Validate if errors have been reduced properly.
{[]error{
errDiskNotFound,
errDiskNotFound,
errDiskFull,
}, []error{}, errErasureReadQuorum},
// Validate when there is no consensus.
{[]error{
errDiskFull,
errDiskNotFound,
nil, nil,
}, []error{}, errErasureReadQuorum},
// Validate when there is consensus and the errors are ignored.
{[]error{
errVolumeNotFound,
errVolumeNotFound,
errVolumeNotFound,
errVolumeNotFound,
errVolumeNotFound,
errDiskNotFound,
errDiskNotFound,
}, []error{errDiskNotFound}, errVolumeNotFound},
{[]error{}, []error{}, errErasureReadQuorum},
{[]error{errFileNotFound, errFileNotFound, errFileNotFound,
errFileNotFound, errFileNotFound, nil, nil, nil, nil, nil},
nil, nil},
}
// Validates the list of all test cases for returning valid errors.
for i, testCase := range testCases {
gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5)
if gotErr != testCase.err {
t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr)
}
gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6)
if gotNewErr != errErasureWriteQuorum {
t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotErr)
}
}
}
// TestHashOrder - test order of ints in array
func TestHashOrder(t *testing.T) {
testCases := []struct {
objectName string
hashedOrder []int
}{
// cases which should pass the test.
// passing in valid object name.
{"object", []int{14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}},
{"The Shining Script <v1>.pdf", []int{16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}},
{"Cost Benefit Analysis (2009-2010).pptx", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
{"SHØRT", []int{11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
{"There are far too many object names, and far too few bucket names!", []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
{"a/b/c/", []int{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2}},
{"/a/b/c", []int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1, 2, 3, 4, 5}},
{string([]byte{0xff, 0xfe, 0xfd}), []int{15, 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}},
}
// Tests hashing order to be consistent.
for i, testCase := range testCases {
hashedOrder := hashOrder(testCase.objectName, 16)
if !reflect.DeepEqual(testCase.hashedOrder, hashedOrder) {
t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.hashedOrder, hashedOrder)
}
}
// Tests that hashing order fails when the cardinality is '-1'.
if hashedOrder := hashOrder("This will fail", -1); hashedOrder != nil {
t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder)
}
if hashedOrder := hashOrder("This will fail", 0); hashedOrder != nil {
t.Errorf("Test: Expect \"nil\" but failed \"%#v\"", hashedOrder)
}
}
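As the table above suggests, hashOrder(key, n) returns a rotation of the sequence 1..n whose starting point is derived from a hash of the key, so a given object name always produces the same disk ordering, and a non-positive cardinality yields nil. A tiny illustrative call (output taken from the first test case above):
order := hashOrder("object", 16)
// order == [14 15 16 1 2 3 4 5 6 7 8 9 10 11 12 13]; hashOrder("object", -1) == nil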
func TestShuffleDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
removeRoots(disks)
t.Fatal(err)
}
defer removeRoots(disks)
z := objLayer.(*erasureZones)
testShuffleDisks(t, z)
}
// Test shuffleDisks which returns a shuffled slice of disks matching the given distribution.
func testShuffleDisks(t *testing.T, z *erasureZones) {
disks := z.zones[0].GetDisks(0)()
distribution := []int{16, 14, 12, 10, 8, 6, 4, 2, 1, 3, 5, 7, 9, 11, 13, 15}
shuffledDisks := shuffleDisks(disks, distribution)
// From the "distribution" above you can notice that:
// 1st data block is in the 9th disk (i.e distribution index 8)
// 2nd data block is in the 8th disk (i.e distribution index 7) and so on.
if shuffledDisks[0] != disks[8] ||
shuffledDisks[1] != disks[7] ||
shuffledDisks[2] != disks[9] ||
shuffledDisks[3] != disks[6] ||
shuffledDisks[4] != disks[10] ||
shuffledDisks[5] != disks[5] ||
shuffledDisks[6] != disks[11] ||
shuffledDisks[7] != disks[4] ||
shuffledDisks[8] != disks[12] ||
shuffledDisks[9] != disks[3] ||
shuffledDisks[10] != disks[13] ||
shuffledDisks[11] != disks[2] ||
shuffledDisks[12] != disks[14] ||
shuffledDisks[13] != disks[1] ||
shuffledDisks[14] != disks[15] ||
shuffledDisks[15] != disks[0] {
t.Errorf("shuffleDisks returned incorrect order.")
}
}
// TestEvalDisks tests the behavior of evalDisks
func TestEvalDisks(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nDisks := 16
disks, err := getRandomDisks(nDisks)
if err != nil {
t.Fatal(err)
}
objLayer, _, err := initObjectLayer(ctx, mustGetZoneEndpoints(disks...))
if err != nil {
removeRoots(disks)
t.Fatal(err)
}
defer removeRoots(disks)
z := objLayer.(*erasureZones)
testShuffleDisks(t, z)
}

cmd/erasure-metadata.go Normal file

@ -0,0 +1,326 @@
/*
* MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"encoding/hex"
"fmt"
"net/http"
"sort"
"time"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/sync/errgroup"
"github.com/minio/sha256-simd"
)
const erasureAlgorithm = "rs-vandermonde"
// byObjectPartNumber is a collection satisfying sort.Interface.
type byObjectPartNumber []ObjectPartInfo
func (t byObjectPartNumber) Len() int { return len(t) }
func (t byObjectPartNumber) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t byObjectPartNumber) Less(i, j int) bool { return t[i].Number < t[j].Number }
// AddChecksumInfo adds a checksum of a part.
func (e *ErasureInfo) AddChecksumInfo(ckSumInfo ChecksumInfo) {
for i, sum := range e.Checksums {
if sum.PartNumber == ckSumInfo.PartNumber {
e.Checksums[i] = ckSumInfo
return
}
}
e.Checksums = append(e.Checksums, ckSumInfo)
}
// GetChecksumInfo - get checksum of a part.
func (e ErasureInfo) GetChecksumInfo(partNumber int) (ckSum ChecksumInfo) {
for _, sum := range e.Checksums {
if sum.PartNumber == partNumber {
// Return the checksum
return sum
}
}
return ChecksumInfo{}
}
// ShardFileSize - returns final erasure size from original size.
func (e ErasureInfo) ShardFileSize(totalLength int64) int64 {
if totalLength == 0 {
return 0
}
if totalLength == -1 {
return -1
}
numShards := totalLength / e.BlockSize
lastBlockSize := totalLength % e.BlockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.DataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
// ShardSize - returns the actual shard size from the erasure blockSize.
func (e ErasureInfo) ShardSize() int64 {
return ceilFrac(e.BlockSize, int64(e.DataBlocks))
}
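A quick worked example of the two size helpers above; the block size, data/parity split and object size are hypothetical values chosen only to make the arithmetic easy to follow:
e := ErasureInfo{BlockSize: 10 << 20, DataBlocks: 4, ParityBlocks: 2}
shard := e.ShardSize()             // ceil(10 MiB / 4 data blocks) = 2621440 bytes per full block
total := e.ShardFileSize(25 << 20) // 2 full blocks (2*2621440) + ceil(5 MiB / 4) = 6553600 bytes
_, _ = shard, total                // i.e. each disk's part file holds 25 MiB / 4 = 6.25 MiB of shard data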
// IsValid - tells if erasure info fields are valid.
func (fi FileInfo) IsValid() bool {
if fi.Deleted {
// Delete marker has no data, no need to check
// for erasure coding information
return true
}
data := fi.Erasure.DataBlocks
parity := fi.Erasure.ParityBlocks
return ((data >= parity) && (data != 0) && (parity != 0))
}
// ToObjectInfo - Converts metadata to object info.
func (fi FileInfo) ToObjectInfo(bucket, object string) ObjectInfo {
if HasSuffix(object, SlashSeparator) {
return ObjectInfo{
Bucket: bucket,
Name: object,
IsDir: true,
}
}
objInfo := ObjectInfo{
IsDir: false,
Bucket: bucket,
Name: object,
VersionID: fi.VersionID,
IsLatest: fi.IsLatest,
DeleteMarker: fi.Deleted,
Size: fi.Size,
ModTime: fi.ModTime,
ContentType: fi.Metadata["content-type"],
ContentEncoding: fi.Metadata["content-encoding"],
}
// Update expires
var (
t time.Time
e error
)
if exp, ok := fi.Metadata["expires"]; ok {
if t, e = time.Parse(http.TimeFormat, exp); e == nil {
objInfo.Expires = t.UTC()
}
}
objInfo.backendType = BackendErasure
// Extract etag from metadata.
objInfo.ETag = extractETag(fi.Metadata)
// Add user tags to the object info
objInfo.UserTags = fi.Metadata[xhttp.AmzObjectTagging]
// etag/md5Sum has already been extracted. We need to
// remove it to avoid it appearing as part of
// response headers, e.g. X-Minio-* or X-Amz-*.
// Tags have also been extracted, so we remove them as well.
objInfo.UserDefined = cleanMetadata(fi.Metadata)
// All the parts per object.
objInfo.Parts = fi.Parts
// Update storage class
if sc, ok := fi.Metadata[xhttp.AmzStorageClass]; ok {
objInfo.StorageClass = sc
} else {
objInfo.StorageClass = globalMinioDefaultStorageClass
}
// Success.
return objInfo
}
// objectPartIndex - returns the index of matching object part number.
func objectPartIndex(parts []ObjectPartInfo, partNumber int) int {
for i, part := range parts {
if partNumber == part.Number {
return i
}
}
return -1
}
// AddObjectPart - add a new object part in order.
func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize int64, actualSize int64) {
partInfo := ObjectPartInfo{
Number: partNumber,
ETag: partETag,
Size: partSize,
ActualSize: actualSize,
}
// Update part info if it already exists.
for i, part := range fi.Parts {
if partNumber == part.Number {
fi.Parts[i] = partInfo
return
}
}
// Proceed to include new part info.
fi.Parts = append(fi.Parts, partInfo)
// Parts in FileInfo should be in sorted order by part number.
sort.Sort(byObjectPartNumber(fi.Parts))
}
// ObjectToPartOffset - translate offset of an object to offset of its individual part.
func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
if offset == 0 {
// Special case - if offset is 0, then partIndex and partOffset are always 0.
return 0, 0, nil
}
partOffset = offset
// Seek until object offset maps to a particular part offset.
for i, part := range fi.Parts {
partIndex = i
// Offset is smaller than the part size; we have reached the proper part offset.
if partOffset < part.Size {
return partIndex, partOffset, nil
}
// Continue towards the next part.
partOffset -= part.Size
}
logger.LogIf(ctx, InvalidRange{})
// Offset is beyond the size of the object, return InvalidRange.
return 0, 0, InvalidRange{}
}
func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
metaHashes := make([]string, len(metaArr))
for i, meta := range metaArr {
if meta.IsValid() && meta.ModTime.Equal(modTime) {
h := sha256.New()
for _, part := range meta.Parts {
h.Write([]byte(fmt.Sprintf("part.%d", part.Number)))
}
metaHashes[i] = hex.EncodeToString(h.Sum(nil))
}
}
metaHashCountMap := make(map[string]int)
for _, hash := range metaHashes {
if hash == "" {
continue
}
metaHashCountMap[hash]++
}
maxHash := ""
maxCount := 0
for hash, count := range metaHashCountMap {
if count > maxCount {
maxCount = count
maxHash = hash
}
}
if maxCount < quorum {
return FileInfo{}, errErasureReadQuorum
}
for i, hash := range metaHashes {
if hash == maxHash {
return metaArr[i], nil
}
}
return FileInfo{}, errErasureReadQuorum
}
// pickValidFileInfo - picks one valid FileInfo from a
// slice of FileInfo and returns it.
func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Time, quorum int) (xmv FileInfo, e error) {
return findFileInfoInQuorum(ctx, metaArr, modTime, quorum)
}
// Rename metadata content to destination location for each disk concurrently.
func renameFileInfo(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, quorum int) ([]StorageAPI, error) {
ignoredErr := []error{errFileNotFound}
g := errgroup.WithNErrs(len(disks))
// Rename file on all underlying storage disks.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
if err := disks[index].RenameData(srcBucket, srcEntry, "", dstBucket, dstEntry); err != nil {
if !IsErrIgnored(err, ignoredErr...) {
return err
}
}
return nil
}, index)
}
// Wait for all renames to finish.
errs := g.Wait()
// We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum
// otherwise return failure. Cleanup successful renames.
err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, errs), err
}
// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
g := errgroup.WithNErrs(len(disks))
// Start writing `xl.meta` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Pick one FileInfo for a disk at index.
files[index].Erasure.Index = index + 1
return disks[index].WriteMetadata(bucket, prefix, files[index])
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// Returns per object readQuorum and writeQuorum
// readQuorum is the min required disks to read data.
// writeQuorum is the min required disks to write data.
func objectQuorumFromMeta(ctx context.Context, er erasureObjects, partsMetaData []FileInfo, errs []error) (objectReadQuorum, objectWriteQuorum int, err error) {
// get the latest updated Metadata and a count of all the latest updated FileInfo(s)
latestFileInfo, err := getLatestFileInfo(ctx, partsMetaData, errs)
if err != nil {
return 0, 0, err
}
// Since all the valid erasure code meta updated at the same time are equivalent, pass dataBlocks
// from latestFileInfo to get the quorum
return latestFileInfo.Erasure.DataBlocks, latestFileInfo.Erasure.DataBlocks + 1, nil
}
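Taken together with readAllFileInfo from the earlier file, the helpers above make up the metadata read path: read every disk, reduce the per-disk errors against the quorum, then pick the FileInfo the quorum agrees on. The sketch below is a hypothetical helper (not part of this commit) showing one way the pieces fit together, assuming the package-internal names shown in this diff; with 16 drives and the default half data / half parity split, objectQuorumFromMeta above yields a read quorum of 8 and a write quorum of 9.
func readQuorumFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, versionID string, readQuorum int) (FileInfo, error) {
	// Read xl.meta from every disk in parallel; errs[i] records the failure, if any, on disk i.
	metaArr, errs := readAllFileInfo(ctx, disks, bucket, object, versionID)
	// Fail early if more disks failed than the read quorum tolerates.
	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
		return FileInfo{}, reducedErr
	}
	// Find the modification time the online disks agree on, then pick one FileInfo
	// that at least readQuorum disks share (via findFileInfoInQuorum above).
	_, modTime := listOnlineDisks(disks, metaArr, errs)
	return pickValidFileInfo(ctx, metaArr, modTime, readQuorum)
}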


@ -0,0 +1,153 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"strconv"
"testing"
humanize "github.com/dustin/go-humanize"
)
const ActualSize = 1000
// Test FileInfo.AddObjectPart()
func TestAddObjectPart(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{1, 0},
{2, 1},
{4, 2},
{5, 3},
{7, 4},
// Insert part.
{3, 2},
// Replace existing part.
{4, 3},
// Missing part.
{6, -1},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Test them.
for _, testCase := range testCases {
if testCase.expectedIndex > -1 {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test objectPartIndex(). Generates sample FileInfo data and asserts
// the output of objectPartIndex() against the expected value.
func TestObjectPartIndex(t *testing.T) {
testCases := []struct {
partNum int
expectedIndex int
}{
{2, 1},
{1, 0},
{5, 3},
{4, 2},
{7, 4},
}
// Setup.
fi := newFileInfo("test-object", 8, 8)
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
for _, testCase := range testCases {
partNumString := strconv.Itoa(testCase.partNum)
fi.AddObjectPart(testCase.partNum, "etag."+partNumString, int64(testCase.partNum+humanize.MiByte), ActualSize)
}
// Add failure test case.
testCases = append(testCases, struct {
partNum int
expectedIndex int
}{6, -1})
// Test them.
for _, testCase := range testCases {
if index := objectPartIndex(fi.Parts, testCase.partNum); index != testCase.expectedIndex {
t.Fatalf("%+v: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
}
}
// Test FileInfo.ObjectToPartOffset().
func TestObjectToPartOffset(t *testing.T) {
// Setup.
fi := newFileInfo("test-object", 8, 8)
if !fi.IsValid() {
t.Fatalf("unable to get xl meta")
}
// Add some parts for testing.
// Total size of all parts is 5,242,899 bytes.
for _, partNum := range []int{1, 2, 4, 5, 7} {
partNumString := strconv.Itoa(partNum)
fi.AddObjectPart(partNum, "etag."+partNumString, int64(partNum+humanize.MiByte), ActualSize)
}
testCases := []struct {
offset int64
expectedIndex int
expectedOffset int64
expectedErr error
}{
{0, 0, 0, nil},
{1 * humanize.MiByte, 0, 1 * humanize.MiByte, nil},
{1 + humanize.MiByte, 1, 0, nil},
{2 + humanize.MiByte, 1, 1, nil},
// It's valid for a zero sized object.
{-1, 0, -1, nil},
// Max offset is always (size - 1).
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte) - 1, 4, 1048582, nil},
// Error if offset is size.
{(1 + 2 + 4 + 5 + 7) + (5 * humanize.MiByte), 0, 0, InvalidRange{}},
}
// Test them.
for _, testCase := range testCases {
index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset)
if err != testCase.expectedErr {
t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err)
}
if index != testCase.expectedIndex {
t.Fatalf("%+v: index: expected = %d, got: %d", testCase, testCase.expectedIndex, index)
}
if offset != testCase.expectedOffset {
t.Fatalf("%+v: offset: expected = %d, got: %d", testCase, testCase.expectedOffset, offset)
}
}
}
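A short walk-through of one expectation above, given that the loop adds part N with a size of 1 MiB + N bytes:
// Case {2 + humanize.MiByte, 1, 1, nil}:
//   requested offset         = 1 MiB + 2
//   part at index 0 (part 1) = 1 MiB + 1 bytes -> offset >= size, subtract, 1 byte remains
//   part at index 1 (part 2) = 1 MiB + 2 bytes -> 1 < size, so the result is (index 1, offset 1)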


@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -24,7 +24,6 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time"
xhttp "github.com/minio/minio/cmd/http" xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
@ -32,24 +31,25 @@ import (
"github.com/minio/minio/pkg/sync/errgroup" "github.com/minio/minio/pkg/sync/errgroup"
) )
func (xl xlObjects) getUploadIDDir(bucket, object, uploadID string) string { func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string {
return pathJoin(xl.getMultipartSHADir(bucket, object), uploadID) return pathJoin(er.getMultipartSHADir(bucket, object), uploadID)
} }
func (xl xlObjects) getMultipartSHADir(bucket, object string) string { func (er erasureObjects) getMultipartSHADir(bucket, object string) string {
return getSHA256Hash([]byte(pathJoin(bucket, object))) return getSHA256Hash([]byte(pathJoin(bucket, object)))
} }
// checkUploadIDExists - verify if a given uploadID exists and is valid. // checkUploadIDExists - verify if a given uploadID exists and is valid.
func (xl xlObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error { func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error {
_, err := xl.getObjectInfo(ctx, minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID), ObjectOptions{}) _, err := er.getObjectInfo(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID), ObjectOptions{})
return err return err
} }
// Removes part given by partName belonging to a multipart upload from minioMetaBucket // Removes part given by partName belonging to a multipart upload from minioMetaBucket
func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber int) { func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) {
curpartPath := pathJoin(xl.getUploadIDDir(bucket, object, uploadID), fmt.Sprintf("part.%d", partNumber)) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
storageDisks := xl.getDisks() curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber))
storageDisks := er.getDisks()
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
for index, disk := range storageDisks { for index, disk := range storageDisks {
@ -59,7 +59,7 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber
index := index index := index
g.Go(func() error { g.Go(func() error {
// Ignoring failure to remove parts that weren't present in CompleteMultipartUpload // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload
// requests. xl.json is the authoritative source of truth on which parts constitute // requests. xl.meta is the authoritative source of truth on which parts constitute
// the object. The presence of parts that don't belong in the object doesn't affect correctness. // the object. The presence of parts that don't belong in the object doesn't affect correctness.
_ = storageDisks[index].DeleteFile(minioMetaMultipartBucket, curpartPath) _ = storageDisks[index].DeleteFile(minioMetaMultipartBucket, curpartPath)
return nil return nil
@ -68,36 +68,6 @@ func (xl xlObjects) removeObjectPart(bucket, object, uploadID string, partNumber
g.Wait() g.Wait()
} }
// commitXLMetadata - commit `xl.json` from source prefix to destination prefix in the given slice of disks.
func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPrefix string, quorum int) ([]StorageAPI, error) {
srcJSONFile := path.Join(srcPrefix, xlMetaJSONFile)
dstJSONFile := path.Join(dstPrefix, xlMetaJSONFile)
g := errgroup.WithNErrs(len(disks))
// Rename `xl.json` to all disks in parallel.
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
// Delete any dangling directories.
defer disks[index].DeleteFile(srcBucket, srcPrefix)
// Renames `xl.json` from source prefix to destination prefix.
return disks[index].RenameFile(srcBucket, srcJSONFile, dstBucket, dstJSONFile)
}, index)
}
// Wait for all the routines.
mErrs := g.Wait()
err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum)
return evalDisks(disks, mErrs), err
}
// ListMultipartUploads - lists all the pending multipart // ListMultipartUploads - lists all the pending multipart
// uploads for a particular object in a bucket. // uploads for a particular object in a bucket.
// //
@ -105,17 +75,17 @@ func commitXLMetadata(ctx context.Context, disks []StorageAPI, srcBucket, srcPre
// not support prefix based listing, this is a deliberate attempt // not support prefix based listing, this is a deliberate attempt
// towards simplification of multipart APIs. // towards simplification of multipart APIs.
// The resulting ListMultipartsInfo structure is unmarshalled directly as XML. // The resulting ListMultipartsInfo structure is unmarshalled directly as XML.
func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) {
result.MaxUploads = maxUploads result.MaxUploads = maxUploads
result.KeyMarker = keyMarker result.KeyMarker = keyMarker
result.Prefix = object result.Prefix = object
result.Delimiter = delimiter result.Delimiter = delimiter
for _, disk := range xl.getLoadBalancedDisks() { for _, disk := range er.getLoadBalancedDisks() {
if disk == nil { if disk == nil {
continue continue
} }
uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, xl.getMultipartSHADir(bucket, object), -1, "") uploadIDs, err := disk.ListDir(minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1)
if err != nil { if err != nil {
if err == errFileNotFound { if err == errFileNotFound {
return result, nil return result, nil
@ -147,16 +117,16 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke
// '.minio.sys/multipart/bucket/object/uploads.json' on all the // '.minio.sys/multipart/bucket/object/uploads.json' on all the
// disks. `uploads.json` carries metadata regarding on-going multipart // disks. `uploads.json` carries metadata regarding on-going multipart
// operation(s) on the object. // operation(s) on the object.
func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, object string, meta map[string]string) (string, error) { func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (string, error) {
onlineDisks := xl.getDisks() onlineDisks := er.getDisks()
parityBlocks := globalStorageClass.GetParityForSC(meta[xhttp.AmzStorageClass]) parityBlocks := globalStorageClass.GetParityForSC(opts.UserDefined[xhttp.AmzStorageClass])
if parityBlocks == 0 { if parityBlocks == 0 {
parityBlocks = len(onlineDisks) / 2 parityBlocks = len(onlineDisks) / 2
} }
dataBlocks := len(onlineDisks) - parityBlocks dataBlocks := len(onlineDisks) - parityBlocks
xlMeta := newXLMetaV1(object, dataBlocks, parityBlocks) fi := newFileInfo(object, dataBlocks, parityBlocks)
// we now know the number of blocks this object needs for data and parity. // we now know the number of blocks this object needs for data and parity.
// establish the writeQuorum using this data // establish the writeQuorum using this data
@ -165,30 +135,37 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec
writeQuorum = dataBlocks + 1 writeQuorum = dataBlocks + 1
} }
if meta["content-type"] == "" { if opts.UserDefined["content-type"] == "" {
contentType := mimedb.TypeByExtension(path.Ext(object)) contentType := mimedb.TypeByExtension(path.Ext(object))
meta["content-type"] = contentType opts.UserDefined["content-type"] = contentType
} }
xlMeta.Stat.ModTime = UTCNow()
xlMeta.Meta = meta // Calculate the version to be saved.
if opts.Versioned {
fi.VersionID = mustGetUUID()
}
fi.DataDir = mustGetUUID()
fi.ModTime = UTCNow()
fi.Metadata = opts.UserDefined
uploadID := mustGetUUID() uploadID := mustGetUUID()
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
tempUploadIDPath := uploadID tempUploadIDPath := uploadID
// Delete the tmp path later in case we fail to commit (ignore // Delete the tmp path later in case we fail to commit (ignore
// returned errors) - this will be a no-op in case of a commit // returned errors) - this will be a no-op in case of a commit
// success. // success.
defer xl.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum, false) defer er.deleteObject(ctx, minioMetaTmpBucket, tempUploadIDPath, writeQuorum)
var partsMetadata = make([]xlMetaV1, len(onlineDisks)) var partsMetadata = make([]FileInfo, len(onlineDisks))
for i := range onlineDisks { for i := range onlineDisks {
partsMetadata[i] = xlMeta partsMetadata[i] = fi
} }
var err error var err error
// Write updated `xl.json` to all disks. // Write updated `xl.meta` to all disks.
onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum) onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, writeQuorum)
if err != nil { if err != nil {
return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath) return "", toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
} }
@ -208,12 +185,12 @@ func (xl xlObjects) newMultipartUpload(ctx context.Context, bucket string, objec
// subsequent request each UUID is unique. // subsequent request each UUID is unique.
// //
// Implements S3 compatible initiate multipart API. // Implements S3 compatible initiate multipart API.
func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) { func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (string, error) {
// No metadata is set, allocate a new one. // No metadata is set, allocate a new one.
if opts.UserDefined == nil { if opts.UserDefined == nil {
opts.UserDefined = make(map[string]string) opts.UserDefined = make(map[string]string)
} }
return xl.newMultipartUpload(ctx, bucket, object, opts.UserDefined) return er.newMultipartUpload(ctx, bucket, object, opts)
} }
// CopyObjectPart - reads incoming stream and internally erasure codes // CopyObjectPart - reads incoming stream and internally erasure codes
@ -221,8 +198,8 @@ func (xl xlObjects) NewMultipartUpload(ctx context.Context, bucket, object strin
// data is read from an existing object. // data is read from an existing object.
// //
// Implements S3 compatible Upload Part Copy API. // Implements S3 compatible Upload Part Copy API.
func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
partInfo, err := xl.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts) partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader, nil, nil), dstOpts)
if err != nil { if err != nil {
return pi, toObjectErr(err, dstBucket, dstObject) return pi, toObjectErr(err, dstBucket, dstObject)
} }
@ -236,64 +213,60 @@ func (xl xlObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, ds
// of the multipart transaction. // of the multipart transaction.
// //
// Implements S3 compatible Upload Part API. // Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
data := r.Reader data := r.Reader
if err := checkPutObjectPartArgs(ctx, bucket, object, xl); err != nil {
return pi, err
}
// Validate input data size and it can never be less than zero. // Validate input data size and it can never be less than zero.
if data.Size() < -1 { if data.Size() < -1 {
logger.LogIf(ctx, errInvalidArgument, logger.Application) logger.LogIf(ctx, errInvalidArgument, logger.Application)
return pi, toObjectErr(errInvalidArgument) return pi, toObjectErr(errInvalidArgument)
} }
var partsMetadata []xlMetaV1 var partsMetadata []FileInfo
var errs []error var errs []error
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
// Validates if upload ID exists. // Validates if upload ID exists.
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return pi, toObjectErr(err, bucket, object, uploadID) return pi, toObjectErr(err, bucket, object, uploadID)
} }
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs = readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, partsMetadata, errs = readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket,
uploadIDPath) uploadIDPath, "")
// get Quorum for this object // get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
if err != nil { if err != nil {
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
} }
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errXLWriteQuorum { if reducedErr == errErasureWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object) return pi, toObjectErr(reducedErr, bucket, object)
} }
// List all online disks. // List all online disks.
onlineDisks, modTime := listOnlineDisks(xl.getDisks(), partsMetadata, errs) onlineDisks, modTime := listOnlineDisks(er.getDisks(), partsMetadata, errs)
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil { if err != nil {
return pi, err return pi, err
} }
onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)
// Need a unique name for the part being written in minioMetaBucket to // Need a unique name for the part being written in minioMetaBucket to
// accommodate concurrent PutObjectPart requests // accommodate concurrent PutObjectPart requests
partSuffix := fmt.Sprintf("part.%d", partID) partSuffix := fmt.Sprintf("part.%d", partID)
tmpPart := mustGetUUID() tmpPart := mustGetUUID()
tmpPartPath := path.Join(tmpPart, partSuffix) tmpPartPath := pathJoin(tmpPart, partSuffix)
// Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete. // Delete the temporary object part. If PutObjectPart succeeds there would be nothing to delete.
defer xl.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum, false) defer er.deleteObject(ctx, minioMetaTmpBucket, tmpPart, writeQuorum)
erasure, err := NewErasure(ctx, xlMeta.Erasure.DataBlocks, xlMeta.Erasure.ParityBlocks, xlMeta.Erasure.BlockSize) erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil { if err != nil {
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
} }
@ -303,16 +276,16 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
switch size := data.Size(); { switch size := data.Size(); {
case size == 0: case size == 0:
buffer = make([]byte, 1) // Allocate at least a byte to reach EOF buffer = make([]byte, 1) // Allocate at least a byte to reach EOF
case size == -1 || size >= blockSizeV1: case size == -1 || size >= fi.Erasure.BlockSize:
buffer = xl.bp.Get() buffer = er.bp.Get()
defer xl.bp.Put(buffer) defer er.bp.Put(buffer)
case size < blockSizeV1: case size < fi.Erasure.BlockSize:
// No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. // No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller.
buffer = make([]byte, size, 2*size+int64(erasure.parityBlocks+erasure.dataBlocks-1)) buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1))
} }
if len(buffer) > int(xlMeta.Erasure.BlockSize) { if len(buffer) > int(fi.Erasure.BlockSize) {
buffer = buffer[:xlMeta.Erasure.BlockSize] buffer = buffer[:fi.Erasure.BlockSize]
} }
writers := make([]io.Writer, len(onlineDisks)) writers := make([]io.Writer, len(onlineDisks))
for i, disk := range onlineDisks { for i, disk := range onlineDisks {
@ -322,7 +295,7 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize()) writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
} }
n, err := erasure.Encode(ctx, data, writers, buffer, erasure.dataBlocks+1) n, err := erasure.Encode(ctx, data, writers, buffer, fi.Erasure.DataBlocks+1)
closeBitrotWriters(writers) closeBitrotWriters(writers)
if err != nil { if err != nil {
return pi, toObjectErr(err, bucket, object) return pi, toObjectErr(err, bucket, object)
@ -341,21 +314,21 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
} }
// Validates if upload ID exists. // Validates if upload ID exists.
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return pi, toObjectErr(err, bucket, object, uploadID) return pi, toObjectErr(err, bucket, object, uploadID)
} }
// Rename temporary part file to its final location. // Rename temporary part file to its final location.
partPath := path.Join(uploadIDPath, partSuffix) partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix)
onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil) onlineDisks, err = rename(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, false, writeQuorum, nil)
if err != nil { if err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
} }
// Read metadata again because it might be updated with parallel upload of another part. // Read metadata again because it might be updated with parallel upload of another part.
partsMetadata, errs = readAllXLMetadata(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs = readAllFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, "")
reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errXLWriteQuorum { if reducedErr == errErasureWriteQuorum {
return pi, toObjectErr(reducedErr, bucket, object) return pi, toObjectErr(reducedErr, bucket, object)
} }
@ -363,25 +336,26 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs) onlineDisks, modTime = listOnlineDisks(onlineDisks, partsMetadata, errs)
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
xlMeta, err = pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) fi, err = pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil { if err != nil {
return pi, err return pi, err
} }
// Once part is successfully committed, proceed with updating XL metadata. // Once part is successfully committed, proceed with updating erasure metadata.
xlMeta.Stat.ModTime = UTCNow() fi.ModTime = UTCNow()
md5hex := r.MD5CurrentHexString() md5hex := r.MD5CurrentHexString()
// Add the current part. // Add the current part.
xlMeta.AddObjectPart(partID, md5hex, n, data.ActualSize()) fi.AddObjectPart(partID, md5hex, n, data.ActualSize())
for i, disk := range onlineDisks { for i, disk := range onlineDisks {
if disk == OfflineDisk { if disk == OfflineDisk {
continue continue
} }
partsMetadata[i].Stat = xlMeta.Stat partsMetadata[i].Size = fi.Size
partsMetadata[i].Parts = xlMeta.Parts partsMetadata[i].ModTime = fi.ModTime
partsMetadata[i].Parts = fi.Parts
partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{ partsMetadata[i].Erasure.AddChecksumInfo(ChecksumInfo{
PartNumber: partID, PartNumber: partID,
Algorithm: DefaultBitrotAlgorithm, Algorithm: DefaultBitrotAlgorithm,
@ -389,19 +363,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
}) })
} }
// Write all the checksum metadata. // Writes update `xl.meta` format for each disk.
tempXLMetaPath := mustGetUUID() if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
// Cleanup in case of xl.json writing failure
defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false)
// Writes a unique `xl.json` each disk carrying new checksum related information.
onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum)
if err != nil {
return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
}
if _, err = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
} }
@ -409,8 +372,8 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
return PartInfo{ return PartInfo{
PartNumber: partID, PartNumber: partID,
ETag: md5hex, ETag: md5hex,
LastModified: xlMeta.Stat.ModTime, LastModified: fi.ModTime,
Size: xlMeta.Stat.Size, Size: fi.Size,
ActualSize: data.ActualSize(), ActualSize: data.ActualSize(),
}, nil }, nil
} }
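The notable layout change above is that the committed part is renamed to pathJoin(uploadIDPath, fi.DataDir, partSuffix) rather than directly under the upload ID path, so every upload's parts now live inside a per-version data directory. A minimal sketch of that path composition follows; the "part.N" suffix and the example path shapes are assumptions for illustration, only the extra dataDir segment comes from the diff above.

package main

import (
	"fmt"
	"path"
)

// multipartPartPath sketches where a committed part lands in the new layout.
// The "part.N" suffix and the uploadIDPath shape are assumptions; only the
// extra dataDir path segment is taken from the diff above.
func multipartPartPath(uploadIDPath, dataDir string, partID int) string {
	return path.Join(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partID))
}

func main() {
	// Illustrative output: bucketSHA/uploadID/9d1f0d26-6c50-48a7-9562-d12a27a3f5ac/part.1
	fmt.Println(multipartPartPath("bucketSHA/uploadID", "9d1f0d26-6c50-48a7-9562-d12a27a3f5ac", 1))
}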
@ -419,44 +382,44 @@ func (xl xlObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
// by callers to verify object states // by callers to verify object states
// - encrypted // - encrypted
// - compressed // - compressed
func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
result := MultipartInfo{ result := MultipartInfo{
Bucket: bucket, Bucket: bucket,
Object: object, Object: object,
UploadID: uploadID, UploadID: uploadID,
} }
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return result, toObjectErr(err, bucket, object, uploadID) return result, toObjectErr(err, bucket, object, uploadID)
} }
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
storageDisks := xl.getDisks() storageDisks := er.getDisks()
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, opts.VersionID)
// get Quorum for this object // get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) readQuorum, _, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
if err != nil { if err != nil {
return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
} }
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
if reducedErr == errXLWriteQuorum { if reducedErr == errErasureReadQuorum {
return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
} }
_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, readQuorum)
if err != nil { if err != nil {
return result, err return result, err
} }
result.UserDefined = xlMeta.Meta result.UserDefined = fi.Metadata
return result, nil return result, nil
} }
@ -467,51 +430,47 @@ func (xl xlObjects) GetMultipartInfo(ctx context.Context, bucket, object, upload
// Implements S3 compatible ListObjectParts API. The resulting // Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshaled directly into XML and // ListPartsInfo structure is marshaled directly into XML and
// replied back to the client. // replied back to the client.
func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) {
if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return result, toObjectErr(err, bucket, object, uploadID) return result, toObjectErr(err, bucket, object, uploadID)
} }
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
storageDisks := xl.getDisks() storageDisks := er.getDisks()
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "")
// get Quorum for this object // get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
if err != nil { if err != nil {
return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath) return result, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
} }
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errXLWriteQuorum { if reducedErr == errErasureWriteQuorum {
return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath) return result, toObjectErr(reducedErr, minioMetaMultipartBucket, uploadIDPath)
} }
_, modTime := listOnlineDisks(storageDisks, partsMetadata, errs) _, modTime := listOnlineDisks(storageDisks, partsMetadata, errs)
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
xlValidMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil { if err != nil {
return result, err return result, err
} }
var xlMeta = xlValidMeta.Meta
var xlParts = xlValidMeta.Parts
// Populate the result stub. // Populate the result stub.
result.Bucket = bucket result.Bucket = bucket
result.Object = object result.Object = object
result.UploadID = uploadID result.UploadID = uploadID
result.MaxParts = maxParts result.MaxParts = maxParts
result.PartNumberMarker = partNumberMarker result.PartNumberMarker = partNumberMarker
result.UserDefined = xlMeta result.UserDefined = fi.Metadata
// For empty number of parts or maxParts as zero, return right here. // For empty number of parts or maxParts as zero, return right here.
if len(xlParts) == 0 || maxParts == 0 { if len(fi.Parts) == 0 || maxParts == 0 {
return result, nil return result, nil
} }
@ -521,17 +480,17 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
} }
// Only parts with higher part numbers will be listed. // Only parts with higher part numbers will be listed.
partIdx := objectPartIndex(xlParts, partNumberMarker) partIdx := objectPartIndex(fi.Parts, partNumberMarker)
parts := xlParts parts := fi.Parts
if partIdx != -1 { if partIdx != -1 {
parts = xlParts[partIdx+1:] parts = fi.Parts[partIdx+1:]
} }
count := maxParts count := maxParts
for _, part := range parts { for _, part := range parts {
result.Parts = append(result.Parts, PartInfo{ result.Parts = append(result.Parts, PartInfo{
PartNumber: part.Number, PartNumber: part.Number,
ETag: part.ETag, ETag: part.ETag,
LastModified: xlValidMeta.Stat.ModTime, LastModified: fi.ModTime,
Size: part.Size, Size: part.Size,
}) })
count-- count--
@ -556,14 +515,14 @@ func (xl xlObjects) ListObjectParts(ctx context.Context, bucket, object, uploadI
// md5sums of all the parts. // md5sums of all the parts.
// //
// Implements S3 compatible Complete multipart API. // Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) {
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return oi, toObjectErr(err, bucket, object, uploadID) return oi, toObjectErr(err, bucket, object, uploadID)
} }
// Check if an object is present as one of the parent dirs. // Check if an object is present as one of the parent dirs.
// -- FIXME. (needs a new kind of lock). // -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(ctx, bucket, path.Dir(object)) { if er.parentDirIsObject(ctx, bucket, path.Dir(object)) {
return oi, toObjectErr(errFileParentIsFile, bucket, object) return oi, toObjectErr(errFileParentIsFile, bucket, object)
} }
@ -572,21 +531,21 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// Calculate s3 compatible md5sum for complete multipart. // Calculate s3 compatible md5sum for complete multipart.
s3MD5 := getCompleteMultipartMD5(parts) s3MD5 := getCompleteMultipartMD5(parts)
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
storageDisks := xl.getDisks() storageDisks := er.getDisks()
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket, uploadIDPath, "")
// get Quorum for this object // get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
if err != nil { if err != nil {
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
if reducedErr == errXLWriteQuorum { if reducedErr == errErasureWriteQuorum {
return oi, toObjectErr(reducedErr, bucket, object) return oi, toObjectErr(reducedErr, bucket, object)
} }
@ -599,28 +558,26 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
var objectActualSize int64 var objectActualSize int64
// Pick one from the first valid metadata. // Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(ctx, partsMetadata, modTime, writeQuorum) fi, err := pickValidFileInfo(ctx, partsMetadata, modTime, writeQuorum)
if err != nil { if err != nil {
return oi, err return oi, err
} }
// Order online disks in accordance with distribution order. // Order online disks in accordance with distribution order.
onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution) onlineDisks = shuffleDisks(onlineDisks, fi.Erasure.Distribution)
// Order parts metadata in accordance with distribution order. // Order parts metadata in accordance with distribution order.
partsMetadata = shufflePartsMetadata(partsMetadata, xlMeta.Erasure.Distribution) partsMetadata = shufflePartsMetadata(partsMetadata, fi.Erasure.Distribution)
// Save current xl meta for validation. // Save current erasure metadata for validation.
var currentXLMeta = xlMeta var currentFI = fi
// Allocate parts similar to incoming slice. // Allocate parts similar to incoming slice.
xlMeta.Parts = make([]ObjectPartInfo, len(parts)) fi.Parts = make([]ObjectPartInfo, len(parts))
// Validate each part and then commit to disk. // Validate each part and then commit to disk.
for i, part := range parts { for i, part := range parts {
// ensure that part ETag is canonicalized to strip off extraneous quotes partIdx := objectPartIndex(currentFI.Parts, part.PartNumber)
part.ETag = canonicalizeETag(part.ETag)
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// All parts should have same part number. // All parts should have same part number.
if partIdx == -1 { if partIdx == -1 {
invp := InvalidPart{ invp := InvalidPart{
@ -630,116 +587,103 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
return oi, invp return oi, invp
} }
if currentXLMeta.Parts[partIdx].ETag != part.ETag { // ensure that part ETag is canonicalized to strip off extraneous quotes
part.ETag = canonicalizeETag(part.ETag)
if currentFI.Parts[partIdx].ETag != part.ETag {
invp := InvalidPart{ invp := InvalidPart{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
ExpETag: currentXLMeta.Parts[partIdx].ETag, ExpETag: currentFI.Parts[partIdx].ETag,
GotETag: part.ETag, GotETag: part.ETag,
} }
return oi, invp return oi, invp
} }
// All parts except the last part have to be at least 5MB. // All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].ActualSize) { if (i < len(parts)-1) && !isMinAllowedPartSize(currentFI.Parts[partIdx].ActualSize) {
return oi, PartTooSmall{ return oi, PartTooSmall{
PartNumber: part.PartNumber, PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].ActualSize, PartSize: currentFI.Parts[partIdx].ActualSize,
PartETag: part.ETag, PartETag: part.ETag,
} }
} }
// Save for total object size. // Save for total object size.
objectSize += currentXLMeta.Parts[partIdx].Size objectSize += currentFI.Parts[partIdx].Size
// Save the consolidated actual size. // Save the consolidated actual size.
objectActualSize += currentXLMeta.Parts[partIdx].ActualSize objectActualSize += currentFI.Parts[partIdx].ActualSize
// Add incoming parts. // Add incoming parts.
xlMeta.Parts[i] = ObjectPartInfo{ fi.Parts[i] = ObjectPartInfo{
Number: part.PartNumber, Number: part.PartNumber,
Size: currentXLMeta.Parts[partIdx].Size, Size: currentFI.Parts[partIdx].Size,
ActualSize: currentXLMeta.Parts[partIdx].ActualSize, ActualSize: currentFI.Parts[partIdx].ActualSize,
} }
} }
// Save the final object size and modtime. // Save the final object size and modtime.
xlMeta.Stat.Size = objectSize fi.Size = objectSize
xlMeta.Stat.ModTime = UTCNow() fi.ModTime = UTCNow()
// Save successfully calculated md5sum. // Save successfully calculated md5sum.
xlMeta.Meta["etag"] = s3MD5 fi.Metadata["etag"] = s3MD5
// Save the consolidated actual size. // Save the consolidated actual size.
xlMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10)
// Update all xl metadata, make sure to not modify fields like // Update all erasure metadata, make sure to not modify fields like
// checksums, which are different on each disk. // checksums, which are different on each disk.
for index := range partsMetadata { for index := range partsMetadata {
partsMetadata[index].Stat = xlMeta.Stat partsMetadata[index].Size = fi.Size
partsMetadata[index].Meta = xlMeta.Meta partsMetadata[index].ModTime = fi.ModTime
partsMetadata[index].Parts = xlMeta.Parts partsMetadata[index].Metadata = fi.Metadata
partsMetadata[index].Parts = fi.Parts
} }
tempXLMetaPath := mustGetUUID() // Write final `xl.meta` at uploadID location
if onlineDisks, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
// Cleanup in case of failure return oi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
defer xl.deleteObject(ctx, minioMetaTmpBucket, tempXLMetaPath, writeQuorum, false)
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, writeQuorum); err != nil {
return oi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
}
var rErr error
onlineDisks, rErr = commitXLMetadata(ctx, onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, writeQuorum)
if rErr != nil {
return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
if xl.isObject(bucket, object) {
// Rename if an object already exists to temporary location.
newUniqueID := mustGetUUID()
// Delete success renamed object.
defer xl.deleteObject(ctx, minioMetaTmpBucket, newUniqueID, writeQuorum, false)
// NOTE: Do not use online disks slice here: the reason is that existing object should be purged
// regardless of `xl.json` status and rolled back in case of errors. Also allow renaming of the
// existing object if it is not present in quorum disks so users can overwrite stale objects.
_, err = rename(ctx, xl.getDisks(), bucket, object, minioMetaTmpBucket, newUniqueID, true, writeQuorum, []error{errFileNotFound})
if err != nil {
return oi, toObjectErr(err, bucket, object)
}
} }
// Remove parts that weren't present in CompleteMultipartUpload request. // Remove parts that weren't present in CompleteMultipartUpload request.
for _, curpart := range currentXLMeta.Parts { for _, curpart := range currentFI.Parts {
if objectPartIndex(xlMeta.Parts, curpart.Number) == -1 { if objectPartIndex(fi.Parts, curpart.Number) == -1 {
// Delete the missing part files. e.g, // Delete the missing part files. e.g,
// Request 1: NewMultipart // Request 1: NewMultipart
// Request 2: PutObjectPart 1 // Request 2: PutObjectPart 1
// Request 3: PutObjectPart 2 // Request 3: PutObjectPart 2
// Request 4: CompleteMultipartUpload --part 2 // Request 4: CompleteMultipartUpload --part 2
// N.B. 1st part is not present. This part should be removed from the storage. // N.B. 1st part is not present. This part should be removed from the storage.
xl.removeObjectPart(bucket, object, uploadID, curpart.Number) er.removeObjectPart(bucket, object, uploadID, fi.DataDir, curpart.Number)
} }
} }
// Rename the multipart object to final location. // Rename the multipart object to final location.
if onlineDisks, err = rename(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, true, writeQuorum, nil); err != nil { if onlineDisks, err = renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath,
fi.DataDir, bucket, object, writeQuorum, nil); err != nil {
return oi, toObjectErr(err, bucket, object) return oi, toObjectErr(err, bucket, object)
} }
// Check if there is any offline disk and add it to the MRF list // Check if there is any offline disk and add it to the MRF list
for i := 0; i < len(onlineDisks); i++ { for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] == nil || storageDisks[i] == nil { if onlineDisks[i] == nil || storageDisks[i] == nil {
xl.addPartialUpload(bucket, object) er.addPartialUpload(bucket, object)
break break
} }
} }
for i := 0; i < len(onlineDisks); i++ {
if onlineDisks[i] == nil {
continue
}
// Object info is the same in all disks, so we can pick
// the first meta from online disk
fi = partsMetadata[i]
break
}
// Success, return object info. // Success, return object info.
return xlMeta.ToObjectInfo(bucket, object), nil return fi.ToObjectInfo(bucket, object), nil
} }
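Together with NewMultipartUpload and PutObjectPart, the completion logic above closes out the multipart lifecycle against the erasure backend. The following is a rough test-style sketch of that flow in the spirit of the tests elsewhere in this commit; the NewMultipartUpload signature is an assumption, while PutObjectPart and CompleteMultipartUpload follow the signatures visible in this diff.

// Sketch only: exercises the multipart lifecycle through the object layer the
// way the tests in this commit do. Assumes it lives in package cmd so that
// ObjectLayer, ObjectOptions, CompletePart and mustGetPutObjReader are in scope.
func exampleMultipartFlow(ctx context.Context, obj ObjectLayer, t *testing.T) {
	bucket, object := "bucket", "object"
	opts := ObjectOptions{}

	// Assumed signature: NewMultipartUpload(ctx, bucket, object, opts) (uploadID, error)
	uploadID, err := obj.NewMultipartUpload(ctx, bucket, object, opts)
	if err != nil {
		t.Fatal(err)
	}

	// Upload a single part; parts other than the last must be at least 5MiB.
	data := bytes.Repeat([]byte("a"), 6*1024*1024)
	pi, err := obj.PutObjectPart(ctx, bucket, object, uploadID, 1,
		mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
	if err != nil {
		t.Fatal(err)
	}

	// Complete with the uploaded part; the server canonicalizes the ETag and
	// validates it against the recorded part metadata, as seen above.
	if _, err = obj.CompleteMultipartUpload(ctx, bucket, object, uploadID,
		[]CompletePart{{PartNumber: 1, ETag: pi.ETag}}, opts); err != nil {
		t.Fatal(err)
	}
}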
// AbortMultipartUpload - aborts an ongoing multipart operation // AbortMultipartUpload - aborts an ongoing multipart operation
@ -753,79 +697,28 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
// Implements S3 compatible Abort multipart API, slight difference is // Implements S3 compatible Abort multipart API, slight difference is
// that this is an atomic idempotent operation. Subsequent calls have // that this is an atomic idempotent operation. Subsequent calls have
// no effect and further requests to the same uploadID would not be honored. // no effect and further requests to the same uploadID would not be honored.
func (xl xlObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error { func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
// Validates if upload ID exists. // Validates if upload ID exists.
if err := xl.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { if err := er.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
return toObjectErr(err, bucket, object, uploadID) return toObjectErr(err, bucket, object, uploadID)
} }
uploadIDPath := xl.getUploadIDDir(bucket, object, uploadID) uploadIDPath := er.getUploadIDDir(bucket, object, uploadID)
// Read metadata associated with the object from all disks. // Read metadata associated with the object from all disks.
partsMetadata, errs := readAllXLMetadata(ctx, xl.getDisks(), minioMetaMultipartBucket, uploadIDPath) partsMetadata, errs := readAllFileInfo(ctx, er.getDisks(), minioMetaMultipartBucket, uploadIDPath, "")
// get Quorum for this object // get Quorum for this object
_, writeQuorum, err := objectQuorumFromMeta(ctx, xl, partsMetadata, errs) _, writeQuorum, err := objectQuorumFromMeta(ctx, er, partsMetadata, errs)
if err != nil { if err != nil {
return toObjectErr(err, bucket, object, uploadID) return toObjectErr(err, bucket, object, uploadID)
} }
// Cleanup all uploaded parts. // Cleanup all uploaded parts.
if err = xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false); err != nil { if err = er.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum); err != nil {
return toObjectErr(err, bucket, object, uploadID) return toObjectErr(err, bucket, object, uploadID)
} }
// Successfully purged. // Successfully purged.
return nil return nil
} }
// Clean-up the old multipart uploads. Should be run in a Go routine.
func (xl xlObjects) cleanupStaleMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration, doneCh <-chan struct{}) {
ticker := time.NewTicker(cleanupInterval)
defer ticker.Stop()
for {
select {
case <-doneCh:
return
case <-ticker.C:
var disk StorageAPI
for _, d := range xl.getLoadBalancedDisks() {
if d != nil {
disk = d
break
}
}
if disk == nil {
continue
}
xl.cleanupStaleMultipartUploadsOnDisk(ctx, disk, expiry)
}
}
}
// Remove the old multipart uploads on the given disk.
func (xl xlObjects) cleanupStaleMultipartUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
now := time.Now()
shaDirs, err := disk.ListDir(minioMetaMultipartBucket, "", -1, "")
if err != nil {
return
}
for _, shaDir := range shaDirs {
uploadIDDirs, err := disk.ListDir(minioMetaMultipartBucket, shaDir, -1, "")
if err != nil {
continue
}
for _, uploadIDDir := range uploadIDDirs {
uploadIDPath := pathJoin(shaDir, uploadIDDir)
fi, err := disk.StatFile(minioMetaMultipartBucket, pathJoin(uploadIDPath, xlMetaJSONFile))
if err != nil {
continue
}
if now.Sub(fi.ModTime) > expiry {
writeQuorum := getWriteQuorum(len(xl.getDisks()))
xl.deleteObject(ctx, minioMetaMultipartBucket, uploadIDPath, writeQuorum, false)
}
}
}
}

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -20,16 +20,11 @@ import (
"bytes" "bytes"
"context" "context"
"io/ioutil" "io/ioutil"
"math/rand"
"os" "os"
"path"
"reflect"
"testing" "testing"
"time"
humanize "github.com/dustin/go-humanize" humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/config/storageclass" "github.com/minio/minio/cmd/config/storageclass"
"github.com/minio/minio/pkg/madmin"
) )
func TestRepeatPutObjectPart(t *testing.T) { func TestRepeatPutObjectPart(t *testing.T) {
@ -41,7 +36,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
var err error var err error
var opts ObjectOptions var opts ObjectOptions
objLayer, disks, err = prepareXL16(ctx) objLayer, disks, err = prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -49,7 +44,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
// cleaning up of temporary test directories // cleaning up of temporary test directories
defer removeRoots(disks) defer removeRoots(disks)
err = objLayer.MakeBucketWithLocation(ctx, "bucket1", "", false) err = objLayer.MakeBucketWithLocation(ctx, "bucket1", BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -71,7 +66,7 @@ func TestRepeatPutObjectPart(t *testing.T) {
} }
} }
func TestXLDeleteObjectBasic(t *testing.T) { func TestErasureDeleteObjectBasic(t *testing.T) {
testCases := []struct { testCases := []struct {
bucket string bucket string
object string object string
@ -91,12 +86,12 @@ func TestXLDeleteObjectBasic(t *testing.T) {
defer cancel() defer cancel()
// Create an instance of xl backend // Create an instance of xl backend
xl, fsDirs, err := prepareXL16(ctx) xl, fsDirs, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
err = xl.MakeBucketWithLocation(ctx, "bucket", "", false) err = xl.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -104,40 +99,43 @@ func TestXLDeleteObjectBasic(t *testing.T) {
// Create object "dir/obj" under bucket "bucket" for Test 7 to pass // Create object "dir/obj" under bucket "bucket" for Test 7 to pass
_, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) _, err = xl.PutObject(ctx, "bucket", "dir/obj", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
if err != nil { if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err) t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
} }
for i, test := range testCases { for _, test := range testCases {
actualErr := xl.DeleteObject(ctx, test.bucket, test.object) test := test
if test.expectedErr != nil && actualErr != test.expectedErr { t.Run("", func(t *testing.T) {
t.Errorf("Test %d: Expected to fail with %s, but failed with %s", i+1, test.expectedErr, actualErr) _, actualErr := xl.DeleteObject(ctx, test.bucket, test.object, ObjectOptions{})
} if test.expectedErr != nil && actualErr != test.expectedErr {
if test.expectedErr == nil && actualErr != nil { t.Errorf("Expected to fail with %s, but failed with %s", test.expectedErr, actualErr)
t.Errorf("Test %d: Expected to pass, but failed with %s", i+1, actualErr) }
} if test.expectedErr == nil && actualErr != nil {
t.Errorf("Expected to pass, but failed with %s", actualErr)
}
})
} }
// Cleanup backend directories // Cleanup backend directories
removeRoots(fsDirs) removeRoots(fsDirs)
} }
func TestXLDeleteObjectsXLSet(t *testing.T) { func TestErasureDeleteObjectsErasureSet(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
var objs []*xlObjects var objs []*erasureObjects
for i := 0; i < 32; i++ { for i := 0; i < 32; i++ {
obj, fsDirs, err := prepareXL(ctx, 16) obj, fsDirs, err := prepareErasure(ctx, 16)
if err != nil { if err != nil {
t.Fatal("Unable to initialize 'XL' object layer.", err) t.Fatal("Unable to initialize 'Erasure' object layer.", err)
} }
// Remove all dirs. // Remove all dirs.
for _, dir := range fsDirs { for _, dir := range fsDirs {
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
} }
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
objs = append(objs, xl) objs = append(objs, xl)
} }
xlSets := &xlSets{sets: objs, distributionAlgo: "CRCMOD"} erasureSets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"}
type testCaseType struct { type testCaseType struct {
bucket string bucket string
@ -152,32 +150,29 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
{bucketName, "obj_4"}, {bucketName, "obj_4"},
} }
err := xlSets.MakeBucketWithLocation(GlobalContext, bucketName, "", false) err := erasureSets.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
for _, testCase := range testCases { for _, testCase := range testCases {
_, err = xlSets.PutObject(GlobalContext, testCase.bucket, testCase.object, _, err = erasureSets.PutObject(ctx, testCase.bucket, testCase.object,
mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
if err != nil { if err != nil {
t.Fatalf("XL Object upload failed: <ERROR> %s", err) t.Fatalf("Erasure Object upload failed: <ERROR> %s", err)
} }
} }
toObjectNames := func(testCases []testCaseType) []string { toObjectNames := func(testCases []testCaseType) []ObjectToDelete {
names := make([]string, len(testCases)) names := make([]ObjectToDelete, len(testCases))
for i := range testCases { for i := range testCases {
names[i] = testCases[i].object names[i] = ObjectToDelete{ObjectName: testCases[i].object}
} }
return names return names
} }
objectNames := toObjectNames(testCases) objectNames := toObjectNames(testCases)
delErrs, err := xlSets.DeleteObjects(GlobalContext, bucketName, objectNames) _, delErrs := erasureSets.DeleteObjects(ctx, bucketName, objectNames, ObjectOptions{})
if err != nil {
t.Errorf("Failed to call DeleteObjects with the error: `%v`", err)
}
for i := range delErrs { for i := range delErrs {
if delErrs[i] != nil { if delErrs[i] != nil {
@ -186,7 +181,7 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
} }
for _, test := range testCases { for _, test := range testCases {
_, statErr := xlSets.GetObjectInfo(GlobalContext, test.bucket, test.object, ObjectOptions{}) _, statErr := erasureSets.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{})
switch statErr.(type) { switch statErr.(type) {
case ObjectNotFound: case ObjectNotFound:
default: default:
@ -195,23 +190,23 @@ func TestXLDeleteObjectsXLSet(t *testing.T) {
} }
} }
func TestXLDeleteObjectDiskNotFound(t *testing.T) { func TestErasureDeleteObjectDiskNotFound(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := prepareXL16(ctx) obj, fsDirs, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Cleanup backend directories // Cleanup backend directories
defer removeRoots(fsDirs) defer removeRoots(fsDirs)
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -226,16 +221,17 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
} }
// for a 16 disk setup, quorum is 9. To simulate disks not found while // for a 16 disk setup, quorum is 9. To simulate disks not found while
// quorum is still available, we remove disks leaving quorum disks behind. // quorum is still available, we remove disks leaving quorum disks behind.
xlDisks := xl.getDisks() erasureDisks := xl.getDisks()
z.zones[0].xlDisksMu.Lock() z.zones[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI { xl.getDisks = func() []StorageAPI {
for i := range xlDisks[:7] { for i := range erasureDisks[:7] {
xlDisks[i] = newNaughtyDisk(xlDisks[i], nil, errFaultyDisk) erasureDisks[i] = newNaughtyDisk(erasureDisks[i], nil, errFaultyDisk)
} }
return xlDisks return erasureDisks
} }
z.zones[0].xlDisksMu.Unlock()
err = obj.DeleteObject(ctx, bucket, object) z.zones[0].erasureDisksMu.Unlock()
_, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -247,18 +243,19 @@ func TestXLDeleteObjectDiskNotFound(t *testing.T) {
} }
// Remove one more disk to 'lose' quorum, by setting it to nil. // Remove one more disk to 'lose' quorum, by setting it to nil.
xlDisks = xl.getDisks() erasureDisks = xl.getDisks()
z.zones[0].xlDisksMu.Lock() z.zones[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI { xl.getDisks = func() []StorageAPI {
xlDisks[7] = nil erasureDisks[7] = nil
xlDisks[8] = nil erasureDisks[8] = nil
return xlDisks return erasureDisks
} }
z.zones[0].xlDisksMu.Unlock()
err = obj.DeleteObject(ctx, bucket, object) z.zones[0].erasureDisksMu.Unlock()
// since majority of disks are not available, metaquorum is not achieved and hence errXLReadQuorum error _, err = obj.DeleteObject(ctx, bucket, object, ObjectOptions{})
if err != toObjectErr(errXLReadQuorum, bucket, object) { // since majority of disks are not available, metaquorum is not achieved and hence errErasureWriteQuorum error
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errXLReadQuorum, bucket, object), err) if err != toObjectErr(errErasureWriteQuorum, bucket, object) {
t.Errorf("Expected deleteObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
} }
} }
@ -267,18 +264,18 @@ func TestGetObjectNoQuorum(t *testing.T) {
defer cancel() defer cancel()
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := prepareXL16(ctx) obj, fsDirs, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Cleanup backend directories. // Cleanup backend directories.
defer removeRoots(fsDirs) defer removeRoots(fsDirs)
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -293,7 +290,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
} }
// Make 9 disks offline, which leaves less than quorum number of disks // Make 9 disks offline, which leaves less than quorum number of disks
// in a 16 disk XL setup. The original disks are 'replaced' with // in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method // naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,2) // invocations, where f - [0,2)
for f := 0; f < 2; f++ { for f := 0; f < 2; f++ {
@ -301,24 +298,24 @@ func TestGetObjectNoQuorum(t *testing.T) {
for i := 0; i <= f; i++ { for i := 0; i <= f; i++ {
diskErrors[i] = nil diskErrors[i] = nil
} }
xlDisks := xl.getDisks() erasureDisks := xl.getDisks()
for i := range xlDisks[:9] { for i := range erasureDisks[:9] {
switch diskType := xlDisks[i].(type) { switch diskType := erasureDisks[i].(type) {
case *naughtyDisk: case *naughtyDisk:
xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
default: default:
xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
} }
} }
z.zones[0].xlDisksMu.Lock() z.zones[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI { xl.getDisks = func() []StorageAPI {
return xlDisks return erasureDisks
} }
z.zones[0].xlDisksMu.Unlock() z.zones[0].erasureDisksMu.Unlock()
// Fetch object from store. // Fetch object from store.
err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts) err = xl.GetObject(ctx, bucket, object, 0, int64(len("abcd")), ioutil.Discard, "", opts)
if err != toObjectErr(errXLReadQuorum, bucket, object) { if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
} }
} }
} }
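The quorum tests above and below rely on wrapping healthy disks so that StorageAPI calls begin failing after a configurable number of successes. A minimal sketch of that failure-injection idea follows; the names here are hypothetical, the real helper is newNaughtyDisk, which wraps every StorageAPI method.

// flakyCounter sketches the per-call error selection behind the naughty-disk
// pattern used in these tests: the i-th call returns errs[i] when present
// (nil meaning "succeed"), otherwise the default error is returned.
type flakyCounter struct {
	calls      int
	errs       map[int]error
	defaultErr error
}

func (f *flakyCounter) nextErr() error {
	err, ok := f.errs[f.calls]
	f.calls++
	if ok {
		return err // may be nil, i.e. this particular call succeeds
	}
	return f.defaultErr
}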
@ -328,7 +325,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
defer cancel() defer cancel()
// Create an instance of xl backend. // Create an instance of xl backend.
obj, fsDirs, err := prepareXL16(ctx) obj, fsDirs, err := prepareErasure16(ctx)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -336,11 +333,11 @@ func TestPutObjectNoQuorum(t *testing.T) {
// Cleanup backend directories. // Cleanup backend directories.
defer removeRoots(fsDirs) defer removeRoots(fsDirs)
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
// Create "bucket" // Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", "", false) err = obj.MakeBucketWithLocation(ctx, "bucket", BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -355,7 +352,7 @@ func TestPutObjectNoQuorum(t *testing.T) {
} }
// Make 9 disks offline, which leaves less than quorum number of disks // Make 9 disks offline, which leaves less than quorum number of disks
// in a 16 disk XL setup. The original disks are 'replaced' with // in a 16 disk Erasure setup. The original disks are 'replaced' with
// naughtyDisks that fail after 'f' successful StorageAPI method // naughtyDisks that fail after 'f' successful StorageAPI method
// invocations, where f - [0,3) // invocations, where f - [0,3)
for f := 0; f < 3; f++ { for f := 0; f < 3; f++ {
@ -363,143 +360,38 @@ func TestPutObjectNoQuorum(t *testing.T) {
for i := 0; i <= f; i++ { for i := 0; i <= f; i++ {
diskErrors[i] = nil diskErrors[i] = nil
} }
xlDisks := xl.getDisks() erasureDisks := xl.getDisks()
for i := range xlDisks[:9] { for i := range erasureDisks[:9] {
switch diskType := xlDisks[i].(type) { switch diskType := erasureDisks[i].(type) {
case *naughtyDisk: case *naughtyDisk:
xlDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk) erasureDisks[i] = newNaughtyDisk(diskType.disk, diskErrors, errFaultyDisk)
default: default:
xlDisks[i] = newNaughtyDisk(xlDisks[i], diskErrors, errFaultyDisk) erasureDisks[i] = newNaughtyDisk(erasureDisks[i], diskErrors, errFaultyDisk)
} }
} }
z.zones[0].xlDisksMu.Lock() z.zones[0].erasureDisksMu.Lock()
xl.getDisks = func() []StorageAPI { xl.getDisks = func() []StorageAPI {
return xlDisks return erasureDisks
} }
z.zones[0].xlDisksMu.Unlock() z.zones[0].erasureDisksMu.Unlock()
// Upload new content to same object "object" // Upload new content to same object "object"
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts) _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), opts)
if err != toObjectErr(errXLWriteQuorum, bucket, object) { if err != toObjectErr(errErasureWriteQuorum, bucket, object) {
t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errXLWriteQuorum, bucket, object), err) t.Errorf("Expected putObject to fail with %v, but failed with %v", toObjectErr(errErasureWriteQuorum, bucket, object), err)
} }
} }
} }
// Tests both object and bucket healing.
func TestHealing(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
obj, fsDirs, err := prepareXL16(ctx)
if err != nil {
t.Fatal(err)
}
defer removeRoots(fsDirs)
z := obj.(*xlZones)
xl := z.zones[0].sets[0]
// Create "bucket"
err = obj.MakeBucketWithLocation(ctx, "bucket", "", false)
if err != nil {
t.Fatal(err)
}
bucket := "bucket"
object := "object"
data := make([]byte, 1*humanize.MiByte)
length := int64(len(data))
_, err = rand.Read(data)
if err != nil {
t.Fatal(err)
}
_, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), length, "", ""), ObjectOptions{})
if err != nil {
t.Fatal(err)
}
disk := xl.getDisks()[0]
xlMetaPreHeal, err := readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
// Remove the object - to simulate the case where the disk was down when the object
// was created.
err = os.RemoveAll(path.Join(fsDirs[0], bucket, object))
if err != nil {
t.Fatal(err)
}
_, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealNormalScan})
if err != nil {
t.Fatal(err)
}
xlMetaPostHeal, err := readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
// After heal the meta file should be as expected.
if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) {
t.Fatal("HealObject failed")
}
err = os.RemoveAll(path.Join(fsDirs[0], bucket, object, "xl.json"))
if err != nil {
t.Fatal(err)
}
// Write xl.json with different modtime to simulate the case where a disk had
// gone down when an object was replaced by a new object.
xlMetaOutDated := xlMetaPreHeal
xlMetaOutDated.Stat.ModTime = time.Now()
err = writeXLMetadata(ctx, disk, bucket, object, xlMetaOutDated)
if err != nil {
t.Fatal(err)
}
_, err = xl.HealObject(ctx, bucket, object, madmin.HealOpts{ScanMode: madmin.HealDeepScan})
if err != nil {
t.Fatal(err)
}
xlMetaPostHeal, err = readXLMeta(ctx, disk, bucket, object)
if err != nil {
t.Fatal(err)
}
// After heal the meta file should be as expected.
if !reflect.DeepEqual(xlMetaPreHeal, xlMetaPostHeal) {
t.Fatal("HealObject failed")
}
// Remove the bucket - to simulate the case where bucket was
// created when the disk was down.
err = os.RemoveAll(path.Join(fsDirs[0], bucket))
if err != nil {
t.Fatal(err)
}
// This would create the bucket.
_, err = xl.HealBucket(ctx, bucket, false, false)
if err != nil {
t.Fatal(err)
}
// Stat the bucket to make sure that it was created.
_, err = xl.getDisks()[0].StatVol(bucket)
if err != nil {
t.Fatal(err)
}
}
func TestObjectQuorumFromMeta(t *testing.T) { func TestObjectQuorumFromMeta(t *testing.T) {
ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta) ExecObjectLayerTestWithDirs(t, testObjectQuorumFromMeta)
} }
func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) { func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []string, t TestErrHandler) {
restoreGlobalStorageClass := globalStorageClass
defer func() {
globalStorageClass = restoreGlobalStorageClass
}()
bucket := getRandomBucketName() bucket := getRandomBucketName()
var opts ObjectOptions var opts ObjectOptions
@ -507,45 +399,48 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
partCount := 3 partCount := 3
data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount) data := bytes.Repeat([]byte("a"), 6*1024*1024*partCount)
z := obj.(*xlZones) z := obj.(*erasureZones)
xl := z.zones[0].sets[0] xl := z.zones[0].sets[0]
xlDisks := xl.getDisks() erasureDisks := xl.getDisks()
err := obj.MakeBucketWithLocation(GlobalContext, bucket, globalMinioDefaultRegion, false) ctx, cancel := context.WithCancel(GlobalContext)
defer cancel()
err := obj.MakeBucketWithLocation(ctx, bucket, BucketOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a bucket %v", err) t.Fatalf("Failed to make a bucket %v", err)
} }
// Object for test case 1 - No StorageClass defined, no MetaData in PutObject // Object for test case 1 - No StorageClass defined, no MetaData in PutObject
object1 := "object1" object1 := "object1"
_, err = obj.PutObject(GlobalContext, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts) _, err = obj.PutObject(ctx, bucket, object1, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), opts)
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts1, errs1 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object1) parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "")
// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class // Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
object2 := "object2" object2 := "object2"
metadata2 := make(map[string]string) metadata2 := make(map[string]string)
metadata2["x-amz-storage-class"] = storageclass.RRS metadata2["x-amz-storage-class"] = storageclass.RRS
_, err = obj.PutObject(GlobalContext, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2}) _, err = obj.PutObject(ctx, bucket, object2, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata2})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts2, errs2 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object2) parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "")
// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class // Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
object3 := "object3" object3 := "object3"
metadata3 := make(map[string]string) metadata3 := make(map[string]string)
metadata3["x-amz-storage-class"] = storageclass.STANDARD metadata3["x-amz-storage-class"] = storageclass.STANDARD
_, err = obj.PutObject(GlobalContext, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3}) _, err = obj.PutObject(ctx, bucket, object3, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata3})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts3, errs3 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object3) parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "")
// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class // Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
object4 := "object4" object4 := "object4"
@ -557,12 +452,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
}, },
} }
_, err = obj.PutObject(GlobalContext, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4}) _, err = obj.PutObject(ctx, bucket, object4, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata4})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts4, errs4 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object4) parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "")
// Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class // Object for test case 5 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting RRS Class
// Reset global storage class flags // Reset global storage class flags
@ -575,12 +470,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
}, },
} }
_, err = obj.PutObject(GlobalContext, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5}) _, err = obj.PutObject(ctx, bucket, object5, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata5})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts5, errs5 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object5) parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "")
// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class // Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
object6 := "object6" object6 := "object6"
@ -592,12 +487,12 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
}, },
} }
_, err = obj.PutObject(GlobalContext, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6}) _, err = obj.PutObject(ctx, bucket, object6, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata6})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts6, errs6 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object6) parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "")
// Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class // Object for test case 7 - Standard StorageClass defined as Parity 5, MetaData in PutObject requesting RRS Class
// Reset global storage class flags // Reset global storage class flags
@ -610,15 +505,15 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
}, },
} }
_, err = obj.PutObject(GlobalContext, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7}) _, err = obj.PutObject(ctx, bucket, object7, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{UserDefined: metadata7})
if err != nil { if err != nil {
t.Fatalf("Failed to putObject %v", err) t.Fatalf("Failed to putObject %v", err)
} }
parts7, errs7 := readAllXLMetadata(GlobalContext, xlDisks, bucket, object7) parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "")
tests := []struct { tests := []struct {
parts []xlMetaV1 parts []FileInfo
errs []error errs []error
expectedReadQuorum int expectedReadQuorum int
expectedWriteQuorum int expectedWriteQuorum int
@ -632,23 +527,22 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
{parts6, errs6, 8, 9, nil}, {parts6, errs6, 8, 9, nil},
{parts7, errs7, 14, 15, nil}, {parts7, errs7, 14, 15, nil},
} }
for i, tt := range tests { for _, tt := range tests {
actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(GlobalContext, *xl, tt.parts, tt.errs) tt := tt
if tt.expectedError != nil && err == nil { t.(*testing.T).Run("", func(t *testing.T) {
t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, *xl, tt.parts, tt.errs)
return if tt.expectedError != nil && err == nil {
} t.Errorf("Expected %s, got %s", tt.expectedError, err)
if tt.expectedError == nil && err != nil { }
t.Errorf("Test %d, Expected %s, got %s", i+1, tt.expectedError, err) if tt.expectedError == nil && err != nil {
return t.Errorf("Expected %s, got %s", tt.expectedError, err)
} }
if tt.expectedReadQuorum != actualReadQuorum { if tt.expectedReadQuorum != actualReadQuorum {
t.Errorf("Test %d, Expected Read Quorum %d, got %d", i+1, tt.expectedReadQuorum, actualReadQuorum) t.Errorf("Expected Read Quorum %d, got %d", tt.expectedReadQuorum, actualReadQuorum)
return }
} if tt.expectedWriteQuorum != actualWriteQuorum {
if tt.expectedWriteQuorum != actualWriteQuorum { t.Errorf("Expected Write Quorum %d, got %d", tt.expectedWriteQuorum, actualWriteQuorum)
t.Errorf("Test %d, Expected Write Quorum %d, got %d", i+1, tt.expectedWriteQuorum, actualWriteQuorum) }
return })
}
} }
} }
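The expected values visible in the table above follow a simple pattern for a 16-drive set: 8/9 when the default STANDARD parity of 8 applies and 14/15 when RRS parity 2 applies. The sketch below captures that relationship under the assumption that read quorum equals the data-block count and write quorum is one more; it illustrates the test expectations rather than the actual objectQuorumFromMeta implementation.

// quorumsForParity sketches the quorum expectations encoded in the test table
// above, assuming read quorum = data blocks and write quorum = data blocks + 1.
func quorumsForParity(setDriveCount, parityBlocks int) (readQuorum, writeQuorum int) {
	dataBlocks := setDriveCount - parityBlocks
	return dataBlocks, dataBlocks + 1
}

// quorumsForParity(16, 8) yields (8, 9), the default STANDARD class in these tests.
// quorumsForParity(16, 2) yields (14, 15), the RRS class configured with parity 2.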

File diff suppressed because it is too large

245
cmd/erasure-sets_test.go Normal file
View File

@ -0,0 +1,245 @@
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/google/uuid"
)
var testUUID = uuid.MustParse("f5c58c61-7175-4018-ab5e-a94fe9c2de4e")
func BenchmarkCrcHash(b *testing.B) {
cases := []struct {
key int
}{
{16},
{64},
{128},
{256},
{512},
{1024},
}
for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key)
b.Run("", func(b *testing.B) {
b.SetBytes(1024)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
crcHashMod(key, 16)
}
})
}
}
func BenchmarkSipHash(b *testing.B) {
cases := []struct {
key int
}{
{16},
{64},
{128},
{256},
{512},
{1024},
}
for _, testCase := range cases {
testCase := testCase
key := randString(testCase.key)
b.Run("", func(b *testing.B) {
b.SetBytes(1024)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
sipHashMod(key, 16, testUUID)
}
})
}
}
// TestSipHashMod - test sip hash.
func TestSipHashMod(t *testing.T) {
testCases := []struct {
objectName string
sipHash int
}{
// cases which should pass the test.
// passing in valid object name.
{"object", 37},
{"The Shining Script <v1>.pdf", 38},
{"Cost Benefit Analysis (2009-2010).pptx", 59},
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 35},
{"SHØRT", 49},
{"There are far too many object names, and far too few bucket names!", 8},
{"a/b/c/", 159},
{"/a/b/c", 96},
{string([]byte{0xff, 0xfe, 0xfd}), 147},
}
// Tests hashing order to be consistent.
for i, testCase := range testCases {
if sipHashElement := hashKey("SIPMOD", testCase.objectName, 200, testUUID); sipHashElement != testCase.sipHash {
t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.sipHash, sipHashElement)
}
}
if sipHashElement := hashKey("SIPMOD", "This will fail", -1, testUUID); sipHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement)
}
if sipHashElement := hashKey("SIPMOD", "This will fail", 0, testUUID); sipHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement)
}
if sipHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); sipHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", sipHashElement)
}
}
// TestCrcHashMod - test crc hash.
func TestCrcHashMod(t *testing.T) {
testCases := []struct {
objectName string
crcHash int
}{
// cases which should pass the test.
// passing in valid object name.
{"object", 28},
{"The Shining Script <v1>.pdf", 142},
{"Cost Benefit Analysis (2009-2010).pptx", 133},
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", 185},
{"SHØRT", 97},
{"There are far too many object names, and far too few bucket names!", 101},
{"a/b/c/", 193},
{"/a/b/c", 116},
{string([]byte{0xff, 0xfe, 0xfd}), 61},
}
// Tests hashing order to be consistent.
for i, testCase := range testCases {
if crcHashElement := hashKey("CRCMOD", testCase.objectName, 200, testUUID); crcHashElement != testCase.crcHash {
t.Errorf("Test case %d: Expected \"%v\" but failed \"%v\"", i+1, testCase.crcHash, crcHashElement)
}
}
if crcHashElement := hashKey("CRCMOD", "This will fail", -1, testUUID); crcHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement)
}
if crcHashElement := hashKey("CRCMOD", "This will fail", 0, testUUID); crcHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement)
}
if crcHashElement := hashKey("UNKNOWN", "This will fail", 0, testUUID); crcHashElement != -1 {
t.Errorf("Test: Expected \"-1\" but got \"%v\"", crcHashElement)
}
}
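The two tests above pin down the hash-based placement: a key is hashed, reduced modulo the number of sets, and -1 is returned for a non-positive cardinality or an unknown algorithm. Below is a standalone sketch of that behaviour; the exact SipHash key derivation from the deployment UUID is an assumption of this sketch (only github.com/dchest/siphash, github.com/google/uuid and the standard library are used), so the printed indices are not guaranteed to match the expected values in the tables above.

package main

import (
	"fmt"
	"hash/crc32"

	"github.com/dchest/siphash"
	"github.com/google/uuid"
)

// crcHashMod: CRC32 of the key, reduced modulo the set count (legacy CRCMOD).
func crcHashMod(key string, cardinality int) int {
	if cardinality <= 0 {
		return -1
	}
	keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable)
	return int(keyCrc % uint32(cardinality))
}

// sipHashMod: SipHash of the key, keyed with the deployment ID, reduced
// modulo the set count. Using the UUID bytes directly as the 128-bit key
// is an assumption made for this sketch.
func sipHashMod(key string, cardinality int, id uuid.UUID) int {
	if cardinality <= 0 {
		return -1
	}
	sip := siphash.New(id[:])
	sip.Write([]byte(key))
	return int(sip.Sum64() % uint64(cardinality))
}

// hashKey dispatches on the distribution algorithm recorded in format.json.
func hashKey(algo, key string, cardinality int, id uuid.UUID) int {
	switch algo {
	case "CRCMOD":
		return crcHashMod(key, cardinality)
	case "SIPMOD":
		return sipHashMod(key, cardinality, id)
	default:
		return -1 // unknown algorithm, mirrors the "UNKNOWN" cases above
	}
}

func main() {
	id := uuid.MustParse("f5c58c61-7175-4018-ab5e-a94fe9c2de4e")
	// Outputs depend on the key derivation above and may differ from the
	// expected values hard-coded in the tests.
	fmt.Println(hashKey("SIPMOD", "object", 200, id))
	fmt.Println(hashKey("CRCMOD", "object", 200, id))
}

Keying the hash with the deployment ID is presumably why the new SIPMOD tests thread testUUID through every call, whereas the legacy CRCMOD path ignores it.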
// TestNewErasureSets - tests initialization of all input disks
// and constructs a valid `erasureSets` object
func TestNewErasureSets(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var nDisks = 16 // Maximum disks.
var erasureDisks []string
for i := 0; i < nDisks; i++ {
// Do not attempt to create this path; the test validates
// that newErasureSets initializes non-existing paths
// and successfully returns an initialized object layer.
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
erasureDisks = append(erasureDisks, disk)
defer os.RemoveAll(disk)
}
endpoints := mustGetNewEndpoints(erasureDisks...)
_, _, err := waitForFormatErasure(true, endpoints, 1, 0, 16, "")
if err != errInvalidArgument {
t.Fatalf("Expecting error, got %s", err)
}
_, _, err = waitForFormatErasure(true, nil, 1, 1, 16, "")
if err != errInvalidArgument {
t.Fatalf("Expecting error, got %s", err)
}
// Initializes all erasure disks
storageDisks, format, err := waitForFormatErasure(true, endpoints, 1, 1, 16, "")
if err != nil {
t.Fatalf("Unable to format disks for erasure, %s", err)
}
if _, err := newErasureSets(ctx, endpoints, storageDisks, format); err != nil {
t.Fatalf("Unable to initialize erasure")
}
}
// TestHashedLayer - tests the hashed layer which will be returned
// consistently for a given object name.
func TestHashedLayer(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var objs []*erasureObjects
for i := 0; i < 16; i++ {
obj, fsDirs, err := prepareErasure16(ctx)
if err != nil {
t.Fatal("Unable to initialize 'Erasure' object layer.", err)
}
// Remove all dirs.
for _, dir := range fsDirs {
defer os.RemoveAll(dir)
}
z := obj.(*erasureZones)
objs = append(objs, z.zones[0].sets[0])
}
sets := &erasureSets{sets: objs, distributionAlgo: "CRCMOD"}
testCases := []struct {
objectName string
expectedObj *erasureObjects
}{
// cases which should pass the test.
// passing in valid object name.
{"object", objs[12]},
{"The Shining Script <v1>.pdf", objs[14]},
{"Cost Benefit Analysis (2009-2010).pptx", objs[13]},
{"117Gn8rfHL2ACARPAhaFd0AGzic9pUbIA/5OCn5A", objs[1]},
{"SHØRT", objs[9]},
{"There are far too many object names, and far too few bucket names!", objs[13]},
{"a/b/c/", objs[1]},
{"/a/b/c", objs[4]},
{string([]byte{0xff, 0xfe, 0xfd}), objs[13]},
}
// Tests hashing order to be consistent.
for i, testCase := range testCases {
gotObj := sets.getHashedSet(testCase.objectName)
if gotObj != testCase.expectedObj {
t.Errorf("Test case %d: Expected \"%#v\" but failed \"%#v\"", i+1, testCase.expectedObj, gotObj)
}
}
}
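TestHashedLayer checks that a given object name always resolves to the same set for a fixed algorithm. Building on the hashKey sketch after TestCrcHashMod above, a hypothetical lookup could look like the fragment below; pickSet and its parameters are illustrative, not the real getHashedSet signature:

// pickSet resolves an object name to one erasure set; a negative index from
// hashKey (unknown algorithm or no sets) yields no set.
func pickSet(sets []*erasureObjects, algo, object string, id uuid.UUID) *erasureObjects {
	idx := hashKey(algo, object, len(sets), id)
	if idx < 0 {
		return nil
	}
	return sets[idx]
}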

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,136 +18,373 @@ package cmd
import ( import (
"context" "context"
"fmt"
"sort"
"sync" "sync"
"time"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/cmd/logger" "github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bpool"
"github.com/minio/minio/pkg/dsync"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup"
) )
// Erasure - erasure encoding details. // OfflineDisk represents an unavailable disk.
type Erasure struct { var OfflineDisk StorageAPI // zero value is nil
encoder func() reedsolomon.Encoder
dataBlocks, parityBlocks int // partialUpload is a successful upload of an object
blockSize int64 // but not written in all disks (having quorum)
type partialUpload struct {
bucket string
object string
failedSet int
} }
// NewErasure creates a new ErasureStorage. // erasureObjects - Implements ER object layer.
func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int64) (e Erasure, err error) { type erasureObjects struct {
e = Erasure{ GatewayUnsupported
dataBlocks: dataBlocks,
parityBlocks: parityBlocks,
blockSize: blockSize,
}
// Check the parameters for sanity now. // getDisks returns list of storageAPIs.
if dataBlocks <= 0 || parityBlocks <= 0 { getDisks func() []StorageAPI
return e, reedsolomon.ErrInvShardNum
}
if dataBlocks+parityBlocks > 256 { // getLockers returns list of remote and local lockers.
return e, reedsolomon.ErrMaxShardNum getLockers func() []dsync.NetLocker
}
// Encoder when needed. // getEndpoints returns list of endpoint strings belonging this set.
var enc reedsolomon.Encoder // some may be local and some remote.
var once sync.Once getEndpoints func() []string
e.encoder = func() reedsolomon.Encoder {
once.Do(func() { // Locker mutex map.
e, err := reedsolomon.New(dataBlocks, parityBlocks, reedsolomon.WithAutoGoroutines(int(e.ShardSize()))) nsMutex *nsLockMap
if err != nil {
// Error conditions should be checked above. // Byte pools used for temporary i/o buffers.
panic(err) bp *bpool.BytePoolCap
}
enc = e mrfUploadCh chan partialUpload
})
return enc
}
return
} }
// EncodeData encodes the given data and returns the erasure-coded data. // NewNSLock - initialize a new namespace RWLocker instance.
// It returns an error if the erasure coding failed. func (er erasureObjects) NewNSLock(ctx context.Context, bucket string, objects ...string) RWLocker {
func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error) { return er.nsMutex.NewNSLock(ctx, er.getLockers, bucket, objects...)
if len(data) == 0 {
return make([][]byte, e.dataBlocks+e.parityBlocks), nil
}
encoded, err := e.encoder().Split(data)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
if err = e.encoder().Encode(encoded); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
return encoded, nil
} }
// DecodeDataBlocks decodes the given erasure-coded data. // Shutdown function for object storage interface.
// It only decodes the data blocks but does not verify them. func (er erasureObjects) Shutdown(ctx context.Context) error {
// It returns an error if the decoding failed. // Add any object layer shutdown activities here.
func (e *Erasure) DecodeDataBlocks(data [][]byte) error { closeStorageDisks(er.getDisks())
var isZero = 0
for _, b := range data[:] {
if len(b) == 0 {
isZero++
break
}
}
if isZero == 0 || isZero == len(data) {
// If all are zero, payload is 0 bytes.
return nil
}
return e.encoder().ReconstructData(data)
}
// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed.
func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
needsReconstruction := false
for _, b := range data {
if b == nil {
needsReconstruction = true
break
}
}
if !needsReconstruction {
return nil
}
if err := e.encoder().Reconstruct(data); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil return nil
} }
// ShardSize - returns actual shard size from erasure blockSize. // byDiskTotal is a collection satisfying sort.Interface.
func (e *Erasure) ShardSize() int64 { type byDiskTotal []DiskInfo
return ceilFrac(e.blockSize, int64(e.dataBlocks))
func (d byDiskTotal) Len() int { return len(d) }
func (d byDiskTotal) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d byDiskTotal) Less(i, j int) bool {
return d[i].Total < d[j].Total
} }
// ShardFileSize - returns final erasure size from original size. // getDisksInfo - fetch disks info across all other storage API.
func (e *Erasure) ShardFileSize(totalLength int64) int64 { func getDisksInfo(disks []StorageAPI, local bool) (disksInfo []DiskInfo, errs []error, onlineDisks, offlineDisks madmin.BackendDisks) {
if totalLength == 0 { disksInfo = make([]DiskInfo, len(disks))
return 0 onlineDisks = make(madmin.BackendDisks)
offlineDisks = make(madmin.BackendDisks)
for _, disk := range disks {
if disk == OfflineDisk {
continue
}
peerAddr := disk.Hostname()
if _, ok := offlineDisks[peerAddr]; !ok {
offlineDisks[peerAddr] = 0
}
if _, ok := onlineDisks[peerAddr]; !ok {
onlineDisks[peerAddr] = 0
}
} }
if totalLength == -1 {
return -1 g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == OfflineDisk {
// Storage disk is empty, perhaps ignored disk or not available.
return errDiskNotFound
}
info, err := disks[index].DiskInfo()
if err != nil {
if !IsErr(err, baseErrs...) {
reqInfo := (&logger.ReqInfo{}).AppendTags("disk", disks[index].String())
ctx := logger.SetReqInfo(GlobalContext, reqInfo)
logger.LogIf(ctx, err)
}
return err
}
disksInfo[index] = info
return nil
}, index)
} }
numShards := totalLength / e.blockSize
lastBlockSize := totalLength % int64(e.blockSize) errs = g.Wait()
lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks)) // Wait for the routines.
return numShards*e.ShardSize() + lastShardSize for i, diskInfoErr := range errs {
if disks[i] == OfflineDisk {
continue
}
if diskInfoErr != nil {
offlineDisks[disks[i].Hostname()]++
continue
}
onlineDisks[disks[i].Hostname()]++
}
// Iterate over the passed endpoints arguments and check
// if there are still disks missing from the offline/online lists
// and update them accordingly.
missingOfflineDisks := make(map[string]int)
for _, zone := range globalEndpoints {
for _, endpoint := range zone.Endpoints {
// if local is set and endpoint is not local
// we are not interested in remote disks.
if local && !endpoint.IsLocal {
continue
}
if _, ok := offlineDisks[endpoint.Host]; !ok {
missingOfflineDisks[endpoint.Host]++
}
}
}
for missingDisk, n := range missingOfflineDisks {
onlineDisks[missingDisk] = 0
offlineDisks[missingDisk] = n
}
// Success.
return disksInfo, errs, onlineDisks, offlineDisks
} }
// ShardFileTillOffset - returns the effective offset where erasure reading begins. // Get an aggregated storage info across all disks.
func (e *Erasure) ShardFileTillOffset(startOffset, length, totalLength int64) int64 { func getStorageInfo(disks []StorageAPI, local bool) (StorageInfo, []error) {
shardSize := e.ShardSize() disksInfo, errs, onlineDisks, offlineDisks := getDisksInfo(disks, local)
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + int64(length)) / e.blockSize // Sort so that the first element is the smallest.
tillOffset := endShard*shardSize + shardSize sort.Sort(byDiskTotal(disksInfo))
if tillOffset > shardFileSize {
tillOffset = shardFileSize // Combine all disks to get total usage
usedList := make([]uint64, len(disksInfo))
totalList := make([]uint64, len(disksInfo))
availableList := make([]uint64, len(disksInfo))
mountPaths := make([]string, len(disksInfo))
for i, di := range disksInfo {
usedList[i] = di.Used
totalList[i] = di.Total
availableList[i] = di.Free
mountPaths[i] = di.MountPath
} }
return tillOffset
storageInfo := StorageInfo{
Used: usedList,
Total: totalList,
Available: availableList,
MountPaths: mountPaths,
}
storageInfo.Backend.Type = BackendErasure
storageInfo.Backend.OnlineDisks = onlineDisks
storageInfo.Backend.OfflineDisks = offlineDisks
return storageInfo, errs
}
// StorageInfo - returns underlying storage statistics.
func (er erasureObjects) StorageInfo(ctx context.Context, local bool) (StorageInfo, []error) {
disks := er.getDisks()
if local {
var localDisks []StorageAPI
for _, disk := range disks {
if disk != nil {
if disk.IsLocal() {
// Append this local disk since local flag is true
localDisks = append(localDisks, disk)
}
}
}
disks = localDisks
}
return getStorageInfo(disks, local)
}
// GetMetrics - is not implemented and shouldn't be called.
func (er erasureObjects) GetMetrics(ctx context.Context) (*Metrics, error) {
logger.LogIf(ctx, NotImplemented{})
return &Metrics{}, NotImplemented{}
}
// CrawlAndGetDataUsage collects usage from all buckets.
// Updates are sent as different parts of the underlying
// structure are traversed.
func (er erasureObjects) CrawlAndGetDataUsage(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo) error {
return NotImplemented{API: "CrawlAndGetDataUsage"}
}
// crawlAndGetDataUsage will start crawling buckets and send updated totals as they are traversed.
// Updates are sent on a regular basis and the caller *must* consume them.
func (er erasureObjects) crawlAndGetDataUsage(ctx context.Context, buckets []BucketInfo, bf *bloomFilter, updates chan<- dataUsageCache) error {
var disks []StorageAPI
for _, d := range er.getLoadBalancedDisks() {
if d == nil || !d.IsOnline() {
continue
}
disks = append(disks, d)
}
if len(disks) == 0 || len(buckets) == 0 {
return nil
}
// Load bucket totals
oldCache := dataUsageCache{}
err := oldCache.load(ctx, er, dataUsageCacheName)
if err != nil {
return err
}
// New cache..
cache := dataUsageCache{
Info: dataUsageCacheInfo{
Name: dataUsageRoot,
NextCycle: oldCache.Info.NextCycle,
},
Cache: make(map[string]dataUsageEntry, len(oldCache.Cache)),
}
// Put all buckets into channel.
bucketCh := make(chan BucketInfo, len(buckets))
// Add new buckets first
for _, b := range buckets {
if oldCache.find(b.Name) == nil {
bucketCh <- b
}
}
// Add existing buckets.
for _, b := range buckets {
e := oldCache.find(b.Name)
if e != nil {
bucketCh <- b
cache.replace(b.Name, dataUsageRoot, *e)
}
}
close(bucketCh)
bucketResults := make(chan dataUsageEntryInfo, len(disks))
// Start async collector/saver.
// This goroutine owns the cache.
var saverWg sync.WaitGroup
saverWg.Add(1)
go func() {
const updateTime = 30 * time.Second
t := time.NewTicker(updateTime)
defer t.Stop()
defer saverWg.Done()
var lastSave time.Time
saveLoop:
for {
select {
case <-ctx.Done():
// Return without saving.
return
case <-t.C:
if cache.Info.LastUpdate.Equal(lastSave) {
continue
}
logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
updates <- cache.clone()
lastSave = cache.Info.LastUpdate
case v, ok := <-bucketResults:
if !ok {
break saveLoop
}
cache.replace(v.Name, v.Parent, v.Entry)
cache.Info.LastUpdate = time.Now()
}
}
// Save final state...
cache.Info.NextCycle++
cache.Info.LastUpdate = time.Now()
logger.LogIf(ctx, cache.save(ctx, er, dataUsageCacheName))
updates <- cache
}()
// Start one crawler per disk
var wg sync.WaitGroup
wg.Add(len(disks))
for i := range disks {
go func(i int) {
defer wg.Done()
disk := disks[i]
for bucket := range bucketCh {
select {
case <-ctx.Done():
return
default:
}
// Load cache for bucket
cacheName := pathJoin(bucket.Name, dataUsageCacheName)
cache := dataUsageCache{}
logger.LogIf(ctx, cache.load(ctx, er, cacheName))
if cache.Info.Name == "" {
cache.Info.Name = bucket.Name
}
if cache.Info.Name != bucket.Name {
logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
cache.Info = dataUsageCacheInfo{
Name: bucket.Name,
LastUpdate: time.Time{},
NextCycle: 0,
}
}
// Calc usage
before := cache.Info.LastUpdate
cache, err = disk.CrawlAndGetDataUsage(ctx, cache)
if err != nil {
logger.LogIf(ctx, err)
if cache.Info.LastUpdate.After(before) {
logger.LogIf(ctx, cache.save(ctx, er, cacheName))
}
continue
}
var root dataUsageEntry
if r := cache.root(); r != nil {
root = cache.flatten(*r)
}
bucketResults <- dataUsageEntryInfo{
Name: cache.Info.Name,
Parent: dataUsageRoot,
Entry: root,
}
// Save cache
logger.LogIf(ctx, cache.save(ctx, er, cacheName))
}
}(i)
}
wg.Wait()
close(bucketResults)
saverWg.Wait()
return nil
}
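The crawler above distributes buckets over a channel to one goroutine per online disk, while a single goroutine owns the usage cache, persisting it on a ticker and once more when the result channel closes. A stripped-down, runnable sketch of that fan-out/single-saver pattern follows, using generic string results instead of the dataUsage types:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// crawl fans buckets out to one worker per disk while a single goroutine
// owns the aggregated results and saves them periodically and at the end.
func crawl(ctx context.Context, disks, buckets []string, save func(results []string)) {
	work := make(chan string, len(buckets))
	for _, b := range buckets {
		work <- b
	}
	close(work)

	resultCh := make(chan string, len(disks))
	var collected []string

	var saverWg sync.WaitGroup
	saverWg.Add(1)
	go func() {
		defer saverWg.Done()
		t := time.NewTicker(100 * time.Millisecond) // 30s in the real crawler
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return // give up without a final save, like the code above
			case <-t.C:
				save(collected)
			case r, ok := <-resultCh:
				if !ok {
					save(collected) // final save once all workers finish
					return
				}
				collected = append(collected, r)
			}
		}
	}()

	// One worker per disk pulls buckets until the channel drains.
	var wg sync.WaitGroup
	wg.Add(len(disks))
	for i := range disks {
		go func(disk string) {
			defer wg.Done()
			for b := range work {
				resultCh <- disk + ":" + b // stand-in for a per-bucket usage entry
			}
		}(disks[i])
	}
	wg.Wait()
	close(resultCh)
	saverWg.Wait()
}

func main() {
	crawl(context.Background(), []string{"disk1", "disk2"}, []string{"a", "b", "c"},
		func(rs []string) { fmt.Println("save:", rs) })
}

Confining cache mutation to the saver goroutine, as the comment "This goroutine owns the cache" notes above, lets the per-disk workers stay lock-free; they only send results over the channel.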
// IsReady - shouldn't be called, will panic.
func (er erasureObjects) IsReady(ctx context.Context) bool {
logger.CriticalIf(ctx, NotImplemented{})
return true
} }
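The ShardSize and ShardFileSize helpers shown on the left-hand (pre-rename) side of this diff compute the per-drive shard file length from the object size. Below is a small standalone sketch of that arithmetic, with ceilFrac re-implemented locally for illustration and example sizes chosen arbitrarily:

package main

import "fmt"

// ceilFrac is a local stand-in for the helper used by the code above.
func ceilFrac(numerator, denominator int64) int64 {
	if denominator == 0 {
		return 0
	}
	return (numerator + denominator - 1) / denominator
}

func main() {
	const (
		blockSize  = int64(1 << 20) // 1 MiB erasure block, for illustration
		dataBlocks = int64(4)
	)
	shardSize := ceilFrac(blockSize, dataBlocks) // bytes each data drive holds per block

	totalLength := int64(5<<20 + 1) // a 5 MiB + 1 byte object
	numShards := totalLength / blockSize
	lastShardSize := ceilFrac(totalLength%blockSize, dataBlocks)

	// 5 full blocks of 262144 bytes plus a 1-byte tail -> 1310721 bytes per drive.
	fmt.Println(shardSize, numShards*shardSize+lastShardSize)
}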

View File

@ -1,5 +1,5 @@
/* /*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc. * MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -130,7 +130,7 @@ func newErasureTestSetup(dataBlocks int, parityBlocks int, blockSize int64) (*er
disks := make([]StorageAPI, len(diskPaths)) disks := make([]StorageAPI, len(diskPaths))
var err error var err error
for i := range diskPaths { for i := range diskPaths {
disks[i], diskPaths[i], err = newPosixTestSetup() disks[i], diskPaths[i], err = newXLStorageTestSetup()
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -366,7 +366,7 @@ func loadAndValidateCacheFormat(ctx context.Context, drives []string) (formats [
func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile, destDir string, metadata map[string]string) error { func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile, destDir string, metadata map[string]string) error {
st, err := os.Stat(oldfile) st, err := os.Stat(oldfile)
if err != nil { if err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
return err return err
} }
readCloser, err := readCacheFileStream(oldfile, 0, st.Size()) readCloser, err := readCacheFileStream(oldfile, 0, st.Size())

View File

@ -36,20 +36,23 @@ import (
) )
const ( const (
// Represents XL backend. // Represents Erasure backend.
formatBackendXL = "xl" formatBackendErasure = "xl"
// formatXLV1.XL.Version - version '1'. // formatErasureV1.Erasure.Version - version '1'.
formatXLVersionV1 = "1" formatErasureVersionV1 = "1"
// formatXLV2.XL.Version - version '2'. // formatErasureV2.Erasure.Version - version '2'.
formatXLVersionV2 = "2" formatErasureVersionV2 = "2"
// formatXLV3.XL.Version - version '3'. // formatErasureV3.Erasure.Version - version '3'.
formatXLVersionV3 = "3" formatErasureVersionV3 = "3"
// Distribution algorithm used. // Distribution algorithm used, legacy
formatXLVersionV2DistributionAlgo = "CRCMOD" formatErasureVersionV2DistributionAlgoLegacy = "CRCMOD"
// Distribution algorithm used, current
formatErasureVersionV3DistributionAlgo = "SIPMOD"
) )
// Offline disk UUID represents an offline disk. // Offline disk UUID represents an offline disk.
@ -68,34 +71,34 @@ var formatCriticalErrors = map[error]struct{}{
} }
// Used to detect the version of "xl" format. // Used to detect the version of "xl" format.
type formatXLVersionDetect struct { type formatErasureVersionDetect struct {
XL struct { Erasure struct {
Version string `json:"version"` Version string `json:"version"`
} `json:"xl"` } `json:"xl"`
} }
// Represents the V1 backend disk structure version // Represents the V1 backend disk structure version
// under `.minio.sys` and actual data namespace. // under `.minio.sys` and actual data namespace.
// formatXLV1 - structure holds format config version '1'. // formatErasureV1 - structure holds format config version '1'.
type formatXLV1 struct { type formatErasureV1 struct {
formatMetaV1 formatMetaV1
XL struct { Erasure struct {
Version string `json:"version"` // Version of 'xl' format. Version string `json:"version"` // Version of 'xl' format.
Disk string `json:"disk"` // Disk field carries assigned disk uuid. Disk string `json:"disk"` // Disk field carries assigned disk uuid.
// JBOD field carries the input disk order generated the first // JBOD field carries the input disk order generated the first
// time when fresh disks were supplied. // time when fresh disks were supplied.
JBOD []string `json:"jbod"` JBOD []string `json:"jbod"`
} `json:"xl"` // XL field holds xl format. } `json:"xl"` // Erasure field holds xl format.
} }
// Represents the V2 backend disk structure version // Represents the V2 backend disk structure version
// under `.minio.sys` and actual data namespace. // under `.minio.sys` and actual data namespace.
// formatXLV2 - structure holds format config version '2'. // formatErasureV2 - structure holds format config version '2'.
// The V2 format to support "large bucket" support where a bucket // The V2 format to support "large bucket" support where a bucket
// can span multiple erasure sets. // can span multiple erasure sets.
type formatXLV2 struct { type formatErasureV2 struct {
formatMetaV1 formatMetaV1
XL struct { Erasure struct {
Version string `json:"version"` // Version of 'xl' format. Version string `json:"version"` // Version of 'xl' format.
This string `json:"this"` // This field carries assigned disk uuid. This string `json:"this"` // This field carries assigned disk uuid.
// Sets field carries the input disk order generated the first // Sets field carries the input disk order generated the first
@ -108,13 +111,13 @@ type formatXLV2 struct {
} `json:"xl"` } `json:"xl"`
} }
// formatXLV3 struct is same as formatXLV2 struct except that formatXLV3.XL.Version is "3" indicating // formatErasureV3 struct is same as formatErasureV2 struct except that formatErasureV3.Erasure.Version is "3" indicating
// the simplified multipart backend which is a flat hierarchy now. // the simplified multipart backend which is a flat hierarchy now.
// In .minio.sys/multipart we have: // In .minio.sys/multipart we have:
// sha256(bucket/object)/uploadID/[xl.json, part.1, part.2 ....] // sha256(bucket/object)/uploadID/[xl.meta, part.1, part.2 ....]
type formatXLV3 struct { type formatErasureV3 struct {
formatMetaV1 formatMetaV1
XL struct { Erasure struct {
Version string `json:"version"` // Version of 'xl' format. Version string `json:"version"` // Version of 'xl' format.
This string `json:"this"` // This field carries assigned disk uuid. This string `json:"this"` // This field carries assigned disk uuid.
// Sets field carries the input disk order generated the first // Sets field carries the input disk order generated the first
@ -127,40 +130,40 @@ type formatXLV3 struct {
} `json:"xl"` } `json:"xl"`
} }
func (f *formatXLV3) Clone() *formatXLV3 { func (f *formatErasureV3) Clone() *formatErasureV3 {
b, err := json.Marshal(f) b, err := json.Marshal(f)
if err != nil { if err != nil {
panic(err) panic(err)
} }
var dst formatXLV3 var dst formatErasureV3
if err = json.Unmarshal(b, &dst); err != nil { if err = json.Unmarshal(b, &dst); err != nil {
panic(err) panic(err)
} }
return &dst return &dst
} }
// Returns formatXL.XL.Version // Returns formatErasure.Erasure.Version
func newFormatXLV3(numSets int, setLen int) *formatXLV3 { func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 {
format := &formatXLV3{} format := &formatErasureV3{}
format.Version = formatMetaVersionV1 format.Version = formatMetaVersionV1
format.Format = formatBackendXL format.Format = formatBackendErasure
format.ID = mustGetUUID() format.ID = mustGetUUID()
format.XL.Version = formatXLVersionV3 format.Erasure.Version = formatErasureVersionV3
format.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgo
format.XL.Sets = make([][]string, numSets) format.Erasure.Sets = make([][]string, numSets)
for i := 0; i < numSets; i++ { for i := 0; i < numSets; i++ {
format.XL.Sets[i] = make([]string, setLen) format.Erasure.Sets[i] = make([]string, setLen)
for j := 0; j < setLen; j++ { for j := 0; j < setLen; j++ {
format.XL.Sets[i][j] = mustGetUUID() format.Erasure.Sets[i][j] = mustGetUUID()
} }
} }
return format return format
} }
// Returns format XL version after reading `format.json`, returns // Returns format Erasure version after reading `format.json`, returns
// successfully the version only if the backend is XL. // successfully the version only if the backend is Erasure.
func formatGetBackendXLVersion(formatPath string) (string, error) { func formatGetBackendErasureVersion(formatPath string) (string, error) {
meta := &formatMetaV1{} meta := &formatMetaV1{}
b, err := ioutil.ReadFile(formatPath) b, err := ioutil.ReadFile(formatPath)
if err != nil { if err != nil {
@ -172,42 +175,42 @@ func formatGetBackendXLVersion(formatPath string) (string, error) {
if meta.Version != formatMetaVersionV1 { if meta.Version != formatMetaVersionV1 {
return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version) return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version)
} }
if meta.Format != formatBackendXL { if meta.Format != formatBackendErasure {
return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendXL) return "", fmt.Errorf(`found backend %s, expected %s`, meta.Format, formatBackendErasure)
} }
// XL backend found, proceed to detect version. // Erasure backend found, proceed to detect version.
format := &formatXLVersionDetect{} format := &formatErasureVersionDetect{}
if err = json.Unmarshal(b, format); err != nil { if err = json.Unmarshal(b, format); err != nil {
return "", err return "", err
} }
return format.XL.Version, nil return format.Erasure.Version, nil
} }
// Migrates all previous versions to latest version of `format.json`, // Migrates all previous versions to latest version of `format.json`,
// this code calls migration in sequence, such as V1 is migrated to V2 // this code calls migration in sequence, such as V1 is migrated to V2
// first before it V2 migrates to V3. // first before it V2 migrates to V3.
func formatXLMigrate(export string) error { func formatErasureMigrate(export string) error {
formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) formatPath := pathJoin(export, minioMetaBucket, formatConfigFile)
version, err := formatGetBackendXLVersion(formatPath) version, err := formatGetBackendErasureVersion(formatPath)
if err != nil { if err != nil {
return err return err
} }
switch version { switch version {
case formatXLVersionV1: case formatErasureVersionV1:
if err = formatXLMigrateV1ToV2(export, version); err != nil { if err = formatErasureMigrateV1ToV2(export, version); err != nil {
return err return err
} }
// Migrate successful v1 => v2, proceed to v2 => v3 // Migrate successful v1 => v2, proceed to v2 => v3
version = formatXLVersionV2 version = formatErasureVersionV2
fallthrough fallthrough
case formatXLVersionV2: case formatErasureVersionV2:
if err = formatXLMigrateV2ToV3(export, version); err != nil { if err = formatErasureMigrateV2ToV3(export, version); err != nil {
return err return err
} }
// Migrate successful v2 => v3, v3 is latest // Migrate successful v2 => v3, v3 is latest
// version = formatXLVersionV3 // version = formatXLVersionV3
fallthrough fallthrough
case formatXLVersionV3: case formatErasureVersionV3:
// v3 is the latest version, return. // v3 is the latest version, return.
return nil return nil
} }
@ -216,14 +219,14 @@ func formatXLMigrate(export string) error {
// Migrates version V1 of format.json to version V2 of format.json, // Migrates version V1 of format.json to version V2 of format.json,
// migration fails upon any error. // migration fails upon any error.
func formatXLMigrateV1ToV2(export, version string) error { func formatErasureMigrateV1ToV2(export, version string) error {
if version != formatXLVersionV1 { if version != formatErasureVersionV1 {
return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV1, version) return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV1, version)
} }
formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) formatPath := pathJoin(export, minioMetaBucket, formatConfigFile)
formatV1 := &formatXLV1{} formatV1 := &formatErasureV1{}
b, err := ioutil.ReadFile(formatPath) b, err := ioutil.ReadFile(formatPath)
if err != nil { if err != nil {
return err return err
@ -232,15 +235,15 @@ func formatXLMigrateV1ToV2(export, version string) error {
return err return err
} }
formatV2 := &formatXLV2{} formatV2 := &formatErasureV2{}
formatV2.Version = formatMetaVersionV1 formatV2.Version = formatMetaVersionV1
formatV2.Format = formatBackendXL formatV2.Format = formatBackendErasure
formatV2.XL.Version = formatXLVersionV2 formatV2.Erasure.Version = formatErasureVersionV2
formatV2.XL.DistributionAlgo = formatXLVersionV2DistributionAlgo formatV2.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoLegacy
formatV2.XL.This = formatV1.XL.Disk formatV2.Erasure.This = formatV1.Erasure.Disk
formatV2.XL.Sets = make([][]string, 1) formatV2.Erasure.Sets = make([][]string, 1)
formatV2.XL.Sets[0] = make([]string, len(formatV1.XL.JBOD)) formatV2.Erasure.Sets[0] = make([]string, len(formatV1.Erasure.JBOD))
copy(formatV2.XL.Sets[0], formatV1.XL.JBOD) copy(formatV2.Erasure.Sets[0], formatV1.Erasure.JBOD)
b, err = json.Marshal(formatV2) b, err = json.Marshal(formatV2)
if err != nil { if err != nil {
@ -250,13 +253,13 @@ func formatXLMigrateV1ToV2(export, version string) error {
} }
// Migrates V2 for format.json to V3 (Flat hierarchy for multipart) // Migrates V2 for format.json to V3 (Flat hierarchy for multipart)
func formatXLMigrateV2ToV3(export, version string) error { func formatErasureMigrateV2ToV3(export, version string) error {
if version != formatXLVersionV2 { if version != formatErasureVersionV2 {
return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatXLVersionV2, version) return fmt.Errorf(`Disk %s: format version expected %s, found %s`, export, formatErasureVersionV2, version)
} }
formatPath := pathJoin(export, minioMetaBucket, formatConfigFile) formatPath := pathJoin(export, minioMetaBucket, formatConfigFile)
formatV2 := &formatXLV2{} formatV2 := &formatErasureV2{}
b, err := ioutil.ReadFile(formatPath) b, err := ioutil.ReadFile(formatPath)
if err != nil { if err != nil {
return err return err
@ -276,13 +279,13 @@ func formatXLMigrateV2ToV3(export, version string) error {
// format-V2 struct is exactly same as format-V1 except that version is "3" // format-V2 struct is exactly same as format-V1 except that version is "3"
// which indicates the simplified multipart backend. // which indicates the simplified multipart backend.
formatV3 := formatXLV3{} formatV3 := formatErasureV3{}
formatV3.Version = formatV2.Version formatV3.Version = formatV2.Version
formatV3.Format = formatV2.Format formatV3.Format = formatV2.Format
formatV3.XL = formatV2.XL formatV3.Erasure = formatV2.Erasure
formatV3.XL.Version = formatXLVersionV3 formatV3.Erasure.Version = formatErasureVersionV3
b, err = json.Marshal(formatV3) b, err = json.Marshal(formatV3)
if err != nil { if err != nil {
@ -303,7 +306,7 @@ func countErrs(errs []error, err error) int {
} }
// Does all errors indicate we need to initialize all disks?. // Does all errors indicate we need to initialize all disks?.
func shouldInitXLDisks(errs []error) bool { func shouldInitErasureDisks(errs []error) bool {
return countErrs(errs, errUnformattedDisk) == len(errs) return countErrs(errs, errUnformattedDisk) == len(errs)
} }
@ -312,13 +315,13 @@ func quorumUnformattedDisks(errs []error) bool {
return countErrs(errs, errUnformattedDisk) >= (len(errs)/2)+1 return countErrs(errs, errUnformattedDisk) >= (len(errs)/2)+1
} }
// loadFormatXLAll - load all format config from all input disks in parallel. // loadFormatErasureAll - load all format config from all input disks in parallel.
func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []error) { func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasureV3, []error) {
// Initialize list of errors. // Initialize list of errors.
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
// Initialize format configs. // Initialize format configs.
var formats = make([]*formatXLV3, len(storageDisks)) var formats = make([]*formatErasureV3, len(storageDisks))
// Load format from each disk in parallel // Load format from each disk in parallel
for index := range storageDisks { for index := range storageDisks {
@ -327,7 +330,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err
if storageDisks[index] == nil { if storageDisks[index] == nil {
return errDiskNotFound return errDiskNotFound
} }
format, err := loadFormatXL(storageDisks[index]) format, err := loadFormatErasure(storageDisks[index])
if err != nil { if err != nil {
return err return err
} }
@ -335,7 +338,7 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err
if !heal { if !heal {
// If no healing required, make the disks valid and // If no healing required, make the disks valid and
// online. // online.
storageDisks[index].SetDiskID(format.XL.This) storageDisks[index].SetDiskID(format.Erasure.This)
} }
return nil return nil
}, index) }, index)
@ -345,12 +348,12 @@ func loadFormatXLAll(storageDisks []StorageAPI, heal bool) ([]*formatXLV3, []err
return formats, g.Wait() return formats, g.Wait()
} }
func saveFormatXL(disk StorageAPI, format interface{}, diskID string) error { func saveFormatErasure(disk StorageAPI, format interface{}, diskID string) error {
if format == nil || disk == nil { if format == nil || disk == nil {
return errDiskNotFound return errDiskNotFound
} }
if err := makeFormatXLMetaVolumes(disk); err != nil { if err := makeFormatErasureMetaVolumes(disk); err != nil {
return err return err
} }
@ -398,8 +401,8 @@ func isHiddenDirectories(vols ...VolInfo) bool {
return true return true
} }
// loadFormatXL - loads format.json from disk. // loadFormatErasure - loads format.json from disk.
func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) { func loadFormatErasure(disk StorageAPI) (format *formatErasureV3, err error) {
buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile) buf, err := disk.ReadAll(minioMetaBucket, formatConfigFile)
if err != nil { if err != nil {
// 'file not found' and 'volume not found' as // 'file not found' and 'volume not found' as
@ -421,7 +424,7 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
} }
// Try to decode format json into formatConfigV1 struct. // Try to decode format json into formatConfigV1 struct.
format = &formatXLV3{} format = &formatErasureV3{}
if err = json.Unmarshal(buf, format); err != nil { if err = json.Unmarshal(buf, format); err != nil {
return nil, err return nil, err
} }
@ -430,56 +433,56 @@ func loadFormatXL(disk StorageAPI) (format *formatXLV3, err error) {
return format, nil return format, nil
} }
// Valid formatXL basic versions. // Valid formatErasure basic versions.
func checkFormatXLValue(formatXL *formatXLV3) error { func checkFormatErasureValue(formatErasure *formatErasureV3) error {
// Validate format version and format type. // Validate format version and format type.
if formatXL.Version != formatMetaVersionV1 { if formatErasure.Version != formatMetaVersionV1 {
return fmt.Errorf("Unsupported version of backend format [%s] found", formatXL.Version) return fmt.Errorf("Unsupported version of backend format [%s] found", formatErasure.Version)
} }
if formatXL.Format != formatBackendXL { if formatErasure.Format != formatBackendErasure {
return fmt.Errorf("Unsupported backend format [%s] found", formatXL.Format) return fmt.Errorf("Unsupported backend format [%s] found", formatErasure.Format)
} }
if formatXL.XL.Version != formatXLVersionV3 { if formatErasure.Erasure.Version != formatErasureVersionV3 {
return fmt.Errorf("Unsupported XL backend format found [%s]", formatXL.XL.Version) return fmt.Errorf("Unsupported Erasure backend format found [%s]", formatErasure.Erasure.Version)
} }
return nil return nil
} }
// Check all format values. // Check all format values.
func checkFormatXLValues(formats []*formatXLV3, drivesPerSet int) error { func checkFormatErasureValues(formats []*formatErasureV3, drivesPerSet int) error {
for i, formatXL := range formats { for i, formatErasure := range formats {
if formatXL == nil { if formatErasure == nil {
continue continue
} }
if err := checkFormatXLValue(formatXL); err != nil { if err := checkFormatErasureValue(formatErasure); err != nil {
return err return err
} }
if len(formats) != len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0]) { if len(formats) != len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]) {
return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)", return fmt.Errorf("%s disk is already being used in another erasure deployment. (Number of disks specified: %d but the number of disks found in the %s disk's format.json: %d)",
humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatXL.XL.Sets)*len(formatXL.XL.Sets[0])) humanize.Ordinal(i+1), len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0]))
} }
// Only if custom erasure drive count is set, // Only if custom erasure drive count is set,
// we should fail here otherwise proceed to honor what // we should fail here otherwise proceed to honor what
// is present on the disk. // is present on the disk.
if globalCustomErasureDriveCount && len(formatXL.XL.Sets[0]) != drivesPerSet { if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != drivesPerSet {
return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatXL.XL.Sets[0]), drivesPerSet) return fmt.Errorf("%s disk is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets[0]), drivesPerSet)
} }
} }
return nil return nil
} }
// Get Deployment ID for the XL sets from format.json. // Get Deployment ID for the Erasure sets from format.json.
// This need not be in quorum. Even if one of the format.json // This need not be in quorum. Even if one of the format.json
// file has this value, we assume it is valid. // file has this value, we assume it is valid.
// If more than one format.json's have different id, it is considered a corrupt // If more than one format.json's have different id, it is considered a corrupt
// backend format. // backend format.
func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (string, error) { func formatErasureGetDeploymentID(refFormat *formatErasureV3, formats []*formatErasureV3) (string, error) {
var deploymentID string var deploymentID string
for _, format := range formats { for _, format := range formats {
if format == nil || format.ID == "" { if format == nil || format.ID == "" {
continue continue
} }
if reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { if reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) {
// Found an ID in one of the format.json file // Found an ID in one of the format.json file
// Set deploymentID for the first time. // Set deploymentID for the first time.
if deploymentID == "" { if deploymentID == "" {
@ -494,11 +497,11 @@ func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (stri
return deploymentID, nil return deploymentID, nil
} }
// formatXLFixDeploymentID - Add deployment id if it is not present. // formatErasureFixDeploymentID - Add deployment id if it is not present.
func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) { func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) (err error) {
// Attempt to load all `format.json` from all disks. // Attempt to load all `format.json` from all disks.
var sErrs []error var sErrs []error
formats, sErrs := loadFormatXLAll(storageDisks, false) formats, sErrs := loadFormatErasureAll(storageDisks, false)
for i, sErr := range sErrs { for i, sErr := range sErrs {
if _, ok := formatCriticalErrors[sErr]; ok { if _, ok := formatCriticalErrors[sErr]; ok {
return config.ErrCorruptedBackend(err).Hint(fmt.Sprintf("Clear any pre-existing content on %s", endpoints[i])) return config.ErrCorruptedBackend(err).Hint(fmt.Sprintf("Clear any pre-existing content on %s", endpoints[i]))
@ -506,13 +509,13 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref
} }
for index := range formats { for index := range formats {
// If the XL sets do not match, set those formats to nil, // If the Erasure sets do not match, set those formats to nil,
// We do not have to update the ID on those format.json file. // We do not have to update the ID on those format.json file.
if formats[index] != nil && !reflect.DeepEqual(formats[index].XL.Sets, refFormat.XL.Sets) { if formats[index] != nil && !reflect.DeepEqual(formats[index].Erasure.Sets, refFormat.Erasure.Sets) {
formats[index] = nil formats[index] = nil
} }
} }
refFormat.ID, err = formatXLGetDeploymentID(refFormat, formats) refFormat.ID, err = formatErasureGetDeploymentID(refFormat, formats)
if err != nil { if err != nil {
return err return err
} }
@ -534,12 +537,12 @@ func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, ref
} }
// Deployment ID needs to be set on all the disks. // Deployment ID needs to be set on all the disks.
// Save `format.json` across all disks. // Save `format.json` across all disks.
return saveFormatXLAll(GlobalContext, storageDisks, formats) return saveFormatErasureAll(GlobalContext, storageDisks, formats)
} }
// Update only the valid local disks which have not been updated before. // Update only the valid local disks which have not been updated before.
func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) error { func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) error {
// If this server was down when the deploymentID was updated // If this server was down when the deploymentID was updated
// then we make sure that we update the local disks with the deploymentID. // then we make sure that we update the local disks with the deploymentID.
@ -550,7 +553,7 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
index := index index := index
g.Go(func() error { g.Go(func() error {
if endpoints[index].IsLocal && storageDisks[index] != nil && storageDisks[index].IsOnline() { if endpoints[index].IsLocal && storageDisks[index] != nil && storageDisks[index].IsOnline() {
format, err := loadFormatXL(storageDisks[index]) format, err := loadFormatErasure(storageDisks[index])
if err != nil { if err != nil {
// Disk can be offline etc. // Disk can be offline etc.
// ignore the errors seen here. // ignore the errors seen here.
@ -559,11 +562,11 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
if format.ID != "" { if format.ID != "" {
return nil return nil
} }
if !reflect.DeepEqual(format.XL.Sets, refFormat.XL.Sets) { if !reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) {
return nil return nil
} }
format.ID = refFormat.ID format.ID = refFormat.ID
if err := saveFormatXL(storageDisks[index], format, format.XL.This); err != nil { if err := saveFormatErasure(storageDisks[index], format, format.Erasure.This); err != nil {
logger.LogIf(GlobalContext, err) logger.LogIf(GlobalContext, err)
return fmt.Errorf("Unable to save format.json, %w", err) return fmt.Errorf("Unable to save format.json, %w", err)
} }
@ -579,15 +582,15 @@ func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI
return nil return nil
} }
// Get backend XL format in quorum `format.json`. // Get backend Erasure format in quorum `format.json`.
func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) { func getFormatErasureInQuorum(formats []*formatErasureV3) (*formatErasureV3, error) {
formatHashes := make([]string, len(formats)) formatHashes := make([]string, len(formats))
for i, format := range formats { for i, format := range formats {
if format == nil { if format == nil {
continue continue
} }
h := sha256.New() h := sha256.New()
for _, set := range format.XL.Sets { for _, set := range format.Erasure.Sets {
for _, diskID := range set { for _, diskID := range set {
h.Write([]byte(diskID)) h.Write([]byte(diskID))
} }
@ -613,55 +616,55 @@ func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) {
} }
if maxCount < len(formats)/2 { if maxCount < len(formats)/2 {
return nil, errXLReadQuorum return nil, errErasureReadQuorum
} }
for i, hash := range formatHashes { for i, hash := range formatHashes {
if hash == maxHash { if hash == maxHash {
format := formats[i].Clone() format := formats[i].Clone()
format.XL.This = "" format.Erasure.This = ""
return format, nil return format, nil
} }
} }
return nil, errXLReadQuorum return nil, errErasureReadQuorum
} }
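getFormatErasureInQuorum above hashes each disk's set layout and accepts the most common hash only when enough disks agree. A minimal sketch of that majority selection follows; the names are illustrative, not the function's actual helpers:

// pickQuorumHash keeps the most common non-empty layout hash and reports
// whether it is held by at least half of the disks; otherwise callers
// surface a read-quorum error, as the code above does.
func pickQuorumHash(formatHashes []string) (maxHash string, ok bool) {
	counts := make(map[string]int)
	maxCount := 0
	for _, hash := range formatHashes {
		if hash == "" {
			continue // unformatted or offline disk contributes nothing
		}
		counts[hash]++
		if counts[hash] > maxCount {
			maxHash, maxCount = hash, counts[hash]
		}
	}
	// Mirrors the `maxCount < len(formats)/2` check above.
	return maxHash, maxCount >= len(formatHashes)/2
}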
func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error { func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) error {
tmpFormat := format.Clone() tmpFormat := format.Clone()
this := tmpFormat.XL.This this := tmpFormat.Erasure.This
tmpFormat.XL.This = "" tmpFormat.Erasure.This = ""
if len(reference.XL.Sets) != len(format.XL.Sets) { if len(reference.Erasure.Sets) != len(format.Erasure.Sets) {
return fmt.Errorf("Expected number of sets %d, got %d", len(reference.XL.Sets), len(format.XL.Sets)) return fmt.Errorf("Expected number of sets %d, got %d", len(reference.Erasure.Sets), len(format.Erasure.Sets))
} }
// Make sure that the sets match. // Make sure that the sets match.
for i := range reference.XL.Sets { for i := range reference.Erasure.Sets {
if len(reference.XL.Sets[i]) != len(format.XL.Sets[i]) { if len(reference.Erasure.Sets[i]) != len(format.Erasure.Sets[i]) {
return fmt.Errorf("Each set should be of same size, expected %d got %d", return fmt.Errorf("Each set should be of same size, expected %d got %d",
len(reference.XL.Sets[i]), len(format.XL.Sets[i])) len(reference.Erasure.Sets[i]), len(format.Erasure.Sets[i]))
} }
for j := range reference.XL.Sets[i] { for j := range reference.Erasure.Sets[i] {
if reference.XL.Sets[i][j] != format.XL.Sets[i][j] { if reference.Erasure.Sets[i][j] != format.Erasure.Sets[i][j] {
return fmt.Errorf("UUID on positions %d:%d do not match with, expected %s got %s", return fmt.Errorf("UUID on positions %d:%d do not match with, expected %s got %s",
i, j, reference.XL.Sets[i][j], format.XL.Sets[i][j]) i, j, reference.Erasure.Sets[i][j], format.Erasure.Sets[i][j])
} }
} }
} }
// Make sure that the diskID is found in the set. // Make sure that the diskID is found in the set.
for i := 0; i < len(tmpFormat.XL.Sets); i++ { for i := 0; i < len(tmpFormat.Erasure.Sets); i++ {
for j := 0; j < len(tmpFormat.XL.Sets[i]); j++ { for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ {
if this == tmpFormat.XL.Sets[i][j] { if this == tmpFormat.Erasure.Sets[i][j] {
return nil return nil
} }
} }
} }
return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.XL.Sets) return fmt.Errorf("Disk ID %s not found in any disk sets %s", this, format.Erasure.Sets)
} }
// Initializes meta volume only on local storage disks. // Initializes meta volume only on local storage disks.
func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatXLV3) error { func initErasureMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatErasureV3) error {
// Compute the local disks eligible for meta volumes (re)initialization // Compute the local disks eligible for meta volumes (re)initialization
var disksToInit []StorageAPI var disksToInit []StorageAPI
@ -682,7 +685,7 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX
// goroutine will return its own instance of index variable. // goroutine will return its own instance of index variable.
index := index index := index
g.Go(func() error { g.Go(func() error {
return makeFormatXLMetaVolumes(disksToInit[index]) return makeFormatErasureMetaVolumes(disksToInit[index])
}, index) }, index)
} }
@ -698,15 +701,15 @@ func initXLMetaVolumesInLocalDisks(storageDisks []StorageAPI, formats []*formatX
return nil return nil
} }
// saveFormatXLAll - populates `format.json` on disks in its order. // saveFormatErasureAll - populates `format.json` on disks in its order.
func saveFormatXLAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatXLV3) error { func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, formats []*formatErasureV3) error {
g := errgroup.WithNErrs(len(storageDisks)) g := errgroup.WithNErrs(len(storageDisks))
// Write `format.json` to all disks. // Write `format.json` to all disks.
for index := range storageDisks { for index := range storageDisks {
index := index index := index
g.Go(func() error { g.Go(func() error {
return saveFormatXL(storageDisks[index], formats[index], formats[index].XL.This) return saveFormatErasure(storageDisks[index], formats[index], formats[index].Erasure.This)
}, index) }, index)
} }
@ -745,9 +748,9 @@ func initStorageDisksWithErrors(endpoints Endpoints) ([]StorageAPI, []error) {
return storageDisks, g.Wait() return storageDisks, g.Wait()
} }
// formatXLV3ThisEmpty - find out if '.This' field is empty // formatErasureV3ThisEmpty - find out if '.This' field is empty
// in any of the input `formats`, if yes return true. // in any of the input `formats`, if yes return true.
func formatXLV3ThisEmpty(formats []*formatXLV3) bool { func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool {
for _, format := range formats { for _, format := range formats {
if format == nil { if format == nil {
continue continue
@ -756,18 +759,18 @@ func formatXLV3ThisEmpty(formats []*formatXLV3) bool {
// V1 to V2 to V3, in a scenario such as this we only need to handle // V1 to V2 to V3, in a scenario such as this we only need to handle
// single sets since we never used to support multiple sets in releases // single sets since we never used to support multiple sets in releases
// with V1 format version. // with V1 format version.
if len(format.XL.Sets) > 1 { if len(format.Erasure.Sets) > 1 {
continue continue
} }
if format.XL.This == "" { if format.Erasure.This == "" {
return true return true
} }
} }
return false return false
} }
// fixFormatXLV3 - fix format XL configuration on all disks. // fixFormatErasureV3 - fix format Erasure configuration on all disks.
func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatXLV3) error { func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error {
g := errgroup.WithNErrs(len(formats)) g := errgroup.WithNErrs(len(formats))
for i := range formats { for i := range formats {
i := i i := i
@ -779,12 +782,12 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo
// V1 to V2 to V3, in a scenario such as this we only need to handle // V1 to V2 to V3, in a scenario such as this we only need to handle
// single sets since we never used to support multiple sets in releases // single sets since we never used to support multiple sets in releases
// with V1 format version. // with V1 format version.
if len(formats[i].XL.Sets) > 1 { if len(formats[i].Erasure.Sets) > 1 {
return nil return nil
} }
if formats[i].XL.This == "" { if formats[i].Erasure.This == "" {
formats[i].XL.This = formats[i].XL.Sets[0][i] formats[i].Erasure.This = formats[i].Erasure.Sets[0][i]
if err := saveFormatXL(storageDisks[i], formats[i], formats[i].XL.This); err != nil { if err := saveFormatErasure(storageDisks[i], formats[i], formats[i].Erasure.This); err != nil {
return err return err
} }
} }
@ -800,10 +803,10 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*fo
} }
// initFormatXL - save XL format configuration on all disks. // initFormatErasure - save Erasure format configuration on all disks.
func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatXLV3, error) { func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatErasureV3, error) {
format := newFormatXLV3(setCount, drivesPerSet) format := newFormatErasureV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, len(storageDisks)) formats := make([]*formatErasureV3, len(storageDisks))
wantAtMost := ecDrivesNoConfig(drivesPerSet) wantAtMost := ecDrivesNoConfig(drivesPerSet)
for i := 0; i < setCount; i++ { for i := 0; i < setCount; i++ {
@ -811,7 +814,7 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv
for j := 0; j < drivesPerSet; j++ { for j := 0; j < drivesPerSet; j++ {
disk := storageDisks[i*drivesPerSet+j] disk := storageDisks[i*drivesPerSet+j]
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
if deploymentID != "" { if deploymentID != "" {
newFormat.ID = deploymentID newFormat.ID = deploymentID
} }
@ -843,11 +846,11 @@ func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, driv
} }
// Save formats `format.json` across all disks. // Save formats `format.json` across all disks.
if err := saveFormatXLAll(ctx, storageDisks, formats); err != nil { if err := saveFormatErasureAll(ctx, storageDisks, formats); err != nil {
return nil, err return nil, err
} }
return getFormatXLInQuorum(formats) return getFormatErasureInQuorum(formats)
} }
// ecDrivesNoConfig returns the erasure coded drives in a set if no config has been set. // ecDrivesNoConfig returns the erasure coded drives in a set if no config has been set.
@ -866,8 +869,8 @@ func ecDrivesNoConfig(drivesPerSet int) int {
return ecDrives return ecDrives
} }
// Make XL backend meta volumes. // Make Erasure backend meta volumes.
func makeFormatXLMetaVolumes(disk StorageAPI) error { func makeFormatErasureMetaVolumes(disk StorageAPI) error {
if disk == nil { if disk == nil {
return errDiskNotFound return errDiskNotFound
} }
@ -878,14 +881,14 @@ func makeFormatXLMetaVolumes(disk StorageAPI) error {
// Get all UUIDs which are present in reference format should // Get all UUIDs which are present in reference format should
// be present in the list of formats provided, those are considered // be present in the list of formats provided, those are considered
// as online UUIDs. // as online UUIDs.
func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs []string) { func getOnlineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (onlineUUIDs []string) {
for _, format := range formats { for _, format := range formats {
if format == nil { if format == nil {
continue continue
} }
for _, set := range refFormat.XL.Sets { for _, set := range refFormat.Erasure.Sets {
for _, uuid := range set { for _, uuid := range set {
if format.XL.This == uuid { if format.Erasure.This == uuid {
onlineUUIDs = append(onlineUUIDs, uuid) onlineUUIDs = append(onlineUUIDs, uuid)
} }
} }
@ -897,13 +900,13 @@ func getOnlineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (onlineUUIDs [
// UUIDs that are present in the reference format but do not appear in the
// onlineUUIDs list are collected into a list of offline UUIDs.
func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs []string) { func getOfflineUUIDs(refFormat *formatErasureV3, formats []*formatErasureV3) (offlineUUIDs []string) {
onlineUUIDs := getOnlineUUIDs(refFormat, formats) onlineUUIDs := getOnlineUUIDs(refFormat, formats)
for i, set := range refFormat.XL.Sets { for i, set := range refFormat.Erasure.Sets {
for j, uuid := range set { for j, uuid := range set {
var found bool var found bool
for _, onlineUUID := range onlineUUIDs { for _, onlineUUID := range onlineUUIDs {
if refFormat.XL.Sets[i][j] == onlineUUID { if refFormat.Erasure.Sets[i][j] == onlineUUID {
found = true found = true
} }
} }
@ -916,13 +919,13 @@ func getOfflineUUIDs(refFormat *formatXLV3, formats []*formatXLV3) (offlineUUIDs
} }
// Mark all UUIDs that are offline. // Mark all UUIDs that are offline.
func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) { func markUUIDsOffline(refFormat *formatErasureV3, formats []*formatErasureV3) {
offlineUUIDs := getOfflineUUIDs(refFormat, formats) offlineUUIDs := getOfflineUUIDs(refFormat, formats)
for i, set := range refFormat.XL.Sets { for i, set := range refFormat.Erasure.Sets {
for j := range set { for j := range set {
for _, offlineUUID := range offlineUUIDs { for _, offlineUUID := range offlineUUIDs {
if refFormat.XL.Sets[i][j] == offlineUUID { if refFormat.Erasure.Sets[i][j] == offlineUUID {
refFormat.XL.Sets[i][j] = offlineDiskUUID refFormat.Erasure.Sets[i][j] = offlineDiskUUID
} }
} }
} }
@ -930,29 +933,29 @@ func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) {
} }
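
Taken together, getOnlineUUIDs, getOfflineUUIDs and markUUIDsOffline let the heal path replace the UUIDs of unreachable disks with the offlineDiskUUID sentinel. A minimal usage sketch, assuming a hypothetical one-set, two-drive layout and the helpers exactly as shown in this diff:

// formats[1] is nil because its disk could not be read.
refFormat := newFormatErasureV3(1, 2)
live := refFormat.Clone()
live.Erasure.This = refFormat.Erasure.Sets[0][0] // disk 0 claims its UUID
formats := []*formatErasureV3{live, nil}

markUUIDsOffline(refFormat, formats)
// refFormat.Erasure.Sets[0][1] is now offlineDiskUUID: no live format
// claimed that UUID via Erasure.This, so it is treated as offline.
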
// Initialize a new set of set formats which will be written to all disks. // Initialize a new set of set formats which will be written to all disks.
func newHealFormatSets(refFormat *formatXLV3, setCount, drivesPerSet int, formats []*formatXLV3, errs []error) [][]*formatXLV3 { func newHealFormatSets(refFormat *formatErasureV3, setCount, drivesPerSet int, formats []*formatErasureV3, errs []error) [][]*formatErasureV3 {
newFormats := make([][]*formatXLV3, setCount) newFormats := make([][]*formatErasureV3, setCount)
for i := range refFormat.XL.Sets { for i := range refFormat.Erasure.Sets {
newFormats[i] = make([]*formatXLV3, drivesPerSet) newFormats[i] = make([]*formatErasureV3, drivesPerSet)
} }
for i := range refFormat.XL.Sets { for i := range refFormat.Erasure.Sets {
for j := range refFormat.XL.Sets[i] { for j := range refFormat.Erasure.Sets[i] {
if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil { if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil {
newFormats[i][j] = &formatXLV3{} newFormats[i][j] = &formatErasureV3{}
newFormats[i][j].Version = refFormat.Version newFormats[i][j].Version = refFormat.Version
newFormats[i][j].ID = refFormat.ID newFormats[i][j].ID = refFormat.ID
newFormats[i][j].Format = refFormat.Format newFormats[i][j].Format = refFormat.Format
newFormats[i][j].XL.Version = refFormat.XL.Version newFormats[i][j].Erasure.Version = refFormat.Erasure.Version
newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo newFormats[i][j].Erasure.DistributionAlgo = refFormat.Erasure.DistributionAlgo
} }
if errs[i*drivesPerSet+j] == errUnformattedDisk { if errs[i*drivesPerSet+j] == errUnformattedDisk {
newFormats[i][j].XL.This = "" newFormats[i][j].Erasure.This = ""
newFormats[i][j].XL.Sets = nil newFormats[i][j].Erasure.Sets = nil
continue continue
} }
if errs[i*drivesPerSet+j] == nil { if errs[i*drivesPerSet+j] == nil {
newFormats[i][j].XL.This = formats[i*drivesPerSet+j].XL.This newFormats[i][j].Erasure.This = formats[i*drivesPerSet+j].Erasure.This
newFormats[i][j].XL.Sets = nil newFormats[i][j].Erasure.Sets = nil
} }
} }
} }

View File

@ -26,13 +26,13 @@ import (
// Test get offline/online uuids. // Test get offline/online uuids.
func TestGetUUIDs(t *testing.T) { func TestGetUUIDs(t *testing.T) {
fmtV2 := newFormatXLV3(4, 16) fmtV2 := newFormatErasureV3(4, 16)
formats := make([]*formatXLV3, 64) formats := make([]*formatErasureV3, 64)
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
for j := 0; j < 16; j++ { for j := 0; j < 16; j++ {
newFormat := *fmtV2 newFormat := *fmtV2
newFormat.XL.This = fmtV2.XL.Sets[i][j] newFormat.Erasure.This = fmtV2.Erasure.Sets[i][j]
formats[i*16+j] = &newFormat formats[i*16+j] = &newFormat
} }
} }
@ -62,9 +62,9 @@ func TestGetUUIDs(t *testing.T) {
markUUIDsOffline(fmtV2, formats) markUUIDsOffline(fmtV2, formats)
gotCount = 0 gotCount = 0
for i := range fmtV2.XL.Sets { for i := range fmtV2.Erasure.Sets {
for j := range fmtV2.XL.Sets[i] { for j := range fmtV2.Erasure.Sets[i] {
if fmtV2.XL.Sets[i][j] == offlineDiskUUID { if fmtV2.Erasure.Sets[i][j] == offlineDiskUUID {
gotCount++ gotCount++
} }
} }
@ -74,16 +74,16 @@ func TestGetUUIDs(t *testing.T) {
} }
} }
// tests fixFormatXLV3 - fix format.json on all disks. // tests fixFormatErasureV3 - fix format.json on all disks.
func TestFixFormatV3(t *testing.T) { func TestFixFormatV3(t *testing.T) {
xlDirs, err := getRandomDisks(8) erasureDirs, err := getRandomDisks(8)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
for _, xlDir := range xlDirs { for _, erasureDir := range erasureDirs {
defer os.RemoveAll(xlDir) defer os.RemoveAll(erasureDir)
} }
endpoints := mustGetNewEndpoints(xlDirs...) endpoints := mustGetNewEndpoints(erasureDirs...)
storageDisks, errs := initStorageDisksWithErrors(endpoints) storageDisks, errs := initStorageDisksWithErrors(endpoints)
for _, err := range errs { for _, err := range errs {
@ -92,46 +92,46 @@ func TestFixFormatV3(t *testing.T) {
} }
} }
format := newFormatXLV3(1, 8) format := newFormatErasureV3(1, 8)
formats := make([]*formatXLV3, 8) formats := make([]*formatErasureV3, 8)
for j := 0; j < 8; j++ { for j := 0; j < 8; j++ {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[0][j] newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat formats[j] = newFormat
} }
if err = initXLMetaVolumesInLocalDisks(storageDisks, formats); err != nil { if err = initErasureMetaVolumesInLocalDisks(storageDisks, formats); err != nil {
t.Fatal(err) t.Fatal(err)
} }
formats[1] = nil formats[1] = nil
expThis := formats[2].XL.This expThis := formats[2].Erasure.This
formats[2].XL.This = "" formats[2].Erasure.This = ""
if err := fixFormatXLV3(storageDisks, endpoints, formats); err != nil { if err := fixFormatErasureV3(storageDisks, endpoints, formats); err != nil {
t.Fatal(err) t.Fatal(err)
} }
newFormats, errs := loadFormatXLAll(storageDisks, false) newFormats, errs := loadFormatErasureAll(storageDisks, false)
for _, err := range errs { for _, err := range errs {
if err != nil && err != errUnformattedDisk { if err != nil && err != errUnformattedDisk {
t.Fatal(err) t.Fatal(err)
} }
} }
gotThis := newFormats[2].XL.This gotThis := newFormats[2].Erasure.This
if expThis != gotThis { if expThis != gotThis {
t.Fatalf("expected uuid %s, got %s", expThis, gotThis) t.Fatalf("expected uuid %s, got %s", expThis, gotThis)
} }
} }
// tests formatXLV3ThisEmpty conditions. // tests formatErasureV3ThisEmpty conditions.
func TestFormatXLEmpty(t *testing.T) { func TestFormatErasureEmpty(t *testing.T) {
format := newFormatXLV3(1, 16) format := newFormatErasureV3(1, 16)
formats := make([]*formatXLV3, 16) formats := make([]*formatErasureV3, 16)
for j := 0; j < 16; j++ { for j := 0; j < 16; j++ {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[0][j] newFormat.Erasure.This = format.Erasure.Sets[0][j]
formats[j] = newFormat formats[j] = newFormat
} }
@ -139,18 +139,18 @@ func TestFormatXLEmpty(t *testing.T) {
// empty should return false. // empty should return false.
formats[0] = nil formats[0] = nil
if ok := formatXLV3ThisEmpty(formats); ok { if ok := formatErasureV3ThisEmpty(formats); ok {
t.Fatalf("expected value false, got %t", ok) t.Fatalf("expected value false, got %t", ok)
} }
formats[2].XL.This = "" formats[2].Erasure.This = ""
if ok := formatXLV3ThisEmpty(formats); !ok { if ok := formatErasureV3ThisEmpty(formats); !ok {
t.Fatalf("expected value true, got %t", ok) t.Fatalf("expected value true, got %t", ok)
} }
} }
// Tests xl format migration. // Tests xl format migration.
func TestFormatXLMigrate(t *testing.T) { func TestFormatErasureMigrate(t *testing.T) {
// Get test root. // Get test root.
rootPath, err := getTestRoot() rootPath, err := getTestRoot()
if err != nil { if err != nil {
@ -158,12 +158,12 @@ func TestFormatXLMigrate(t *testing.T) {
} }
defer os.RemoveAll(rootPath) defer os.RemoveAll(rootPath)
m := &formatXLV1{} m := &formatErasureV1{}
m.Format = formatBackendXL m.Format = formatBackendErasure
m.Version = formatMetaVersionV1 m.Version = formatMetaVersionV1
m.XL.Version = formatXLVersionV1 m.Erasure.Version = formatErasureVersionV1
m.XL.Disk = mustGetUUID() m.Erasure.Disk = mustGetUUID()
m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()}
b, err := json.Marshal(m) b, err := json.Marshal(m)
if err != nil { if err != nil {
@ -178,43 +178,43 @@ func TestFormatXLMigrate(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err = formatXLMigrate(rootPath); err != nil { if err = formatErasureMigrate(rootPath); err != nil {
t.Fatal(err) t.Fatal(err)
} }
migratedVersion, err := formatGetBackendXLVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) migratedVersion, err := formatGetBackendErasureVersion(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if migratedVersion != formatXLVersionV3 { if migratedVersion != formatErasureVersionV3 {
t.Fatalf("expected version: %s, got: %s", formatXLVersionV3, migratedVersion) t.Fatalf("expected version: %s, got: %s", formatErasureVersionV3, migratedVersion)
} }
b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile)) b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
formatV3 := &formatXLV3{} formatV3 := &formatErasureV3{}
if err = json.Unmarshal(b, formatV3); err != nil { if err = json.Unmarshal(b, formatV3); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if formatV3.XL.This != m.XL.Disk { if formatV3.Erasure.This != m.Erasure.Disk {
t.Fatalf("expected disk uuid: %s, got: %s", m.XL.Disk, formatV3.XL.This) t.Fatalf("expected disk uuid: %s, got: %s", m.Erasure.Disk, formatV3.Erasure.This)
} }
if len(formatV3.XL.Sets) != 1 { if len(formatV3.Erasure.Sets) != 1 {
t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.XL.Sets)) t.Fatalf("expected single set after migrating from v1 to v3, but found %d", len(formatV3.Erasure.Sets))
} }
if !reflect.DeepEqual(formatV3.XL.Sets[0], m.XL.JBOD) { if !reflect.DeepEqual(formatV3.Erasure.Sets[0], m.Erasure.JBOD) {
t.Fatalf("expected disk uuid: %v, got: %v", m.XL.JBOD, formatV3.XL.Sets[0]) t.Fatalf("expected disk uuid: %v, got: %v", m.Erasure.JBOD, formatV3.Erasure.Sets[0])
} }
m = &formatXLV1{} m = &formatErasureV1{}
m.Format = "unknown" m.Format = "unknown"
m.Version = formatMetaVersionV1 m.Version = formatMetaVersionV1
m.XL.Version = formatXLVersionV1 m.Erasure.Version = formatErasureVersionV1
m.XL.Disk = mustGetUUID() m.Erasure.Disk = mustGetUUID()
m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()}
b, err = json.Marshal(m) b, err = json.Marshal(m)
if err != nil { if err != nil {
@ -225,16 +225,16 @@ func TestFormatXLMigrate(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err = formatXLMigrate(rootPath); err == nil { if err = formatErasureMigrate(rootPath); err == nil {
t.Fatal("Expected to fail with unexpected backend format") t.Fatal("Expected to fail with unexpected backend format")
} }
m = &formatXLV1{} m = &formatErasureV1{}
m.Format = formatBackendXL m.Format = formatBackendErasure
m.Version = formatMetaVersionV1 m.Version = formatMetaVersionV1
m.XL.Version = "30" m.Erasure.Version = "30"
m.XL.Disk = mustGetUUID() m.Erasure.Disk = mustGetUUID()
m.XL.JBOD = []string{m.XL.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()} m.Erasure.JBOD = []string{m.Erasure.Disk, mustGetUUID(), mustGetUUID(), mustGetUUID()}
b, err = json.Marshal(m) b, err = json.Marshal(m)
if err != nil { if err != nil {
@ -245,25 +245,25 @@ func TestFormatXLMigrate(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if err = formatXLMigrate(rootPath); err == nil { if err = formatErasureMigrate(rootPath); err == nil {
t.Fatal("Expected to fail with unexpected backend format version number") t.Fatal("Expected to fail with unexpected backend format version number")
} }
} }
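
For readers unfamiliar with the two on-disk layouts, the assertions earlier in this test pin down the v1-to-v3 mapping; a minimal sketch of that relationship, using only fields that appear in this diff:

// v1 stored a flat JBOD list plus this disk's UUID; v3 stores sets of
// UUIDs plus this disk's UUID. The migration keeps the UUIDs intact.
v1 := &formatErasureV1{} // populated as in the test above
v3 := &formatErasureV3{}
v3.Erasure.This = v1.Erasure.Disk             // the disk keeps its identity
v3.Erasure.Sets = [][]string{v1.Erasure.JBOD} // the old JBOD becomes one set
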
// Tests check format xl value. // Tests check format xl value.
func TestCheckFormatXLValue(t *testing.T) { func TestCheckFormatErasureValue(t *testing.T) {
testCases := []struct { testCases := []struct {
format *formatXLV3 format *formatErasureV3
success bool success bool
}{ }{
// Invalid XL format version "2". // Invalid Erasure format version "2".
{ {
&formatXLV3{ &formatErasureV3{
formatMetaV1: formatMetaV1{ formatMetaV1: formatMetaV1{
Version: "2", Version: "2",
Format: "XL", Format: "Erasure",
}, },
XL: struct { Erasure: struct {
Version string `json:"version"` Version string `json:"version"`
This string `json:"this"` This string `json:"this"`
Sets [][]string `json:"sets"` Sets [][]string `json:"sets"`
@ -274,14 +274,14 @@ func TestCheckFormatXLValue(t *testing.T) {
}, },
false, false,
}, },
// Invalid XL format "Unknown". // Invalid Erasure format "Unknown".
{ {
&formatXLV3{ &formatErasureV3{
formatMetaV1: formatMetaV1{ formatMetaV1: formatMetaV1{
Version: "1", Version: "1",
Format: "Unknown", Format: "Unknown",
}, },
XL: struct { Erasure: struct {
Version string `json:"version"` Version string `json:"version"`
This string `json:"this"` This string `json:"this"`
Sets [][]string `json:"sets"` Sets [][]string `json:"sets"`
@ -292,14 +292,14 @@ func TestCheckFormatXLValue(t *testing.T) {
}, },
false, false,
}, },
// Invalid XL format version "0". // Invalid Erasure format version "0".
{ {
&formatXLV3{ &formatErasureV3{
formatMetaV1: formatMetaV1{ formatMetaV1: formatMetaV1{
Version: "1", Version: "1",
Format: "XL", Format: "Erasure",
}, },
XL: struct { Erasure: struct {
Version string `json:"version"` Version string `json:"version"`
This string `json:"this"` This string `json:"this"`
Sets [][]string `json:"sets"` Sets [][]string `json:"sets"`
@ -314,65 +314,65 @@ func TestCheckFormatXLValue(t *testing.T) {
// Valid all test cases. // Valid all test cases.
for i, testCase := range testCases { for i, testCase := range testCases {
if err := checkFormatXLValue(testCase.format); err != nil && testCase.success { if err := checkFormatErasureValue(testCase.format); err != nil && testCase.success {
t.Errorf("Test %d: Expected failure %s", i+1, err) t.Errorf("Test %d: Expected failure %s", i+1, err)
} }
} }
} }
// Tests getFormatXLInQuorum() // Tests getFormatErasureInQuorum()
func TestGetFormatXLInQuorumCheck(t *testing.T) { func TestGetFormatErasureInQuorumCheck(t *testing.T) {
setCount := 2 setCount := 2
drivesPerSet := 16 drivesPerSet := 16
format := newFormatXLV3(setCount, drivesPerSet) format := newFormatErasureV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 32) formats := make([]*formatErasureV3, 32)
for i := 0; i < setCount; i++ { for i := 0; i < setCount; i++ {
for j := 0; j < drivesPerSet; j++ { for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*drivesPerSet+j] = newFormat formats[i*drivesPerSet+j] = newFormat
} }
} }
// Return a format from list of formats in quorum. // Return a format from list of formats in quorum.
quorumFormat, err := getFormatXLInQuorum(formats) quorumFormat, err := getFormatErasureInQuorum(formats)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Check if the reference format and input formats are the same.
if err = formatXLV3Check(quorumFormat, formats[0]); err != nil { if err = formatErasureV3Check(quorumFormat, formats[0]); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// QuorumFormat has .This field empty on purpose, expect a failure. // QuorumFormat has .This field empty on purpose, expect a failure.
if err = formatXLV3Check(formats[0], quorumFormat); err == nil { if err = formatErasureV3Check(formats[0], quorumFormat); err == nil {
t.Fatal("Unexpected success") t.Fatal("Unexpected success")
} }
formats[0] = nil formats[0] = nil
quorumFormat, err = getFormatXLInQuorum(formats) quorumFormat, err = getFormatErasureInQuorum(formats)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
badFormat := *quorumFormat badFormat := *quorumFormat
badFormat.XL.Sets = nil badFormat.Erasure.Sets = nil
if err = formatXLV3Check(quorumFormat, &badFormat); err == nil { if err = formatErasureV3Check(quorumFormat, &badFormat); err == nil {
t.Fatal("Unexpected success") t.Fatal("Unexpected success")
} }
badFormatUUID := *quorumFormat badFormatUUID := *quorumFormat
badFormatUUID.XL.Sets[0][0] = "bad-uuid" badFormatUUID.Erasure.Sets[0][0] = "bad-uuid"
if err = formatXLV3Check(quorumFormat, &badFormatUUID); err == nil { if err = formatErasureV3Check(quorumFormat, &badFormatUUID); err == nil {
t.Fatal("Unexpected success") t.Fatal("Unexpected success")
} }
badFormatSetSize := *quorumFormat badFormatSetSize := *quorumFormat
badFormatSetSize.XL.Sets[0] = nil badFormatSetSize.Erasure.Sets[0] = nil
if err = formatXLV3Check(quorumFormat, &badFormatSetSize); err == nil { if err = formatErasureV3Check(quorumFormat, &badFormatSetSize); err == nil {
t.Fatal("Unexpected success") t.Fatal("Unexpected success")
} }
@ -381,36 +381,36 @@ func TestGetFormatXLInQuorumCheck(t *testing.T) {
formats[i] = nil formats[i] = nil
} }
} }
if _, err = getFormatXLInQuorum(formats); err == nil { if _, err = getFormatErasureInQuorum(formats); err == nil {
t.Fatal("Unexpected success") t.Fatal("Unexpected success")
} }
} }
// Tests formatXLGetDeploymentID() // Tests formatErasureGetDeploymentID()
func TestGetXLID(t *testing.T) { func TestGetErasureID(t *testing.T) {
setCount := 2 setCount := 2
drivesPerSet := 8 drivesPerSet := 8
format := newFormatXLV3(setCount, drivesPerSet) format := newFormatErasureV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 16) formats := make([]*formatErasureV3, 16)
for i := 0; i < setCount; i++ { for i := 0; i < setCount; i++ {
for j := 0; j < drivesPerSet; j++ { for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*drivesPerSet+j] = newFormat formats[i*drivesPerSet+j] = newFormat
} }
} }
// Return a format from list of formats in quorum. // Return a format from list of formats in quorum.
quorumFormat, err := getFormatXLInQuorum(formats) quorumFormat, err := getFormatErasureInQuorum(formats)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Check if the reference format and input formats are the same.
var id string var id string
if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -419,15 +419,15 @@ func TestGetXLID(t *testing.T) {
} }
formats[0] = nil formats[0] = nil
if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if id == "" { if id == "" {
t.Fatal("ID cannot be empty.") t.Fatal("ID cannot be empty.")
} }
formats[1].XL.Sets[0][0] = "bad-uuid" formats[1].Erasure.Sets[0][0] = "bad-uuid"
if id, err = formatXLGetDeploymentID(quorumFormat, formats); err != nil { if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -436,7 +436,7 @@ func TestGetXLID(t *testing.T) {
} }
formats[2].ID = "bad-id" formats[2].ID = "bad-id"
if _, err = formatXLGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat { if _, err = formatErasureGetDeploymentID(quorumFormat, formats); err != errCorruptedFormat {
t.Fatal("Unexpected Success") t.Fatal("Unexpected Success")
} }
} }
@ -446,19 +446,19 @@ func TestNewFormatSets(t *testing.T) {
setCount := 2 setCount := 2
drivesPerSet := 16 drivesPerSet := 16
format := newFormatXLV3(setCount, drivesPerSet) format := newFormatErasureV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 32) formats := make([]*formatErasureV3, 32)
errs := make([]error, 32) errs := make([]error, 32)
for i := 0; i < setCount; i++ { for i := 0; i < setCount; i++ {
for j := 0; j < drivesPerSet; j++ { for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone() newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j] newFormat.Erasure.This = format.Erasure.Sets[i][j]
formats[i*drivesPerSet+j] = newFormat formats[i*drivesPerSet+j] = newFormat
} }
} }
quorumFormat, err := getFormatXLInQuorum(formats) quorumFormat, err := getFormatErasureInQuorum(formats)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -75,7 +75,7 @@ func newFormatFSV1() (format *formatFSV1) {
} }
// Returns the field formatMetaV1.Format, i.e. the string "fs", which is never likely to change.
// We do not use this function in XL to get the format as the file is not fcntl-locked on XL. // We do not use this function in Erasure to get the format as the file is not fcntl-locked on Erasure.
func formatMetaGetFormatBackendFS(r io.ReadSeeker) (string, error) { func formatMetaGetFormatBackendFS(r io.ReadSeeker) (string, error) {
format := &formatMetaV1{} format := &formatMetaV1{}
if err := jsonLoad(r, format); err != nil { if err := jsonLoad(r, format); err != nil {

View File

@ -42,7 +42,7 @@ func fsRemoveFile(ctx context.Context, filePath string) (err error) {
} }
if err = os.Remove((filePath)); err != nil { if err = os.Remove((filePath)); err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
if err != errFileNotFound { if err != errFileNotFound {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
@ -186,37 +186,11 @@ func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) {
return fi, nil return fi, nil
} }
// Is a one place function which converts all os.PathError
// into a more FS object layer friendly form, converts
// known errors into their typed form for top level
// interpretation.
func osErrToFSFileErr(err error) error {
if err == nil {
return nil
}
if os.IsNotExist(err) {
return errFileNotFound
}
if os.IsPermission(err) {
return errFileAccessDenied
}
if isSysErrNotDir(err) {
return errFileNotFound
}
if isSysErrPathNotFound(err) {
return errFileNotFound
}
if isSysErrTooManyFiles(err) {
return errTooManyOpenFiles
}
return err
}
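
The FS-specific translator deleted above is replaced by calls to a shared osErrToFileErr helper whose body is not part of this hunk; a hedged sketch of the mapping that helper is assumed to perform, inferred directly from the deleted function (the sketch name is illustrative and lives in the same package as the helpers it references):

// Assumed behaviour of the shared helper (sketch, not the actual code):
// translate raw os / syscall errors into the typed storage-layer errors.
func osErrToFileErrSketch(err error) error {
	switch {
	case err == nil:
		return nil
	case os.IsNotExist(err), isSysErrNotDir(err), isSysErrPathNotFound(err):
		return errFileNotFound
	case os.IsPermission(err):
		return errFileAccessDenied
	case isSysErrTooManyFiles(err):
		return errTooManyOpenFiles
	default:
		return err
	}
}
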
// Lookup if directory exists, returns directory attributes upon success. // Lookup if directory exists, returns directory attributes upon success.
func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) { func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
fi, err := fsStat(ctx, statDir) fi, err := fsStat(ctx, statDir)
if err != nil { if err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
if err != errFileNotFound { if err != errFileNotFound {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
@ -232,7 +206,7 @@ func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) { func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) {
fi, err := fsStat(ctx, statFile) fi, err := fsStat(ctx, statFile)
if err != nil { if err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
if err != errFileNotFound { if err != errFileNotFound {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
@ -267,13 +241,13 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
fr, err := os.Open(readPath) fr, err := os.Open(readPath)
if err != nil { if err != nil {
return nil, 0, osErrToFSFileErr(err) return nil, 0, osErrToFileErr(err)
} }
// Stat to get the size of the file at path. // Stat to get the size of the file at path.
st, err := fr.Stat() st, err := fr.Stat()
if err != nil { if err != nil {
err = osErrToFSFileErr(err) err = osErrToFileErr(err)
if err != errFileNotFound { if err != errFileNotFound {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
} }
@ -327,7 +301,7 @@ func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, buf []
} }
writer, err := lock.Open(filePath, flags, 0666) writer, err := lock.Open(filePath, flags, 0666)
if err != nil { if err != nil {
return 0, osErrToFSFileErr(err) return 0, osErrToFileErr(err)
} }
defer writer.Close() defer writer.Close()
@ -399,7 +373,7 @@ func fsSimpleRenameFile(ctx context.Context, sourcePath, destPath string) error
if err := os.Rename(sourcePath, destPath); err != nil { if err := os.Rename(sourcePath, destPath); err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return osErrToFSFileErr(err) return osErrToFileErr(err)
} }
return nil return nil

View File

@ -28,10 +28,10 @@ import (
) )
func TestFSRenameFile(t *testing.T) { func TestFSRenameFile(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -53,10 +53,10 @@ func TestFSRenameFile(t *testing.T) {
} }
func TestFSStats(t *testing.T) { func TestFSStats(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -170,11 +170,11 @@ func TestFSStats(t *testing.T) {
if testCase.srcPath != "" { if testCase.srcPath != "" {
if _, err := fsStatFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol, if _, err := fsStatFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol,
testCase.srcPath)); err != testCase.expectedErr { testCase.srcPath)); err != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Fatalf("TestErasureStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} else { } else {
if _, err := fsStatVolume(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr { if _, err := fsStatVolume(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr {
t.Fatalf("TestPosix case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) t.Fatalf("TestFS case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
} }
} }
} }
@ -182,9 +182,9 @@ func TestFSStats(t *testing.T) {
func TestFSCreateAndOpen(t *testing.T) { func TestFSCreateAndOpen(t *testing.T) {
// Setup test environment. // Setup test environment.
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -246,10 +246,10 @@ func TestFSCreateAndOpen(t *testing.T) {
} }
func TestFSDeletes(t *testing.T) { func TestFSDeletes(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -349,10 +349,10 @@ func TestFSDeletes(t *testing.T) {
} }
func BenchmarkFSDeleteFile(b *testing.B) { func BenchmarkFSDeleteFile(b *testing.B) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
b.Fatalf("Unable to create posix test setup, %s", err) b.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -383,10 +383,10 @@ func BenchmarkFSDeleteFile(b *testing.B) {
// Tests fs removes. // Tests fs removes.
func TestFSRemoves(t *testing.T) { func TestFSRemoves(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)
@ -500,10 +500,10 @@ func TestFSRemoves(t *testing.T) {
} }
func TestFSRemoveMeta(t *testing.T) { func TestFSRemoveMeta(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, fsPath, err := newPosixTestSetup() _, fsPath, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(fsPath) defer os.RemoveAll(fsPath)

View File

@ -31,7 +31,7 @@ func TestFSV1MetadataObjInfo(t *testing.T) {
if objInfo.Size != 0 { if objInfo.Size != 0 {
t.Fatal("Unexpected object info value for Size", objInfo.Size) t.Fatal("Unexpected object info value for Size", objInfo.Size)
} }
if objInfo.ModTime != timeSentinel { if !objInfo.ModTime.Equal(timeSentinel) {
t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime) t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
} }
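
The change just above swaps a != comparison for !ModTime.Equal(timeSentinel). That matters because Go time.Time values carry location and monotonic-clock data, so == and != can report two representations of the same instant as different. A small self-contained illustration (standard library only):

package main

import (
	"fmt"
	"time"
)

func main() {
	t1 := time.Date(2020, 6, 12, 0, 0, 0, 0, time.UTC)
	t2 := t1.In(time.FixedZone("UTC+1", 3600)) // same instant, different location

	fmt.Println(t1 == t2)     // false: struct equality compares wall clock and location
	fmt.Println(t1.Equal(t2)) // true: Equal compares the instant in time
}
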
if objInfo.IsDir { if objInfo.IsDir {
@ -53,7 +53,7 @@ func TestReadFSMetadata(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
@ -88,7 +88,7 @@ func TestWriteFSMetadata(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Unexpected err: ", err) t.Fatal("Unexpected err: ", err)
} }
if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {

View File

@ -252,6 +252,14 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) { startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
return pi, VersionNotFound{
Bucket: srcBucket,
Object: srcObject,
VersionID: srcOpts.VersionID,
}
}
if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil { if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil {
return pi, toObjectErr(err) return pi, toObjectErr(err)
} }
@ -269,6 +277,14 @@ func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, d
// written to the '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return pi, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
data := r.Reader data := r.Reader
if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil { if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil {
return pi, toObjectErr(err, bucket) return pi, toObjectErr(err, bucket)
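
CopyObjectPart and PutObjectPart above both gain the same guard: the FS backend keeps only the unversioned ("null") copy of an object, so any other explicit version ID must fail. A sketch of that repeated pattern, pulled out into a hypothetical helper (rejectNonNullVersion is not in the diff; VersionNotFound, nullVersionID and ObjectOptions are):

// Hypothetical helper capturing the guard repeated across the FS handlers.
func rejectNonNullVersion(bucket, object string, opts ObjectOptions) error {
	if opts.VersionID != "" && opts.VersionID != nullVersionID {
		// FS has no versioned objects, so the requested version cannot exist.
		return VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
	}
	return nil
}
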

View File

@ -40,7 +40,7 @@ func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) {
// Create a context we can cancel. // Create a context we can cancel.
ctx, cancel := context.WithCancel(GlobalContext) ctx, cancel := context.WithCancel(GlobalContext)
obj.MakeBucketWithLocation(ctx, bucketName, "", false) obj.MakeBucketWithLocation(ctx, bucketName, BucketOptions{})
uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) uploadID, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{})
if err != nil { if err != nil {
@ -81,7 +81,7 @@ func TestNewMultipartUploadFaultyDisk(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -106,7 +106,7 @@ func TestPutObjectPartFaultyDisk(t *testing.T) {
data := []byte("12345") data := []byte("12345")
dataLen := int64(len(data)) dataLen := int64(len(data))
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -139,7 +139,7 @@ func TestCompleteMultipartUploadFaultyDisk(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -172,7 +172,7 @@ func TestCompleteMultipartUpload(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -204,7 +204,7 @@ func TestAbortMultipartUpload(t *testing.T) {
objectName := "object" objectName := "object"
data := []byte("12345") data := []byte("12345")
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }
@ -235,7 +235,7 @@ func TestListMultipartUploadsFaultyDisk(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Cannot create bucket, err: ", err) t.Fatal("Cannot create bucket, err: ", err)
} }

View File

@ -46,10 +46,10 @@ func TestRWPoolLongPath(t *testing.T) {
// Tests all RWPool methods. // Tests all RWPool methods.
func TestRWPool(t *testing.T) { func TestRWPool(t *testing.T) {
// create posix test setup // create xlStorage test setup
_, path, err := newPosixTestSetup() _, path, err := newXLStorageTestSetup()
if err != nil { if err != nil {
t.Fatalf("Unable to create posix test setup, %s", err) t.Fatalf("Unable to create xlStorage test setup, %s", err)
} }
defer os.RemoveAll(path) defer os.RemoveAll(path)

View File

@ -346,7 +346,7 @@ func (fs *FSObjects) crawlBucket(ctx context.Context, bucket string, cache dataU
} }
oi := fsMeta.ToObjectInfo(bucket, object, fi) oi := fsMeta.ToObjectInfo(bucket, object, fi)
sz := item.applyActions(ctx, fs, actionMeta{oi: oi, meta: fsMeta.Meta}) sz := item.applyActions(ctx, fs, actionMeta{oi: oi})
if sz >= 0 { if sz >= 0 {
return sz, nil return sz, nil
} }
@ -382,10 +382,9 @@ func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileI
return st, nil return st, nil
} }
// MakeBucketWithLocation - create a new bucket, returns if it // MakeBucketWithLocation - create a new bucket, returns if it already exists.
// already exists. func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts BucketOptions) error {
func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { if opts.LockEnabled || opts.VersioningEnabled {
if lockEnabled {
return NotImplemented{} return NotImplemented{}
} }
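
The bucket-creation signature collapses the old location string and lockEnabled flag into a single BucketOptions value, and the FS backend rejects both lock and versioning. A hedged sketch of a call site under the new signature; the Location field and the objLayer variable are assumptions, only LockEnabled and VersioningEnabled appear in this hunk:

// Hypothetical call site against any ObjectLayer implementation.
opts := BucketOptions{
	Location:          "us-east-1", // assumed field
	LockEnabled:       false,
	VersioningEnabled: true,
}
if err := objLayer.MakeBucketWithLocation(ctx, "versioned-bucket", opts); err != nil {
	// FS returns NotImplemented{}; erasure-coded setups are expected to succeed.
	return err
}
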
@ -581,6 +580,14 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, forceDelet
// if source object and destination object are same we only // if source object and destination object are same we only
// update metadata. // update metadata.
func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) { func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
return oi, VersionNotFound{
Bucket: srcBucket,
Object: srcObject,
VersionID: srcOpts.VersionID,
}
}
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject)) cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
defer ObjectPathUpdated(path.Join(dstBucket, dstObject)) defer ObjectPathUpdated(path.Join(dstBucket, dstObject))
@ -649,6 +656,13 @@ func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBu
// GetObjectNInfo - returns object info and a reader for object // GetObjectNInfo - returns object info and a reader for object
// content. // content.
func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return nil, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
if err = checkGetObjArgs(ctx, bucket, object); err != nil { if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return nil, err return nil, err
} }
@ -746,6 +760,14 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
// startOffset indicates the starting read location of the object. // startOffset indicates the starting read location of the object.
// length indicates the total length of the object. // length indicates the total length of the object.
func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) { func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offset int64, length int64, writer io.Writer, etag string, opts ObjectOptions) (err error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
if err = checkGetObjArgs(ctx, bucket, object); err != nil { if err = checkGetObjArgs(ctx, bucket, object); err != nil {
return err return err
} }
@ -948,6 +970,13 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s
// GetObjectInfo - reads object metadata and replies back ObjectInfo. // GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return oi, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
atomic.AddInt64(&fs.activeIOCount, 1) atomic.AddInt64(&fs.activeIOCount, 1)
defer func() { defer func() {
@ -998,6 +1027,10 @@ func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent strin
// Additionally writes `fs.json` which carries the necessary metadata // Additionally writes `fs.json` which carries the necessary metadata
// for future object operations. // for future object operations.
func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) { func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
if opts.Versioned {
return objInfo, NotImplemented{}
}
if err := checkPutObjectArgs(ctx, bucket, object, fs, r.Size()); err != nil { if err := checkPutObjectArgs(ctx, bucket, object, fs, r.Size()); err != nil {
return ObjectInfo{}, err return ObjectInfo{}, err
} }
@ -1146,26 +1179,45 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
// DeleteObjects - deletes multiple objects from a bucket; this operation is
// destructive and there are no rollbacks supported.
func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
dobjects := make([]DeletedObject, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = fs.DeleteObject(ctx, bucket, object) if object.VersionID != "" {
errs[idx] = NotImplemented{}
continue
}
_, errs[idx] = fs.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil || isErrObjectNotFound(errs[idx]) {
dobjects[idx] = DeletedObject{
ObjectName: object.ObjectName,
}
errs[idx] = nil
}
} }
return errs, nil return dobjects, errs
} }
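
Callers of DeleteObjects now receive a per-object result pair instead of a single error: a DeletedObject entry on success and a per-index error otherwise. A short usage sketch with the field names used in this diff (bucket and object names are placeholders):

toDelete := []ObjectToDelete{
	{ObjectName: "a.txt"},
	{ObjectName: "b.txt", VersionID: "some-version"}, // FS rejects explicit versions
}
deleted, errs := fs.DeleteObjects(ctx, "mybucket", toDelete, ObjectOptions{})
for i := range toDelete {
	if errs[i] != nil {
		// e.g. NotImplemented{} for the versioned delete on the FS backend
		continue
	}
	_ = deleted[i].ObjectName // successfully deleted (or already absent)
}
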
// DeleteObject - deletes an object from a bucket; this operation is
// destructive and there are no rollbacks supported.
func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) error { func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return objInfo, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
// Acquire a write lock before deleting the object. // Acquire a write lock before deleting the object.
lk := fs.NewNSLock(ctx, bucket, object) lk := fs.NewNSLock(ctx, bucket, object)
if err := lk.GetLock(globalOperationTimeout); err != nil { if err = lk.GetLock(globalOperationTimeout); err != nil {
return err return objInfo, err
} }
defer lk.Unlock() defer lk.Unlock()
if err := checkDelObjArgs(ctx, bucket, object); err != nil { if err = checkDelObjArgs(ctx, bucket, object); err != nil {
return err return objInfo, err
} }
defer ObjectPathUpdated(path.Join(bucket, object)) defer ObjectPathUpdated(path.Join(bucket, object))
@ -1175,8 +1227,8 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
atomic.AddInt64(&fs.activeIOCount, -1) atomic.AddInt64(&fs.activeIOCount, -1)
}() }()
if _, err := fs.statBucketDir(ctx, bucket); err != nil { if _, err = fs.statBucketDir(ctx, bucket); err != nil {
return toObjectErr(err, bucket) return objInfo, toObjectErr(err, bucket)
} }
minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket) minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket)
@ -1189,23 +1241,23 @@ func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) er
} }
if lerr != nil && lerr != errFileNotFound { if lerr != nil && lerr != errFileNotFound {
logger.LogIf(ctx, lerr) logger.LogIf(ctx, lerr)
return toObjectErr(lerr, bucket, object) return objInfo, toObjectErr(lerr, bucket, object)
} }
} }
// Delete the object. // Delete the object.
if err := fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil { if err = fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
return toObjectErr(err, bucket, object) return objInfo, toObjectErr(err, bucket, object)
} }
if bucket != minioMetaBucket { if bucket != minioMetaBucket {
// Delete the metadata object. // Delete the metadata object.
err := fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath) err = fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath)
if err != nil && err != errFileNotFound { if err != nil && err != errFileNotFound {
return toObjectErr(err, bucket, object) return objInfo, toObjectErr(err, bucket, object)
} }
} }
return nil return ObjectInfo{Bucket: bucket, Name: object}, nil
} }
// Returns function "listDir" of the type listDirFunc. // Returns function "listDir" of the type listDirFunc.
@ -1313,6 +1365,11 @@ func (fs *FSObjects) getObjectETag(ctx context.Context, bucket, entry string, lo
return extractETag(fsMeta.Meta), nil return extractETag(fsMeta.Meta), nil
} }
// ListObjectVersions not implemented for FS mode.
func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) {
return loi, NotImplemented{}
}
// ListObjects - list all objects at prefix, up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
@ -1327,7 +1384,14 @@ func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, de
} }
// GetObjectTags - get object tags from an existing object // GetObjectTags - get object tags from an existing object
func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return nil, VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
oi, err := fs.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) oi, err := fs.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
@ -1337,7 +1401,15 @@ func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string) (
} }
// PutObjectTags - replace or add tags to an existing object // PutObjectTags - replace or add tags to an existing object
func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
if opts.VersionID != "" && opts.VersionID != nullVersionID {
return VersionNotFound{
Bucket: bucket,
Object: object,
VersionID: opts.VersionID,
}
}
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
fsMeta := fsMetaV1{} fsMeta := fsMetaV1{}
wlk, err := fs.rwPool.Write(fsMetaPath) wlk, err := fs.rwPool.Write(fsMetaPath)
@ -1369,30 +1441,30 @@ func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, t
} }
// DeleteObjectTags - delete object tags from an existing object // DeleteObjectTags - delete object tags from an existing object
func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string) error { func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
return fs.PutObjectTags(ctx, bucket, object, "") return fs.PutObjectTags(ctx, bucket, object, "", opts)
} }
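
GetObjectTags, PutObjectTags and DeleteObjectTags all gain an ObjectOptions parameter so a specific version can be targeted; on FS, anything other than the null version fails with VersionNotFound, as the guards above show. A hypothetical call; the URL-query style of the tag string is an assumption, not shown in this hunk:

// Tag the unversioned object; passing a real VersionID here would fail on FS.
if err := fs.PutObjectTags(ctx, "mybucket", "a.txt", "project=minio&tier=hot", ObjectOptions{}); err != nil {
	return err
}
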
// ReloadFormat - no-op for fs, Valid only for XL. // ReloadFormat - no-op for fs, Valid only for Erasure.
func (fs *FSObjects) ReloadFormat(ctx context.Context, dryRun bool) error { func (fs *FSObjects) ReloadFormat(ctx context.Context, dryRun bool) error {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }
// HealFormat - no-op for fs, Valid only for XL. // HealFormat - no-op for fs, Valid only for Erasure.
func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return madmin.HealResultItem{}, NotImplemented{} return madmin.HealResultItem{}, NotImplemented{}
} }
// HealObject - no-op for fs. Valid only for XL. // HealObject - no-op for fs. Valid only for Erasure.
func (fs *FSObjects) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) ( func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (
res madmin.HealResultItem, err error) { res madmin.HealResultItem, err error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return res, NotImplemented{} return res, NotImplemented{}
} }
// HealBucket - no-op for fs, Valid only for XL. // HealBucket - no-op for fs, Valid only for Erasure.
func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem, func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, dryRun, remove bool) (madmin.HealResultItem,
error) { error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
@ -1408,13 +1480,13 @@ func (fs *FSObjects) Walk(ctx context.Context, bucket, prefix string, results ch
return fsWalk(ctx, fs, bucket, prefix, fs.listDirFactory(), results, fs.getObjectInfo, fs.getObjectInfo) return fsWalk(ctx, fs, bucket, prefix, fs.listDirFactory(), results, fs.getObjectInfo, fs.getObjectInfo)
} }
// HealObjects - no-op for fs. Valid only for XL. // HealObjects - no-op for fs. Valid only for Erasure.
func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }
// ListBucketsHeal - list all buckets to be healed. Valid only for XL // ListBucketsHeal - list all buckets to be healed. Valid only for Erasure
func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) { func (fs *FSObjects) ListBucketsHeal(ctx context.Context) ([]BucketInfo, error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return []BucketInfo{}, NotImplemented{} return []BucketInfo{}, NotImplemented{}

View File

@ -36,7 +36,7 @@ func TestFSParentDirIsObject(t *testing.T) {
bucketName := "testbucket" bucketName := "testbucket"
objectName := "object" objectName := "object"
if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
objectContent := "12345" objectContent := "12345"
@ -124,7 +124,7 @@ func TestFSShutdown(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
objectContent := "12345" objectContent := "12345"
obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{})
obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
return fs, disk return fs, disk
} }
@ -138,7 +138,7 @@ func TestFSShutdown(t *testing.T) {
// Test Shutdown with faulty disk // Test Shutdown with faulty disk
fs, disk = prepareTest() fs, disk = prepareTest()
fs.DeleteObject(GlobalContext, bucketName, objectName) fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{})
os.RemoveAll(disk) os.RemoveAll(disk)
if err := fs.Shutdown(GlobalContext); err != nil { if err := fs.Shutdown(GlobalContext); err != nil {
t.Fatal("Got unexpected fs shutdown error: ", err) t.Fatal("Got unexpected fs shutdown error: ", err)
@ -155,12 +155,12 @@ func TestFSGetBucketInfo(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
err := obj.MakeBucketWithLocation(GlobalContext, "a", "", false) err := obj.MakeBucketWithLocation(GlobalContext, "a", BucketOptions{})
if !isSameType(err, BucketNameInvalid{}) { if !isSameType(err, BucketNameInvalid{}) {
t.Fatal("BucketNameInvalid error not returned") t.Fatal("BucketNameInvalid error not returned")
} }
err = obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) err = obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{})
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -199,7 +199,7 @@ func TestFSPutObject(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "1/2/3/4/object" objectName := "1/2/3/4/object"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
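
Throughout these tests the old (location string, lockEnabled bool) argument pair becomes a single options struct. Below is a rough, illustrative stand-in for that struct, limited to the fields this diff actually exercises (Location, LockEnabled, VersioningEnabled); the real definition lives in the object-layer API types:

package main

import "fmt"

// BucketOptions is a simplified stand-in for the options struct introduced by
// this change; the field names are taken from how the gateways use it below.
type BucketOptions struct {
	Location          string // bucket location/region, e.g. "us-east-1"
	LockEnabled       bool   // object locking requested at bucket creation
	VersioningEnabled bool   // versioning requested at bucket creation
}

func main() {
	// Tests that previously passed ("", false) now pass a zero-value struct,
	// while callers that need a region or locking set the fields explicitly.
	opts := BucketOptions{Location: "us-east-1", LockEnabled: true}
	fmt.Printf("create bucket with opts: %+v\n", opts)
}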
@ -267,33 +267,33 @@ func TestFSDeleteObject(t *testing.T) {
bucketName := "bucket" bucketName := "bucket"
objectName := "object" objectName := "object"
obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{})
obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}) obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
// Test with invalid bucket name // Test with invalid bucket name
if err := fs.DeleteObject(GlobalContext, "fo", objectName); !isSameType(err, BucketNameInvalid{}) { if _, err := fs.DeleteObject(GlobalContext, "fo", objectName, ObjectOptions{}); !isSameType(err, BucketNameInvalid{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with bucket does not exist // Test with bucket does not exist
if err := fs.DeleteObject(GlobalContext, "foobucket", "fooobject"); !isSameType(err, BucketNotFound{}) { if _, err := fs.DeleteObject(GlobalContext, "foobucket", "fooobject", ObjectOptions{}); !isSameType(err, BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with invalid object name // Test with invalid object name
if err := fs.DeleteObject(GlobalContext, bucketName, "\\"); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) { if _, err := fs.DeleteObject(GlobalContext, bucketName, "\\", ObjectOptions{}); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with object does not exist. // Test with object does not exist.
if err := fs.DeleteObject(GlobalContext, bucketName, "foooobject"); !isSameType(err, ObjectNotFound{}) { if _, err := fs.DeleteObject(GlobalContext, bucketName, "foooobject", ObjectOptions{}); !isSameType(err, ObjectNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Test with valid condition // Test with valid condition
if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
// Delete object should err disk not found. // Delete object should err disk not found.
os.RemoveAll(disk) os.RemoveAll(disk)
if err := fs.DeleteObject(GlobalContext, bucketName, objectName); err != nil { if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil {
if !isSameType(err, BucketNotFound{}) { if !isSameType(err, BucketNotFound{}) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -311,7 +311,7 @@ func TestFSDeleteBucket(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{})
if err != nil { if err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -330,7 +330,7 @@ func TestFSDeleteBucket(t *testing.T) {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false) obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{})
// Delete bucket should get error disk not found. // Delete bucket should get error disk not found.
os.RemoveAll(disk) os.RemoveAll(disk)
@ -351,7 +351,7 @@ func TestFSListBuckets(t *testing.T) {
fs := obj.(*FSObjects) fs := obj.(*FSObjects)
bucketName := "bucket" bucketName := "bucket"
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, "", false); err != nil { if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, BucketOptions{}); err != nil {
t.Fatal("Unexpected error: ", err) t.Fatal("Unexpected error: ", err)
} }
@ -389,7 +389,7 @@ func TestFSHealObject(t *testing.T) {
defer os.RemoveAll(disk) defer os.RemoveAll(disk)
obj := initFSObjects(disk, t) obj := initFSObjects(disk, t)
_, err := obj.HealObject(GlobalContext, "bucket", "object", madmin.HealOpts{}) _, err := obj.HealObject(GlobalContext, "bucket", "object", "", madmin.HealOpts{})
if err == nil || !isSameType(err, NotImplemented{}) { if err == nil || !isSameType(err, NotImplemented{}) {
t.Fatalf("Heal Object should return NotImplemented error ") t.Fatalf("Heal Object should return NotImplemented error ")
} }

View File

@ -55,42 +55,6 @@ var (
IsStringEqual = isStringEqual IsStringEqual = isStringEqual
) )
// StatInfo - alias for statInfo
type StatInfo struct {
statInfo
}
// AnonErrToObjectErr - converts standard http codes into meaningful object layer errors.
func AnonErrToObjectErr(statusCode int, params ...string) error {
bucket := ""
object := ""
if len(params) >= 1 {
bucket = params[0]
}
if len(params) == 2 {
object = params[1]
}
switch statusCode {
case http.StatusNotFound:
if object != "" {
return ObjectNotFound{bucket, object}
}
return BucketNotFound{Bucket: bucket}
case http.StatusBadRequest:
if object != "" {
return ObjectNameInvalid{bucket, object}
}
return BucketNameInvalid{Bucket: bucket}
case http.StatusForbidden:
fallthrough
case http.StatusUnauthorized:
return AllAccessDisabled{bucket, object}
}
return errUnexpected
}
// FromMinioClientMetadata converts minio metadata to map[string]string // FromMinioClientMetadata converts minio metadata to map[string]string
func FromMinioClientMetadata(metadata map[string][]string) map[string]string { func FromMinioClientMetadata(metadata map[string][]string) map[string]string {
mm := map[string]string{} mm := map[string]string{}

View File

@ -26,6 +26,7 @@ import (
bucketsse "github.com/minio/minio/pkg/bucket/encryption" bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle" "github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/versioning"
"github.com/minio/minio/pkg/madmin" "github.com/minio/minio/pkg/madmin"
) )
@ -88,6 +89,12 @@ func (a GatewayUnsupported) GetMultipartInfo(ctx context.Context, bucket string,
return MultipartInfo{}, NotImplemented{} return MultipartInfo{}, NotImplemented{}
} }
// ListObjectVersions lists the versions of objects in the specified bucket
func (a GatewayUnsupported) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
logger.LogIf(ctx, NotImplemented{})
return ListObjectVersionsInfo{}, NotImplemented{}
}
// ListObjectParts returns all object parts for specified object in specified bucket // ListObjectParts returns all object parts for specified object in specified bucket
func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) { func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
@ -121,33 +128,45 @@ func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket strin
return NotImplemented{} return NotImplemented{}
} }
// SetBucketLifecycle sets lifecycle on bucket // SetBucketVersioning enables versioning on a bucket.
func (a GatewayUnsupported) SetBucketVersioning(ctx context.Context, bucket string, v *versioning.Versioning) error {
logger.LogIf(ctx, NotImplemented{})
return NotImplemented{}
}
// GetBucketVersioning retrieves versioning configuration of a bucket.
func (a GatewayUnsupported) GetBucketVersioning(ctx context.Context, bucket string) (*versioning.Versioning, error) {
logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{}
}
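
These two stubs bring bucket-versioning configuration into the gateway interface. The sketch below is illustrative only; it uses a local stand-in for the pkg/bucket/versioning configuration, assuming it mirrors the S3 VersioningConfiguration document with a Status of "Enabled" or "Suspended":

package main

import (
	"encoding/xml"
	"fmt"
)

// versioningConfig is a stand-in for the configuration consumed by
// SetBucketVersioning above; the Status values follow the S3
// VersioningConfiguration document ("Enabled" / "Suspended").
type versioningConfig struct {
	XMLName xml.Name `xml:"VersioningConfiguration"`
	Status  string   `xml:"Status,omitempty"`
}

func main() {
	// What a PutBucketVersioning request body effectively carries.
	doc := `<VersioningConfiguration><Status>Enabled</Status></VersioningConfiguration>`

	var cfg versioningConfig
	if err := xml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	fmt.Println("versioning status:", cfg.Status) // "Enabled"
}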
// SetBucketLifecycle enables lifecycle policies on a bucket.
func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error { func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }
// GetBucketLifecycle will get lifecycle on bucket // GetBucketLifecycle retrieves lifecycle configuration of a bucket.
func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) { func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
return nil, NotImplemented{} return nil, NotImplemented{}
} }
// DeleteBucketLifecycle deletes all lifecycle on bucket // DeleteBucketLifecycle deletes all lifecycle policies on a bucket
func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error { func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
return NotImplemented{} return NotImplemented{}
} }
// GetBucketSSEConfig returns bucket encryption config on given bucket // GetBucketSSEConfig returns bucket encryption config on a bucket
func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) { func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
return nil, NotImplemented{} return nil, NotImplemented{}
} }
// SetBucketSSEConfig sets bucket encryption config on given bucket // SetBucketSSEConfig sets bucket encryption config on a bucket
func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error { func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
return NotImplemented{} return NotImplemented{}
} }
// DeleteBucketSSEConfig deletes bucket encryption config on given bucket // DeleteBucketSSEConfig deletes bucket encryption config on a bucket
func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error { func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
return NotImplemented{} return NotImplemented{}
} }
@ -173,7 +192,7 @@ func (a GatewayUnsupported) ListBucketsHeal(ctx context.Context) (buckets []Buck
} }
// HealObject - Not implemented stub // HealObject - Not implemented stub
func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) { func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) {
return h, NotImplemented{} return h, NotImplemented{}
} }
@ -188,7 +207,7 @@ func (a GatewayUnsupported) Walk(ctx context.Context, bucket, prefix string, res
} }
// HealObjects - Not implemented stub // HealObjects - Not implemented stub
func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn healObjectFn) (e error) { func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) {
return NotImplemented{} return NotImplemented{}
} }
@ -205,19 +224,19 @@ func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*Metrics, error) {
} }
// PutObjectTags - not implemented. // PutObjectTags - not implemented.
func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string) error { func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) error {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }
// GetObjectTags - not implemented. // GetObjectTags - not implemented.
func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string) (*tags.Tags, error) { func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return nil, NotImplemented{} return nil, NotImplemented{}
} }
// DeleteObjectTags - not implemented. // DeleteObjectTags - not implemented.
func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string) error { func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) error {
logger.LogIf(ctx, NotImplemented{}) logger.LogIf(ctx, NotImplemented{})
return NotImplemented{} return NotImplemented{}
} }

View File

@ -553,8 +553,8 @@ func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.Storag
} }
// MakeBucketWithLocation - Create a new container on azure backend. // MakeBucketWithLocation - Create a new container on azure backend.
func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
if lockEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{} return minio.NotImplemented{}
} }
@ -966,21 +966,30 @@ func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, des
// DeleteObject - Deletes a blob on azure container, uses Azure // DeleteObject - Deletes a blob on azure container, uses Azure
// equivalent `BlobURL.Delete`. // equivalent `BlobURL.Delete`.
func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string) error { func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
blob := a.client.NewContainerURL(bucket).NewBlobURL(object) blob := a.client.NewContainerURL(bucket).NewBlobURL(object)
_, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) _, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
if err != nil { if err != nil {
return azureToObjectError(err, bucket, object) return minio.ObjectInfo{}, azureToObjectError(err, bucket, object)
} }
return nil return minio.ObjectInfo{
Bucket: bucket,
Name: object,
}, nil
} }
func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = a.DeleteObject(ctx, bucket, object) _, errs[idx] = a.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
} }
return errs, nil return dobjects, errs
} }
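
The bulk-delete contract changes here: instead of ([]error, error), DeleteObjects now returns two parallel slices, one DeletedObject and one error per requested object, where errs[i] == nil means dobjects[i] is valid. A small self-contained sketch of producing and consuming that shape, using simplified stand-ins for the object-layer types:

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the object-layer types used by DeleteObjects above.
type ObjectToDelete struct{ ObjectName string }
type DeletedObject struct{ ObjectName string }

// deleteObjects mimics the new contract: parallel result slices, one entry
// per requested object, errs[i] == nil meaning dobjects[i] is valid.
func deleteObjects(objects []ObjectToDelete) ([]DeletedObject, []error) {
	dobjects := make([]DeletedObject, len(objects))
	errs := make([]error, len(objects))
	for i, obj := range objects {
		if obj.ObjectName == "" {
			errs[i] = errors.New("empty object name")
			continue
		}
		dobjects[i] = DeletedObject{ObjectName: obj.ObjectName}
	}
	return dobjects, errs
}

func main() {
	dobjects, errs := deleteObjects([]ObjectToDelete{{ObjectName: "a.txt"}, {}})
	for i := range errs {
		if errs[i] != nil {
			fmt.Println("failed:", errs[i])
			continue
		}
		fmt.Println("deleted:", dobjects[i].ObjectName)
	}
}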
// ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result.

View File

@ -243,43 +243,6 @@ func TestAzureCodesToObjectError(t *testing.T) {
} }
} }
func TestAnonErrToObjectErr(t *testing.T) {
testCases := []struct {
name string
statusCode int
params []string
wantErr error
}{
{"ObjectNotFound",
http.StatusNotFound,
[]string{"testBucket", "testObject"},
minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNotFound",
http.StatusNotFound,
[]string{"testBucket", ""},
minio.BucketNotFound{Bucket: "testBucket"},
},
{"ObjectNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", "testObject"},
minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
},
{"BucketNameInvalid",
http.StatusBadRequest,
[]string{"testBucket", ""},
minio.BucketNameInvalid{Bucket: "testBucket"},
},
}
for _, test := range testCases {
t.Run(test.name, func(t *testing.T) {
if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
}
})
}
}
func TestCheckAzureUploadID(t *testing.T) { func TestCheckAzureUploadID(t *testing.T) {
invalidUploadIDs := []string{ invalidUploadIDs := []string{
"123456789abcdefg", "123456789abcdefg",

View File

@ -421,14 +421,15 @@ func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageI
} }
// MakeBucketWithLocation - Create a new bucket on GCS backend. // MakeBucketWithLocation - Create a new bucket on GCS backend.
func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
if lockEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{} return minio.NotImplemented{}
} }
bkt := l.client.Bucket(bucket) bkt := l.client.Bucket(bucket)
// we'll default to the us multi-region in case of us-east-1 // we'll default to the us multi-region in case of us-east-1
location := opts.Location
if location == "us-east-1" { if location == "us-east-1" {
location = "us" location = "us"
} }
@ -958,22 +959,31 @@ func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject
} }
// DeleteObject - Deletes a blob in bucket // DeleteObject - Deletes a blob in bucket
func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error { func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
err := l.client.Bucket(bucket).Object(object).Delete(ctx) err := l.client.Bucket(bucket).Object(object).Delete(ctx)
if err != nil { if err != nil {
logger.LogIf(ctx, err) logger.LogIf(ctx, err)
return gcsToObjectError(err, bucket, object) return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object)
} }
return nil return minio.ObjectInfo{
Bucket: bucket,
Name: object,
}, nil
} }
func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = l.DeleteObject(ctx, bucket, object) _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
} }
return errs, nil return dobjects, errs
} }
// NewMultipartUpload - upload object in multiple parts // NewMultipartUpload - upload object in multiple parts

View File

@ -75,7 +75,7 @@ EXAMPLES:
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
{{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
@ -283,8 +283,8 @@ func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, forceDele
return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket) return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket)
} }
func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
if lockEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{} return minio.NotImplemented{}
} }
@ -439,16 +439,26 @@ func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continu
}, nil }, nil
} }
func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string) error { func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
return hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) err := hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object)
return minio.ObjectInfo{
Bucket: bucket,
Name: object,
}, err
} }
func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = n.DeleteObject(ctx, bucket, object) _, errs[idx] = n.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
} }
return errs, nil return dobjects, errs
} }
func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {

View File

@ -258,8 +258,8 @@ func getPartMetaPath(object, uploadID string, partID int) string {
} }
// deletes the custom dare metadata file saved at the backend // deletes the custom dare metadata file saved at the backend
func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) error { func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) (minio.ObjectInfo, error) {
return l.s3Objects.DeleteObject(ctx, bucket, metaFileName) return l.s3Objects.DeleteObject(ctx, bucket, metaFileName, minio.ObjectOptions{})
} }
func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
@ -381,14 +381,14 @@ func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObje
// DeleteObject deletes a blob in bucket // DeleteObject deletes a blob in bucket
// For custom gateway encrypted large objects, cleans up encrypted content and metadata files // For custom gateway encrypted large objects, cleans up encrypted content and metadata files
// from the backend. // from the backend.
func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string) error { func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
// Get dare meta json // Get dare meta json
if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil { if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil {
return l.s3Objects.DeleteObject(ctx, bucket, object) logger.LogIf(minio.GlobalContext, err)
return l.s3Objects.DeleteObject(ctx, bucket, object, opts)
} }
// delete encrypted object // delete encrypted object
l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object)) l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object), opts)
return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
} }
@ -446,7 +446,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri
} }
if opts.ServerSideEncryption == nil { if opts.ServerSideEncryption == nil {
defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts)
return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined}) return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined})
} }
@ -470,7 +470,7 @@ func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object stri
} }
objInfo = gwMeta.ToObjectInfo(bucket, object) objInfo = gwMeta.ToObjectInfo(bucket, object)
// delete any unencrypted content of the same name created previously // delete any unencrypted content of the same name created previously
l.s3Objects.DeleteObject(ctx, bucket, object) l.s3Objects.DeleteObject(ctx, bucket, object, opts)
return objInfo, nil return objInfo, nil
} }
@ -586,7 +586,7 @@ func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string,
return minio.InvalidUploadID{UploadID: uploadID} return minio.InvalidUploadID{UploadID: uploadID}
} }
for _, obj := range loi.Objects { for _, obj := range loi.Objects {
if err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name); err != nil { if _, err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name, minio.ObjectOptions{}); err != nil {
return minio.ErrorRespToObjectError(err) return minio.ErrorRespToObjectError(err)
} }
startAfter = obj.Name startAfter = obj.Name
@ -608,7 +608,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
if e == nil { if e == nil {
// delete any encrypted version of object that might exist // delete any encrypted version of object that might exist
defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts)
} }
return oi, e return oi, e
} }
@ -640,7 +640,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
} }
//delete any unencrypted version of object that might be on the backend //delete any unencrypted version of object that might be on the backend
defer l.s3Objects.DeleteObject(ctx, bucket, object) defer l.s3Objects.DeleteObject(ctx, bucket, object, opts)
// Save the final object size and modtime. // Save the final object size and modtime.
gwMeta.Stat.Size = objectSize gwMeta.Stat.Size = objectSize
@ -665,7 +665,7 @@ func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, obje
break break
} }
startAfter = obj.Name startAfter = obj.Name
l.s3Objects.DeleteObject(ctx, bucket, obj.Name) l.s3Objects.DeleteObject(ctx, bucket, obj.Name, opts)
} }
continuationToken = loi.NextContinuationToken continuationToken = loi.NextContinuationToken
if !loi.IsTruncated || done { if !loi.IsTruncated || done {
@ -716,7 +716,7 @@ func (l *s3EncObjects) cleanupStaleEncMultipartUploadsOnGW(ctx context.Context,
for _, b := range buckets { for _, b := range buckets {
expParts := l.getStalePartsForBucket(ctx, b.Name, expiry) expParts := l.getStalePartsForBucket(ctx, b.Name, expiry)
for k := range expParts { for k := range expParts {
l.s3Objects.DeleteObject(ctx, b.Name, k) l.s3Objects.DeleteObject(ctx, b.Name, k, minio.ObjectOptions{})
} }
} }
} }
@ -783,7 +783,7 @@ func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string, forceDel
} }
} }
for k := range expParts { for k := range expParts {
l.s3Objects.DeleteObject(ctx, bucket, k) l.s3Objects.DeleteObject(ctx, bucket, k, minio.ObjectOptions{})
} }
err := l.Client.RemoveBucket(bucket) err := l.Client.RemoveBucket(bucket)
if err != nil { if err != nil {

View File

@ -287,8 +287,8 @@ func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageIn
} }
// MakeBucketWithLocation creates a new bucket on the S3 backend. // MakeBucketWithLocation creates a new bucket on the S3 backend.
func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
if lockEnabled { if opts.LockEnabled || opts.VersioningEnabled {
return minio.NotImplemented{} return minio.NotImplemented{}
} }
@ -302,7 +302,7 @@ func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location
if s3utils.CheckValidBucketName(bucket) != nil { if s3utils.CheckValidBucketName(bucket) != nil {
return minio.BucketNameInvalid{Bucket: bucket} return minio.BucketNameInvalid{Bucket: bucket}
} }
err := l.Client.MakeBucket(bucket, location) err := l.Client.MakeBucket(bucket, opts.Location)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(err, bucket) return minio.ErrorRespToObjectError(err, bucket)
} }
@ -518,21 +518,30 @@ func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject
} }
// DeleteObject deletes a blob in bucket // DeleteObject deletes a blob in bucket
func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error { func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
err := l.Client.RemoveObject(bucket, object) err := l.Client.RemoveObject(bucket, object)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(err, bucket, object) return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
} }
return nil return minio.ObjectInfo{
Bucket: bucket,
Name: object,
}, nil
} }
func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
errs := make([]error, len(objects)) errs := make([]error, len(objects))
dobjects := make([]minio.DeletedObject, len(objects))
for idx, object := range objects { for idx, object := range objects {
errs[idx] = l.DeleteObject(ctx, bucket, object) _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts)
if errs[idx] == nil {
dobjects[idx] = minio.DeletedObject{
ObjectName: object.ObjectName,
}
}
} }
return errs, nil return dobjects, errs
} }
// ListMultipartUploads lists all multipart uploads. // ListMultipartUploads lists all multipart uploads.
@ -700,11 +709,10 @@ func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error
} }
// GetObjectTags gets the tags set on the object // GetObjectTags gets the tags set on the object
func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string) (*tags.Tags, error) { func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (*tags.Tags, error) {
var err error var err error
var tagObj *tags.Tags var tagObj *tags.Tags
var tagStr string var tagStr string
var opts minio.ObjectOptions
if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil { if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil {
return nil, minio.ErrorRespToObjectError(err, bucket, object) return nil, minio.ErrorRespToObjectError(err, bucket, object)
@ -721,7 +729,7 @@ func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object str
} }
// PutObjectTags attaches the tags to the object // PutObjectTags attaches the tags to the object
func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string) error { func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string, opts minio.ObjectOptions) error {
tagObj, err := tags.Parse(tagStr, true) tagObj, err := tags.Parse(tagStr, true)
if err != nil { if err != nil {
return minio.ErrorRespToObjectError(err, bucket, object) return minio.ErrorRespToObjectError(err, bucket, object)
@ -733,7 +741,7 @@ func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, ta
} }
// DeleteObjectTags removes the tags attached to the object // DeleteObjectTags removes the tags attached to the object
func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string) error { func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string, opts minio.ObjectOptions) error {
if err := l.Client.RemoveObjectTagging(bucket, object); err != nil { if err := l.Client.RemoveObjectTagging(bucket, object); err != nil {
return minio.ErrorRespToObjectError(err, bucket, object) return minio.ErrorRespToObjectError(err, bucket, object)
} }

View File

@ -103,7 +103,7 @@ func isHTTPHeaderSizeTooLarge(header http.Header) bool {
length := len(key) + len(header.Get(key)) length := len(key) + len(header.Get(key))
size += length size += length
for _, prefix := range userMetadataKeyPrefixes { for _, prefix := range userMetadataKeyPrefixes {
if HasPrefix(key, prefix) { if strings.HasPrefix(strings.ToLower(key), prefix) {
usersize += length usersize += length
break break
} }
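
The prefix check above now lower-cases the header key first, so user-defined metadata headers are counted toward the user-metadata size limit regardless of the casing the client sent. A tiny stand-alone illustration; the prefix list here is a hypothetical lowercase subset, not the actual list from this codebase:

package main

import (
	"fmt"
	"strings"
)

// Hypothetical lowercase subset of the user-metadata key prefixes checked above.
var userMetadataKeyPrefixes = []string{"x-amz-meta-", "x-minio-meta-"}

// isUserMetadataKey reports whether a header key carries user metadata,
// matching case-insensitively as the updated handler does.
func isUserMetadataKey(key string) bool {
	lower := strings.ToLower(key)
	for _, prefix := range userMetadataKeyPrefixes {
		if strings.HasPrefix(lower, prefix) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isUserMetadataKey("X-Amz-Meta-Color")) // true, despite mixed case
	fmt.Println(isUserMetadataKey("Content-Type"))     // false
}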
@ -444,74 +444,75 @@ func setIgnoreResourcesHandler(h http.Handler) http.Handler {
return resourceHandler{h} return resourceHandler{h}
} }
var supportedDummyBucketAPIs = map[string][]string{
"acl": {http.MethodPut, http.MethodGet},
"cors": {http.MethodGet},
"website": {http.MethodGet, http.MethodDelete},
"logging": {http.MethodGet},
"accelerate": {http.MethodGet},
"replication": {http.MethodGet},
"requestPayment": {http.MethodGet},
}
// List of not implemented bucket queries
var notImplementedBucketResourceNames = map[string]struct{}{
"cors": {},
"metrics": {},
"website": {},
"logging": {},
"inventory": {},
"accelerate": {},
"replication": {},
"requestPayment": {},
}
// Checks requests for not implemented Bucket resources // Checks requests for not implemented Bucket resources
func ignoreNotImplementedBucketResources(req *http.Request) bool { func ignoreNotImplementedBucketResources(req *http.Request) bool {
for name := range req.URL.Query() { for name := range req.URL.Query() {
// Enable PutBucketACL, GetBucketACL, GetBucketCors, methods, ok := supportedDummyBucketAPIs[name]
// GetBucketWebsite, GetBucketAcccelerate, if ok {
// GetBucketRequestPayment, GetBucketLogging, for _, method := range methods {
// GetBucketLifecycle, GetBucketReplication, if method == req.Method {
// GetBucketTagging, GetBucketVersioning, return false
// DeleteBucketTagging, and DeleteBucketWebsite }
// dummy calls specifically. }
if name == "acl" && req.Method == http.MethodPut {
return false
}
if ((name == "acl" ||
name == "cors" ||
name == "website" ||
name == "accelerate" ||
name == "requestPayment" ||
name == "logging" ||
name == "lifecycle" ||
name == "replication" ||
name == "tagging" ||
name == "versioning") && req.Method == http.MethodGet) ||
((name == "tagging" ||
name == "website") && req.Method == http.MethodDelete) {
return false
} }
if notImplementedBucketResourceNames[name] { if _, ok := notImplementedBucketResourceNames[name]; ok {
return true return true
} }
} }
return false return false
} }
var supportedDummyObjectAPIs = map[string][]string{
"acl": {http.MethodGet, http.MethodPut},
}
// List of not implemented object APIs
var notImplementedObjectResourceNames = map[string]struct{}{
"restore": {},
"torrent": {},
}
// Checks requests for not implemented Object resources // Checks requests for not implemented Object resources
func ignoreNotImplementedObjectResources(req *http.Request) bool { func ignoreNotImplementedObjectResources(req *http.Request) bool {
for name := range req.URL.Query() { for name := range req.URL.Query() {
// Enable Get/PutObjectACL dummy call specifically. methods, ok := supportedDummyObjectAPIs[name]
if name == "acl" && (req.Method == http.MethodGet || req.Method == http.MethodPut) { if ok {
return false for _, method := range methods {
if method == req.Method {
return false
}
}
} }
if notImplementedObjectResourceNames[name] { if _, ok := notImplementedObjectResourceNames[name]; ok {
return true return true
} }
} }
return false return false
} }
// List of not implemented bucket queries
var notImplementedBucketResourceNames = map[string]bool{
"accelerate": true,
"cors": true,
"inventory": true,
"logging": true,
"metrics": true,
"replication": true,
"requestPayment": true,
"versioning": true,
"website": true,
}
// List of not implemented object queries
var notImplementedObjectResourceNames = map[string]bool{
"restore": true,
"torrent": true,
}
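
The rewritten checks replace a long if/else chain with two lookup tables: sub-resources that are answered by dummy handlers for specific HTTP methods, and sub-resources that are rejected as not implemented; versioning drops out of the not-implemented list because this change adds real support for it. A compact stand-alone sketch of the same lookup pattern, using a subset of the entries above:

package main

import (
	"fmt"
	"net/http"
)

// Sub-resources served as dummy (no-op but valid) responses, per HTTP method.
var supportedDummyBucketAPIs = map[string][]string{
	"acl":     {http.MethodPut, http.MethodGet},
	"cors":    {http.MethodGet},
	"website": {http.MethodGet, http.MethodDelete},
}

// Sub-resources rejected as not implemented regardless of method.
var notImplementedBucketResourceNames = map[string]struct{}{
	"cors":    {},
	"metrics": {},
	"website": {},
}

// shouldIgnore reports whether a request for the given sub-resource and
// method should be rejected as not implemented.
func shouldIgnore(name, method string) bool {
	if methods, ok := supportedDummyBucketAPIs[name]; ok {
		for _, m := range methods {
			if m == method {
				return false // served by a dummy handler
			}
		}
	}
	_, notImplemented := notImplementedBucketResourceNames[name]
	return notImplemented
}

func main() {
	fmt.Println(shouldIgnore("cors", http.MethodGet))        // false: dummy GET ?cors is allowed
	fmt.Println(shouldIgnore("cors", http.MethodPut))        // true: PUT ?cors is not implemented
	fmt.Println(shouldIgnore("versioning", http.MethodGet))  // false: versioning is now a real API
}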
// Resource handler ServeHTTP() wrapper // Resource handler ServeHTTP() wrapper
func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (h resourceHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
bucketName, objectName := request2BucketObjectName(r) bucketName, objectName := request2BucketObjectName(r)

View File

@ -199,12 +199,16 @@ var containsReservedMetadataTests = []struct {
} }
func TestContainsReservedMetadata(t *testing.T) { func TestContainsReservedMetadata(t *testing.T) {
for i, test := range containsReservedMetadataTests { for _, test := range containsReservedMetadataTests {
if contains := containsReservedMetadata(test.header); contains && !test.shouldFail { test := test
t.Errorf("Test %d: contains reserved header but should not fail", i) t.Run("", func(t *testing.T) {
} else if !contains && test.shouldFail { contains := containsReservedMetadata(test.header)
t.Errorf("Test %d: does not contain reserved header but failed", i) if contains && !test.shouldFail {
} t.Errorf("contains reserved header but should not fail")
} else if !contains && test.shouldFail {
t.Errorf("does not contain reserved header but failed")
}
})
} }
} }

View File

@ -79,7 +79,7 @@ func getLocalBackgroundHealStatus() madmin.BgHealState {
} }
// healErasureSet lists and heals all objects in a specific erasure set // healErasureSet lists and heals all objects in a specific erasure set
func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesPerSet int) error { func healErasureSet(ctx context.Context, setIndex int, xlObj *erasureObjects, drivesPerSet int) error {
buckets, err := xlObj.ListBuckets(ctx) buckets, err := xlObj.ListBuckets(ctx)
if err != nil { if err != nil {
return err return err
@ -105,32 +105,34 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP
for _, bucket := range buckets { for _, bucket := range buckets {
// Heal current bucket // Heal current bucket
bgSeq.sourceCh <- healSource{ bgSeq.sourceCh <- healSource{
path: bucket.Name, bucket: bucket.Name,
} }
var entryChs []FileInfoCh var entryChs []FileInfoVersionsCh
for _, disk := range xlObj.getLoadBalancedDisks() { for _, disk := range xlObj.getLoadBalancedDisks() {
if disk == nil { if disk == nil {
// Disk can be offline // Disk can be offline
continue continue
} }
entryCh, err := disk.Walk(bucket.Name, "", "", true, xlMetaJSONFile, readMetadata, ctx.Done())
entryCh, err := disk.WalkVersions(bucket.Name, "", "", true, ctx.Done())
if err != nil { if err != nil {
// Disk walk returned error, ignore it. // Disk walk returned error, ignore it.
continue continue
} }
entryChs = append(entryChs, FileInfoCh{
entryChs = append(entryChs, FileInfoVersionsCh{
Ch: entryCh, Ch: entryCh,
}) })
} }
entriesValid := make([]bool, len(entryChs)) entriesValid := make([]bool, len(entryChs))
entries := make([]FileInfo, len(entryChs)) entries := make([]FileInfoVersions, len(entryChs))
for { for {
entry, quorumCount, ok := lexicallySortedEntry(entryChs, entries, entriesValid) entry, quorumCount, ok := lexicallySortedEntryVersions(entryChs, entries, entriesValid)
if !ok { if !ok {
return nil break
} }
if quorumCount == drivesPerSet { if quorumCount == drivesPerSet {
@ -138,8 +140,12 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP
continue continue
} }
bgSeq.sourceCh <- healSource{ for _, version := range entry.Versions {
path: pathJoin(bucket.Name, entry.Name), bgSeq.sourceCh <- healSource{
bucket: bucket.Name,
object: version.Name,
versionID: version.VersionID,
}
} }
} }
} }
@ -148,13 +154,15 @@ func healErasureSet(ctx context.Context, setIndex int, xlObj *xlObjects, drivesP
} }
// deepHealObject heals given object path in deep to fix bitrot. // deepHealObject heals given object path in deep to fix bitrot.
func deepHealObject(objectPath string) { func deepHealObject(bucket, object, versionID string) {
// Get background heal sequence to send elements to heal // Get background heal sequence to send elements to heal
bgSeq, _ := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) bgSeq, _ := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
bgSeq.sourceCh <- healSource{ bgSeq.sourceCh <- healSource{
path: objectPath, bucket: bucket,
opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan}, object: object,
versionID: versionID,
opts: &madmin.HealOpts{ScanMode: madmin.HealDeepScan},
} }
} }
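
With versioning, the heal queue no longer takes a single joined path: each queued item carries bucket, object and versionID separately, and healErasureSet above pushes one item per discovered version. A simplified, self-contained sketch of that fan-out; healSource and fileVersion here are stand-ins mirroring only the fields visible in this diff:

package main

import "fmt"

// healSource is a stand-in for the internal heal queue item; the real struct
// also carries *madmin.HealOpts, as used by deepHealObject above.
type healSource struct {
	bucket    string
	object    string
	versionID string
}

// fileVersion is a stand-in for one version entry returned by the versions
// walk (FileInfoVersions in this diff).
type fileVersion struct {
	Name      string
	VersionID string
}

func main() {
	sourceCh := make(chan healSource, 8)

	bucket := "photos"
	versions := []fileVersion{
		{Name: "cat.jpg", VersionID: "v1"},
		{Name: "cat.jpg", VersionID: "v2"},
	}

	// One heal task per object version, mirroring the loop in healErasureSet.
	for _, v := range versions {
		sourceCh <- healSource{bucket: bucket, object: v.Name, versionID: v.VersionID}
	}
	close(sourceCh)

	for src := range sourceCh {
		fmt.Printf("heal %s/%s (version %s)\n", src.bucket, src.object, src.versionID)
	}
}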
@ -172,7 +180,7 @@ func durationToNextHealRound(lastHeal time.Time) time.Duration {
} }
// Healing leader will take the charge of healing all erasure sets // Healing leader will take the charge of healing all erasure sets
func execLeaderTasks(ctx context.Context, z *xlZones) { func execLeaderTasks(ctx context.Context, z *erasureZones) {
// So that we don't heal immediately, but after one month. // So that we don't heal immediately, but after one month.
lastScanTime := UTCNow() lastScanTime := UTCNow()
// Get background heal sequence to send elements to heal // Get background heal sequence to send elements to heal
@ -211,7 +219,7 @@ func execLeaderTasks(ctx context.Context, z *xlZones) {
} }
func startGlobalHeal(ctx context.Context, objAPI ObjectLayer) { func startGlobalHeal(ctx context.Context, objAPI ObjectLayer) {
zones, ok := objAPI.(*xlZones) zones, ok := objAPI.(*erasureZones)
if !ok { if !ok {
return return
} }

View File

@ -61,8 +61,8 @@ const (
globalNetBSDOSName = "netbsd" globalNetBSDOSName = "netbsd"
globalMacOSName = "darwin" globalMacOSName = "darwin"
globalMinioModeFS = "mode-server-fs" globalMinioModeFS = "mode-server-fs"
globalMinioModeXL = "mode-server-xl" globalMinioModeErasure = "mode-server-xl"
globalMinioModeDistXL = "mode-server-distributed-xl" globalMinioModeDistErasure = "mode-server-distributed-xl"
globalMinioModeGatewayPrefix = "mode-gateway-" globalMinioModeGatewayPrefix = "mode-gateway-"
// Add new global values here. // Add new global values here.
@ -107,13 +107,13 @@ var globalCLIContext = struct {
var ( var (
// Indicates set drive count. // Indicates set drive count.
globalXLSetDriveCount int globalErasureSetDriveCount int
// Indicates if the running minio server is distributed setup. // Indicates if the running minio server is distributed setup.
globalIsDistXL = false globalIsDistErasure = false
// Indicates if the running minio server is an erasure-code backend. // Indicates if the running minio server is an erasure-code backend.
globalIsXL = false globalIsErasure = false
// Indicates if the running minio is in gateway mode. // Indicates if the running minio is in gateway mode.
globalIsGateway = false globalIsGateway = false
@ -215,6 +215,7 @@ var (
globalBucketObjectLockSys *BucketObjectLockSys globalBucketObjectLockSys *BucketObjectLockSys
globalBucketQuotaSys *BucketQuotaSys globalBucketQuotaSys *BucketQuotaSys
globalBucketVersioningSys *BucketVersioningSys
// Disk cache drives // Disk cache drives
globalCacheConfig cache.Config globalCacheConfig cache.Config

View File

@ -445,7 +445,7 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
// gets host name for current node // gets host name for current node
func getHostName(r *http.Request) (hostName string) { func getHostName(r *http.Request) (hostName string) {
if globalIsDistXL { if globalIsDistErasure {
hostName = GetLocalPeer(globalEndpoints) hostName = GetLocalPeer(globalEndpoints)
} else { } else {
hostName = r.Host hostName = r.Host

View File

@ -114,7 +114,7 @@ func Trace(f http.HandlerFunc, logBody bool, w http.ResponseWriter, r *http.Requ
reqBodyRecorder = &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders} reqBodyRecorder = &recordRequest{Reader: r.Body, logBody: logBody, headers: reqHeaders}
r.Body = ioutil.NopCloser(reqBodyRecorder) r.Body = ioutil.NopCloser(reqBodyRecorder)
t.NodeName = r.Host t.NodeName = r.Host
if globalIsDistXL { if globalIsDistErasure {
t.NodeName = GetLocalPeer(globalEndpoints) t.NodeName = GetLocalPeer(globalEndpoints)
} }
// strip port from the host address // strip port from the host address
