Mirror of https://github.com/minio/minio.git, synced 2024-12-24 06:05:55 -05:00
fix: apply pre-conditions first on object metadata (#12545)
This change in error flow complies with AWS S3 behavior for applications that depend on specific error conditions. Fixes #12543
parent 9bf1ac0bb6
commit 41caf89cf4
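The gist of the change: the precondition callback (opts.CheckPrecondFn) used to be threaded into the ObjReaderFn closures and evaluated only after a data stream had been set up; it is now evaluated up front against the object's metadata (ObjectInfo), so existence and delete-marker errors are decided before, and independently of, the data path. A rough client-side illustration of the behavior applications rely on — the endpoint, credentials, bucket, key and ETag below are hypothetical, and this is a sketch, not a test taken from the PR:

package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical endpoint and credentials, for illustration only.
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatal(err)
	}

	opts := minio.GetObjectOptions{}
	// Conditional read: only succeed if the stored ETag matches.
	if err := opts.SetMatchETag("deadbeef"); err != nil {
		log.Fatal(err)
	}

	obj, err := client.GetObject(context.Background(), "testbucket", "missing-key", opts)
	if err == nil {
		_, err = obj.Stat() // conditional and metadata errors surface lazily, on Stat/Read
	}
	// In the post-fix flow the object's existence and metadata are checked first:
	// a missing key surfaces NoSuchKey, and PreconditionFailed (412) is returned
	// only when the object exists and its metadata fails the If-Match check.
	log.Println(minio.ToErrorResponse(err).Code)
}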
@@ -342,7 +342,7 @@ func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs
     closer := func() {
         reader.Close()
     }
-    return fn(reader, h, opts.CheckPrecondFn, closer)
+    return fn(reader, h, closer)
 }

 // RestoreRequestType represents type of restore.
@@ -1122,7 +1122,7 @@ func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs
     }
     closeReader := func() { obj.Close() }

-    reader, err := fn(obj, h, opts.CheckPrecondFn, closeReader)
+    reader, err := fn(obj, h, closeReader)
     if err != nil {
         return nil, false
     }
@@ -963,7 +963,7 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
     // case of incomplete read.
     pipeCloser := func() { pr.CloseWithError(nil) }

-    gr, gerr := fn(pr, h, opts.CheckPrecondFn, pipeCloser)
+    gr, gerr := fn(pr, h, pipeCloser)
     if gerr != nil {
         return gr, numHits, gerr
     }
@@ -212,7 +212,7 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri
         pr.CloseWithError(nil)
     }

-    return fn(pr, h, opts.CheckPrecondFn, pipeCloser, nsUnlocker)
+    return fn(pr, h, pipeCloser, nsUnlocker)
 }

 func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, fi FileInfo, metaArr []FileInfo, onlineDisks []StorageAPI) error {
@@ -617,114 +617,44 @@ func (z *erasureServerPools) GetObjectNInfo(ctx context.Context, bucket, object
         unlockOnDefer = true
     }

-    lockType = noLock // do not take locks at lower levels
     checkPrecondFn := opts.CheckPrecondFn
-    opts.CheckPrecondFn = nil
-    results := make([]struct {
-        zIdx int
-        gr   *GetObjectReader
-        err  error
-    }, len(z.serverPools))
-
-    var wg sync.WaitGroup
-    for i, pool := range z.serverPools {
-        wg.Add(1)
-        go func(i int, pool *erasureSets) {
-            defer wg.Done()
-            results[i].zIdx = i
-            results[i].gr, results[i].err = pool.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
-        }(i, pool)
-    }
-    wg.Wait()
-
-    // Sort the objInfos such that we always serve latest
-    // this is a defensive change to handle any duplicate
-    // content that may have been created, we always serve
-    // the latest object.
-    sort.Slice(results, func(i, j int) bool {
-        var mtimeI, mtimeJ time.Time
-
-        if results[i].gr != nil {
-            mtimeI = results[i].gr.ObjInfo.ModTime
-        }
-        if results[j].gr != nil {
-            mtimeJ = results[j].gr.ObjInfo.ModTime
-        }
-        // On tiebreaks, choose the earliest.
-        if mtimeI.Equal(mtimeJ) {
-            return results[i].zIdx < results[j].zIdx
-        }
-        return mtimeI.After(mtimeJ)
-    })
-
-    var found = -1
-    for i, res := range results {
-        if res.err == nil {
-            // Select earliest result
-            found = i
-            break
-        }
-        if !isErrObjectNotFound(res.err) && !isErrVersionNotFound(res.err) {
-            for _, res := range results {
-                res.gr.Close()
-            }
-            return nil, res.err
-        }
-    }
-
-    if found < 0 {
-        object = decodeDirObject(object)
-        if opts.VersionID != "" {
-            return gr, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
-        }
-        return gr, ObjectNotFound{Bucket: bucket, Object: object}
-    }
-
-    // Check preconditions.
-    if checkPrecondFn != nil && checkPrecondFn(results[found].gr.ObjInfo) {
-        for _, res := range results {
-            res.gr.Close()
-        }
+    opts.CheckPrecondFn = nil // do not need to apply pre-conditions at lower layer.
+    opts.NoLock = true        // no locks needed at lower levels for getObjectInfo()
+    objInfo, zIdx, err := z.getLatestObjectInfoWithIdx(ctx, bucket, object, opts)
+    if err != nil {
+        if objInfo.DeleteMarker {
+            if opts.VersionID == "" {
+                return &GetObjectReader{
+                    ObjInfo: objInfo,
+                }, toObjectErr(errFileNotFound, bucket, object)
+            }
+            // Make sure to return object info to provide extra information.
+            return &GetObjectReader{
+                ObjInfo: objInfo,
+            }, toObjectErr(errMethodNotAllowed, bucket, object)
+        }
+        return nil, err
+    }
+
+    // check preconditions before reading the stream.
+    if checkPrecondFn != nil && checkPrecondFn(objInfo) {
         return nil, PreConditionFailed{}
     }

-    // Close all others
-    for i, res := range results {
-        if i == found {
-            continue
-        }
-        res.gr.Close()
-    }
-
-    return results[found].gr, nil
+    lockType = noLock // do not take locks at lower levels for GetObjectNInfo()
+    return z.serverPools[zIdx].GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
 }

-func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
-    if err = checkGetObjArgs(ctx, bucket, object); err != nil {
-        return objInfo, err
-    }
+// getLatestObjectInfoWithIdx returns the objectInfo of the latest object from multiple pools (this function
+// is present in-case there were duplicate writes to both pools, this function also returns the
+// additional index where the latest object exists, that is used to start the GetObject stream.
+func (z *erasureServerPools) getLatestObjectInfoWithIdx(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, int, error) {

     object = encodeDirObject(object)
-
-    if z.SinglePool() {
-        return z.serverPools[0].GetObjectInfo(ctx, bucket, object, opts)
-    }
-
-    // Lock the object before reading.
-    lk := z.NewNSLock(bucket, object)
-    lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
-    if err != nil {
-        return ObjectInfo{}, err
-    }
-    ctx = lkctx.Context()
-    defer lk.RUnlock(lkctx.Cancel)
-
     results := make([]struct {
         zIdx int
         oi   ObjectInfo
         err  error
     }, len(z.serverPools))
-    opts.NoLock = true // avoid taking locks at lower levels for multi-pool setups.
     var wg sync.WaitGroup
     for i, pool := range z.serverPools {
         wg.Add(1)
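The hunk above replaces the old approach — fanning out full GetObjectNInfo calls to every pool and picking a winner afterwards — with a two-step flow: gather only the ObjectInfo from each pool (getLatestObjectInfoWithIdx), pick the newest copy, apply the precondition check to that metadata, and only then open the stream on the single pool that holds it. A minimal, self-contained sketch of that shape; the names here (objInfo, statAll, errNotFound) are illustrative stand-ins, not MinIO's types:

package main

import (
	"errors"
	"fmt"
	"sort"
	"sync"
	"time"
)

// Simplified stand-in for a per-pool metadata result.
type objInfo struct {
	pool    int
	modTime time.Time
	err     error
}

var errNotFound = errors.New("object not found")

// statAll fans out a metadata lookup to every pool, mirroring the shape of
// getLatestObjectInfoWithIdx: collect results, sort newest-first, and return
// the first usable entry together with its pool index.
func statAll(pools []func() (time.Time, error)) (objInfo, error) {
	results := make([]objInfo, len(pools))
	var wg sync.WaitGroup
	for i, stat := range pools {
		wg.Add(1)
		go func(i int, stat func() (time.Time, error)) {
			defer wg.Done()
			mt, err := stat()
			results[i] = objInfo{pool: i, modTime: mt, err: err}
		}(i, stat)
	}
	wg.Wait()
	sort.Slice(results, func(i, j int) bool {
		if results[i].modTime.Equal(results[j].modTime) {
			return results[i].pool < results[j].pool // tiebreak: earliest pool wins
		}
		return results[i].modTime.After(results[j].modTime)
	})
	for _, res := range results {
		if res.err == nil || !errors.Is(res.err, errNotFound) {
			// Success, or a "real" error (not a simple miss) that must be surfaced.
			return res, res.err
		}
	}
	return objInfo{pool: -1}, errNotFound
}

func main() {
	now := time.Now()
	pools := []func() (time.Time, error){
		func() (time.Time, error) { return time.Time{}, errNotFound },
		func() (time.Time, error) { return now, nil },
	}
	oi, err := statAll(pools)
	fmt.Println(oi.pool, err) // the GET stream would then be served from pool 1
	// Preconditions (If-Match and friends) are evaluated against this metadata
	// before any data is read; only then is GetObjectNInfo called on that pool.
}

The tiebreak on equal modification times follows the diff's sort logic: when duplicate writes exist across pools, the earliest pool index is preferred.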
@@ -748,24 +678,52 @@ func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object s
         }
         return a.oi.ModTime.After(b.oi.ModTime)
     })

     for _, res := range results {
-        // Return first found.
         err := res.err
         if err == nil {
-            return res.oi, nil
+            return res.oi, res.zIdx, nil
         }
         if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
             // some errors such as MethodNotAllowed for delete marker
             // should be returned upwards.
-            return res.oi, err
+            return res.oi, res.zIdx, err
         }
     }

     object = decodeDirObject(object)
     if opts.VersionID != "" {
-        return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
+        return ObjectInfo{}, -1, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID}
     }
-    return objInfo, ObjectNotFound{Bucket: bucket, Object: object}
+    return ObjectInfo{}, -1, ObjectNotFound{Bucket: bucket, Object: object}
+}
+
+func (z *erasureServerPools) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
+    if err = checkGetObjArgs(ctx, bucket, object); err != nil {
+        return objInfo, err
+    }
+
+    object = encodeDirObject(object)
+
+    if z.SinglePool() {
+        return z.serverPools[0].GetObjectInfo(ctx, bucket, object, opts)
+    }
+
+    if !opts.NoLock {
+        opts.NoLock = true // avoid taking locks at lower levels for multi-pool setups.
+
+        // Lock the object before reading.
+        lk := z.NewNSLock(bucket, object)
+        lkctx, err := lk.GetRLock(ctx, globalOperationTimeout)
+        if err != nil {
+            return ObjectInfo{}, err
+        }
+        ctx = lkctx.Context()
+        defer lk.RUnlock(lkctx.Cancel)
+    }
+
+    objInfo, _, err = z.getLatestObjectInfoWithIdx(ctx, bucket, object, opts)
+    return objInfo, err
 }

 // PutObject - writes an object to least used erasure pool.
@@ -806,7 +806,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
         return nil, err
     }

-    return objReaderFn(reader, h, opts.CheckPrecondFn, closeFn, rwPoolUnlocker, nsUnlocker)
+    return objReaderFn(reader, h, closeFn, rwPoolUnlocker, nsUnlocker)
 }

 // Create a new fs.json file, if the existing one is corrupt. Should happen very rarely.
@@ -345,7 +345,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return fn(pr, h, o.CheckPrecondFn, pipeCloser)
+    return fn(pr, h, pipeCloser)
 }

 // GetObjectInfo reads object info and replies back ObjectInfo
@@ -404,7 +404,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return fn(pr, h, opts.CheckPrecondFn, pipeCloser)
+    return fn(pr, h, pipeCloser)
 }

 // GetObject reads an object from S3. Supports additional
@@ -582,7 +582,7 @@ func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions
 // GetObjectReader and an error. Request headers are passed to provide
 // encryption parameters. cleanupFns allow cleanup funcs to be
 // registered for calling after usage of the reader.
-type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)
+type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)

 // NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
 // are called on Close() in FIFO order as passed in ObjReadFn(). NOTE: It is
@@ -591,6 +591,10 @@ type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckPreconditi
 func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
     fn ObjReaderFn, off, length int64, err error) {

+    if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
+        return nil, 0, 0, PreConditionFailed{}
+    }
+
     if rs == nil && opts.PartNumber > 0 {
         rs = partNumberToRangeSpec(oi, opts.PartNumber)
     }
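Together with the ObjReaderFn signature change above (the pcfn parameter is gone), this hunk means the precondition is evaluated once, at the top of NewGetObjectReader, before range validation, decryption setup, or cleanup registration. A small sketch of the kind of callback that plugs into opts.CheckPrecondFn; the trimmed-down types and the ifMatch helper are illustrative assumptions, not MinIO's actual handler code (in MinIO the real checks live in the API layer, and the callback returns true when the precondition fails):

package main

import (
	"fmt"
	"net/http"
)

// Trimmed-down stand-ins for cmd.ObjectInfo and cmd.CheckPreconditionFn,
// just to show the shape of the check that now runs before any reader exists.
type objectInfo struct{ ETag string }

type checkPreconditionFn func(objectInfo) bool

// ifMatch returns a precondition function that fails (returns true) when the
// request's If-Match header is set and does not match the stored ETag.
func ifMatch(h http.Header) checkPreconditionFn {
	return func(oi objectInfo) bool {
		want := h.Get("If-Match")
		return want != "" && want != oi.ETag
	}
}

func main() {
	h := http.Header{}
	h.Set("If-Match", `"abc123"`)
	check := ifMatch(h)

	oi := objectInfo{ETag: `"abc123"`}
	fmt.Println(check(oi)) // false: precondition holds, proceed to build the reader

	stale := objectInfo{ETag: `"zzz999"`}
	fmt.Println(check(stale)) // true: NewGetObjectReader now fails fast with PreConditionFailed
}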
@@ -648,21 +652,15 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
             return nil, 0, 0, errInvalidRange
         }
     }
-    fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
-        if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
-            for _, cFn := range cFns {
-                cFn()
-            }
-            return nil, PreConditionFailed{}
-        }
+    fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
         if isEncrypted {
             copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
             // Attach decrypter on inputReader
             inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, firstPart, oi, copySource)
             if err != nil {
                 // Call the cleanup funcs
-                for _, cFn := range cFns {
-                    cFn()
+                for i := len(cFns) - 1; i >= 0; i-- {
+                    cFns[i]()
                 }
                 return nil, err
             }
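This hunk and the next also flip the error-path cleanup loops from forward to reverse iteration, presumably so that cleanup functions registered last — the innermost resources such as pipe readers — run before the ones registered first, such as lock releases, much like stacked defers. A tiny illustration with made-up cleanup functions:

package main

import "fmt"

// Minimal illustration of the cleanup ordering used on the error paths above:
// functions registered first are released last, like stacked defer calls.
func runCleanups(cFns []func()) {
	for i := len(cFns) - 1; i >= 0; i-- {
		cFns[i]()
	}
}

func main() {
	cleanups := []func(){
		func() { fmt.Println("release namespace lock") }, // registered first, runs last
		func() { fmt.Println("close pipe reader") },      // registered last, runs first
	}
	runCleanups(cleanups)
}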
@@ -674,8 +672,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
             if decOff > 0 {
                 if err = s2Reader.Skip(decOff); err != nil {
                     // Call the cleanup funcs
-                    for _, cFn := range cFns {
-                        cFn()
+                    for i := len(cFns) - 1; i >= 0; i-- {
+                        cFns[i]()
                     }
                     return nil, err
                 }
@@ -727,7 +725,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
     // a reader that returns the desired range of
     // encrypted bytes. The header parameter is used to
     // provide encryption parameters.
-    fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
+    fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
         copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""

         // Attach decrypter on inputReader
@@ -741,14 +739,6 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
             return nil, err
         }

-        if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
-            // Call the cleanup funcs
-            for i := len(cFns) - 1; i >= 0; i-- {
-                cFns[i]()
-            }
-            return nil, PreConditionFailed{}
-        }
-
         oi.ETag = getDecryptedETag(h, oi, false)

         // Apply the skipLen and limit on the
@@ -770,14 +760,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
     if err != nil {
         return nil, 0, 0, err
     }
-    fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
-        if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
-            // Call the cleanup funcs
-            for i := len(cFns) - 1; i >= 0; i-- {
-                cFns[i]()
-            }
-            return nil, PreConditionFailed{}
-        }
+    fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
         r = &GetObjectReader{
             ObjInfo: oi,
             Reader:  inputReader,