Mirror of https://github.com/minio/minio.git
fix: validate partNumber in queryParam as part of preConditions (#9386)
commit 282c9f790a (parent 2eeb0e6a0b)
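The change makes GET/HEAD requests that carry a partNumber query parameter subject to validation: the value must parse as a non-negative integer, and a part number that does not match the object's part count now fails the precondition check with 412. A minimal client-side sketch of such a request follows; the endpoint, bucket, object and part number are illustrative only, and a real request against MinIO would also need AWS Signature V4 authentication.

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    // Hypothetical endpoint, bucket and object names; a real request would
    // additionally carry AWS Signature V4 authentication headers.
    u := url.URL{
        Scheme:   "http",
        Host:     "localhost:9000",
        Path:     "/mybucket/myobject",
        RawQuery: url.Values{"partNumber": []string{"3"}}.Encode(),
    }

    resp, err := http.Get(u.String())
    if err != nil {
        fmt.Println("request failed:", err)
        return
    }
    defer resp.Body.Close()

    // With this commit, an invalid or non-matching part number is rejected
    // during precondition checks (412 Precondition Failed) rather than ignored.
    fmt.Println("status:", resp.Status)
}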
@@ -97,6 +97,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
     if !objInfo.Expires.IsZero() {
         w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
     }

     if globalCacheConfig.Enabled {
         w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
         w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
@@ -847,11 +847,11 @@ func (c *diskCache) Get(ctx context.Context, bucket, object string, rs *HTTPRang
     if HasSuffix(object, SlashSeparator) {
         // The lock taken above is released when
         // objReader.Close() is called by the caller.
-        gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
+        gr, gerr := NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker)
         return gr, numHits, gerr
     }

-    fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
+    fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts, nsUnlocker)
     if nErr != nil {
         return nil, numHits, nErr
     }
@@ -312,7 +312,7 @@ func (c *cacheObjects) GetObjectNInfo(ctx context.Context, bucket, object string
         }()
         cleanupBackend := func() { bkReader.Close() }
         cleanupPipe := func() { pipeWriter.Close() }
-        return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts.CheckCopyPrecondFn, cleanupBackend, cleanupPipe)
+        return NewGetObjectReaderFromReader(teeReader, bkReader.ObjInfo, opts, cleanupBackend, cleanupPipe)
     }

     // Returns ObjectInfo from cache if available.
@@ -982,6 +982,19 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
         encryption encrypt.ServerSide
         opts       ObjectOptions
     )

+    var partNumber int
+    var err error
+    if pn := r.URL.Query().Get("partNumber"); pn != "" {
+        partNumber, err = strconv.Atoi(pn)
+        if err != nil {
+            return opts, err
+        }
+        if partNumber < 0 {
+            return opts, errInvalidArgument
+        }
+    }
+
     if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
         key, err := crypto.SSEC.ParseHTTP(r.Header)
         if err != nil {
@@ -990,10 +1003,16 @@ func getOpts(ctx context.Context, r *http.Request, bucket, object string) (Objec
         derivedKey := deriveClientKey(key, bucket, object)
         encryption, err = encrypt.NewSSEC(derivedKey[:])
         logger.CriticalIf(ctx, err)
-        return ObjectOptions{ServerSideEncryption: encryption}, nil
+        return ObjectOptions{ServerSideEncryption: encryption, PartNumber: partNumber}, nil
     }

     // default case of passing encryption headers to backend
-    return getDefaultOpts(r.Header, false, nil)
+    opts, err = getDefaultOpts(r.Header, false, nil)
+    if err != nil {
+        return opts, err
+    }
+    opts.PartNumber = partNumber
+    return opts, nil
 }

 // get ObjectOptions for PUT calls from encryption headers and metadata
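For readers who want to exercise the parsing rule in isolation, here is a self-contained sketch of the validation that getOpts now performs; the helper name parsePartNumber and the error value are illustrative stand-ins, not identifiers from the MinIO codebase.

package main

import (
    "errors"
    "fmt"
    "net/url"
    "strconv"
)

// errInvalidPartNumber stands in for MinIO's errInvalidArgument.
var errInvalidPartNumber = errors.New("invalid part number")

// parsePartNumber mirrors the validation added to getOpts: an absent
// parameter yields 0, a non-numeric or negative value is rejected.
func parsePartNumber(query url.Values) (int, error) {
    pn := query.Get("partNumber")
    if pn == "" {
        return 0, nil
    }
    partNumber, err := strconv.Atoi(pn)
    if err != nil {
        return 0, err
    }
    if partNumber < 0 {
        return 0, errInvalidPartNumber
    }
    return partNumber, nil
}

func main() {
    for _, raw := range []string{"partNumber=3", "partNumber=-1", "partNumber=abc", ""} {
        q, _ := url.ParseQuery(raw)
        n, err := parsePartNumber(q)
        fmt.Printf("%-16q -> part=%d err=%v\n", raw, n, err)
    }
}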
@@ -561,7 +561,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
     if HasSuffix(object, SlashSeparator) {
         // The lock taken above is released when
         // objReader.Close() is called by the caller.
-        return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
+        return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker)
     }
     // Take a rwPool lock for NFS gateway type deployment
     rwPoolUnlocker := func() {}
@@ -578,7 +578,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
         rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) }
     }

-    objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn, nsUnlocker, rwPoolUnlocker)
+    objReaderFn, off, length, rErr := NewGetObjectReader(rs, objInfo, opts, nsUnlocker, rwPoolUnlocker)
     if rErr != nil {
         return nil, rErr
     }
@@ -742,7 +742,7 @@ func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
+    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
 }

 // GetObject - reads an object from azure. Supports additional
@@ -478,7 +478,7 @@ func (l *b2Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
+    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
 }

 // GetObject reads an object from B2. Supports additional
@@ -754,7 +754,7 @@ func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string,
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
+    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
 }

 // GetObject - reads an object from GCS. Supports additional
@@ -466,7 +466,7 @@ func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
+    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)

 }

@@ -315,7 +315,7 @@ func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string
         return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
     }
     objInfo.UserDefined = minio.CleanMinioInternalMetadataKeys(objInfo.UserDefined)
-    fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o.CheckCopyPrecondFn)
+    fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o)
     if err != nil {
         return nil, minio.ErrorRespToObjectError(err)
     }
@@ -401,7 +401,7 @@ func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, r
     // Setup cleanup function to cause the above go-routine to
     // exit in case of partial read
     pipeCloser := func() { pr.Close() }
-    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts.CheckCopyPrecondFn, pipeCloser)
+    return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
 }

 // GetObject reads an object from S3. Supports additional
@@ -40,6 +40,7 @@ type GetObjectInfoFn func(ctx context.Context, bucket, object string, opts Objec
 type ObjectOptions struct {
     ServerSideEncryption encrypt.ServerSide
     UserDefined          map[string]string
+    PartNumber           int
     CheckCopyPrecondFn   CheckCopyPreconditionFn
 }

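The surrounding hunks replace the bare CheckCopyPreconditionFn argument with a full ObjectOptions value so that PartNumber travels along the same read path. The sketch below illustrates that shape with simplified stand-in types; ObjectInfo, ObjectOptions and newReader here are stubs for illustration, not MinIO's real definitions.

package main

import "fmt"

// Simplified stand-ins for the MinIO types touched by this commit.
type ObjectInfo struct{ ETag string }

type CheckCopyPreconditionFn func(o ObjectInfo, encETag string) bool

type ObjectOptions struct {
    PartNumber         int
    CheckCopyPrecondFn CheckCopyPreconditionFn
}

// newReader mimics the new constructor shape: it receives the whole options
// value rather than just the precondition callback.
func newReader(oi ObjectInfo, opts ObjectOptions) (string, error) {
    if opts.CheckCopyPrecondFn != nil && opts.CheckCopyPrecondFn(oi, "") {
        return "", fmt.Errorf("precondition failed")
    }
    return fmt.Sprintf("reader for %s (part %d)", oi.ETag, opts.PartNumber), nil
}

func main() {
    opts := ObjectOptions{
        PartNumber:         3,
        CheckCopyPrecondFn: func(ObjectInfo, string) bool { return false },
    }
    r, err := newReader(ObjectInfo{ETag: "abc123"}, opts)
    fmt.Println(r, err)
}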
@@ -485,15 +485,15 @@ type GetObjectReader struct {
     pReader io.Reader

     cleanUpFns []func()
-    precondFn  func(ObjectInfo, string) bool
+    opts       ObjectOptions
     once       sync.Once
 }

 // NewGetObjectReaderFromReader sets up a GetObjectReader with a given
 // reader. This ignores any object properties.
-func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanupFns ...func()) (*GetObjectReader, error) {
-    if pcfn != nil {
-        if ok := pcfn(oi, ""); ok {
+func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
+    if opts.CheckCopyPrecondFn != nil {
+        if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
             // Call the cleanup funcs
             for i := len(cleanupFns) - 1; i >= 0; i-- {
                 cleanupFns[i]()
@@ -505,7 +505,7 @@ func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, pcfn CheckCopyPrec
         ObjInfo:    oi,
         pReader:    r,
         cleanUpFns: cleanupFns,
-        precondFn:  pcfn,
+        opts:       opts,
     }, nil
 }

@@ -519,7 +519,7 @@ type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckCopyPrecon
 // are called on Close() in reverse order as passed here. NOTE: It is
 // assumed that clean up functions do not panic (otherwise, they may
 // not all run!).
-func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPreconditionFn, cleanUpFns ...func()) (
+func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cleanUpFns ...func()) (
     fn ObjReaderFn, off, length int64, err error) {

     // Call the clean-up functions immediately in case of exit
@@ -537,6 +537,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
     if err != nil {
         return nil, 0, 0, err
     }

     var skipLen int64
     // Calculate range to read (different for
     // e.g. encrypted/compressed objects)
@@ -581,8 +582,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
         encETag := oi.ETag
         oi.ETag = getDecryptedETag(h, oi, copySource) // Decrypt the ETag before top layer consumes this value.

-        if pcfn != nil {
-            if ok := pcfn(oi, encETag); ok {
+        if opts.CheckCopyPrecondFn != nil {
+            if ok := opts.CheckCopyPrecondFn(oi, encETag); ok {
                 // Call the cleanup funcs
                 for i := len(cFns) - 1; i >= 0; i-- {
                     cFns[i]()
@@ -600,7 +601,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
             ObjInfo:    oi,
             pReader:    decReader,
             cleanUpFns: cFns,
-            precondFn:  pcfn,
+            opts:       opts,
         }
         return r, nil
     }
@@ -634,8 +635,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
         }
         fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
             cFns = append(cleanUpFns, cFns...)
-            if pcfn != nil {
-                if ok := pcfn(oi, ""); ok {
+            if opts.CheckCopyPrecondFn != nil {
+                if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
                     // Call the cleanup funcs
                     for i := len(cFns) - 1; i >= 0; i-- {
                         cFns[i]()
@@ -668,7 +669,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
                 ObjInfo:    oi,
                 pReader:    decReader,
                 cleanUpFns: cFns,
-                precondFn:  pcfn,
+                opts:       opts,
             }
             return r, nil
         }
@@ -680,8 +681,8 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
         }
         fn = func(inputReader io.Reader, _ http.Header, pcfn CheckCopyPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
             cFns = append(cleanUpFns, cFns...)
-            if pcfn != nil {
-                if ok := pcfn(oi, ""); ok {
+            if opts.CheckCopyPrecondFn != nil {
+                if ok := opts.CheckCopyPrecondFn(oi, ""); ok {
                     // Call the cleanup funcs
                     for i := len(cFns) - 1; i >= 0; i-- {
                         cFns[i]()
@@ -693,7 +694,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, pcfn CheckCopyPrecondi
             ObjInfo:    oi,
             pReader:    inputReader,
             cleanUpFns: cFns,
-            precondFn:  pcfn,
+            opts:       opts,
         }
         return r, nil
     }
|
@ -146,7 +146,7 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r
|
|||||||
// If-Unmodified-Since
|
// If-Unmodified-Since
|
||||||
// If-Match
|
// If-Match
|
||||||
// If-None-Match
|
// If-None-Match
|
||||||
func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo) bool {
|
func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, opts ObjectOptions) bool {
|
||||||
// Return false for methods other than GET and HEAD.
|
// Return false for methods other than GET and HEAD.
|
||||||
if r.Method != http.MethodGet && r.Method != http.MethodHead {
|
if r.Method != http.MethodGet && r.Method != http.MethodHead {
|
||||||
return false
|
return false
|
||||||
@@ -170,6 +170,14 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ
             w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
         }
     }

+    // Check if the part number is correct.
+    if opts.PartNumber > 0 && opts.PartNumber != len(objInfo.Parts) {
+        writeHeaders()
+        writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL, guessIsBrowserReq(r))
+        return true
+    }
+
     // If-Modified-Since : Return the object only if it has been modified since the specified time,
     // otherwise return a 304 (not modified).
     ifModifiedSinceHeader := r.Header.Get(xhttp.IfModifiedSince)
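A self-contained sketch of the precondition added above: a non-zero requested part number that differs from the object's part count is answered with 412 Precondition Failed. The types and the helper name below are simplified stand-ins for the MinIO internals, used only to make the check runnable in isolation.

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

// Simplified stand-ins for the fields this check relies on.
type ObjectPartInfo struct{ Number int }

type ObjectInfo struct{ Parts []ObjectPartInfo }

type ObjectOptions struct{ PartNumber int }

// partNumberPreconditionFailed mirrors the check added to checkPreconditions:
// a non-zero requested part number that does not equal the object's part
// count is treated as a failed precondition.
func partNumberPreconditionFailed(w http.ResponseWriter, oi ObjectInfo, opts ObjectOptions) bool {
    if opts.PartNumber > 0 && opts.PartNumber != len(oi.Parts) {
        w.WriteHeader(http.StatusPreconditionFailed) // 412
        return true
    }
    return false
}

func main() {
    oi := ObjectInfo{Parts: make([]ObjectPartInfo, 4)} // object uploaded in 4 parts
    for _, pn := range []int{0, 4, 7} {
        rec := httptest.NewRecorder()
        failed := partNumberPreconditionFailed(rec, oi, ObjectOptions{PartNumber: pn})
        fmt.Printf("partNumber=%d -> failed=%v status=%d\n", pn, failed, rec.Code)
    }
}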
@@ -1,5 +1,5 @@
 /*
- * MinIO Cloud Storage, (C) 2015-2018 MinIO, Inc.
+ * MinIO Cloud Storage, (C) 2015-2020 MinIO, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -74,7 +74,7 @@ const (
 // setHeadGetRespHeaders - set any requested parameters as response headers.
 func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) {
     for k, v := range reqParams {
-        if header, ok := supportedHeadGetReqParams[k]; ok {
+        if header, ok := supportedHeadGetReqParams[strings.ToLower(k)]; ok {
             w.Header()[header] = v
         }
     }
@@ -376,7 +376,7 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
     }

     // Validate pre-conditions if any.
-    if checkPreconditions(ctx, w, r, objInfo) {
+    if checkPreconditions(ctx, w, r, objInfo, opts) {
         return
     }

@@ -566,7 +566,7 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re
     }

     // Validate pre-conditions if any.
-    if checkPreconditions(ctx, w, r, objInfo) {
+    if checkPreconditions(ctx, w, r, objInfo, opts) {
         return
     }

@@ -100,7 +100,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del
         } else {
             // Set the Mode to a "regular" file.
             var err error
-            objInfo, err = xl.getObjectInfo(ctx, bucket, entry)
+            objInfo, err = xl.getObjectInfo(ctx, bucket, entry, ObjectOptions{})
             if err != nil {
                 // Ignore errFileNotFound as the object might have got
                 // deleted in the interim period of listing and getObjectInfo(),
@@ -50,7 +50,7 @@ func (xl xlObjects) getMultipartSHADir(bucket, object string) string {

 // checkUploadIDExists - verify if a given uploadID exists and is valid.
 func (xl xlObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error {
-    _, err := xl.getObjectInfo(ctx, minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID))
+    _, err := xl.getObjectInfo(ctx, minioMetaMultipartBucket, xl.getUploadIDDir(bucket, object, uploadID), ObjectOptions{})
     return err
 }

@@ -675,7 +675,7 @@ func (xl xlObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
     if xl.isObject(bucket, object) {
         // Deny if WORM is enabled
         if isWORMEnabled(bucket) {
-            if _, err := xl.getObjectInfo(ctx, bucket, object); err == nil {
+            if _, err := xl.getObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
                 return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
             }
         }
@@ -137,16 +137,16 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r
         if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil {
             return nil, toObjectErr(err, bucket, object)
         }
-        return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn)
+        return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts)
     }

     var objInfo ObjectInfo
-    objInfo, err = xl.getObjectInfo(ctx, bucket, object)
+    objInfo, err = xl.getObjectInfo(ctx, bucket, object, opts)
     if err != nil {
         return nil, toObjectErr(err, bucket, object)
     }

-    fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts.CheckCopyPrecondFn)
+    fn, off, length, nErr := NewGetObjectReader(rs, objInfo, opts)
     if nErr != nil {
         return nil, nErr
     }
@@ -156,6 +156,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r
         err := xl.getObject(ctx, bucket, object, off, length, pw, "", opts)
         pw.CloseWithError(err)
     }()

     // Cleanup function to cause the go routine above to exit, in
     // case of incomplete read.
     pipeCloser := func() { pr.Close() }
@@ -365,7 +366,7 @@ func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, op
         return info, nil
     }

-    info, err := xl.getObjectInfo(ctx, bucket, object)
+    info, err := xl.getObjectInfo(ctx, bucket, object, opts)
     if err != nil {
         return oi, toObjectErr(err, bucket, object)
     }
@@ -374,7 +375,7 @@ func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, op
 }

 // getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
-func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string) (objInfo ObjectInfo, err error) {
+func (xl xlObjects) getObjectInfo(ctx context.Context, bucket, object string, opt ObjectOptions) (objInfo ObjectInfo, err error) {
     disks := xl.getDisks()

     // Read metadata associated with the object from all disks.
@@ -629,7 +630,7 @@ func (xl xlObjects) putObject(ctx context.Context, bucket string, object string,
     if xl.isObject(bucket, object) {
         // Deny if WORM is enabled
         if isWORMEnabled(bucket) {
-            if _, err := xl.getObjectInfo(ctx, bucket, object); err == nil {
+            if _, err := xl.getObjectInfo(ctx, bucket, object, ObjectOptions{}); err == nil {
                 return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
             }
         }