Name return values to prevent the need (and unnecessary code bloat) (#4576)

This avoids having to explicitly instantiate zero-value objects for every return statement.
Frank Wessels 2017-06-21 19:53:09 -07:00 committed by Harshavardhana
parent cec8b238f3
commit 46897b1100
24 changed files with 304 additions and 306 deletions
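
The pattern this commit applies everywhere, shown as a minimal standalone sketch (the lookupUser function and UserInfo type are hypothetical, not from this commit): naming the return values gives implicitly zero-initialized variables, so every error path can return them without spelling out the struct literal.

package main

import (
	"errors"
	"fmt"
)

type UserInfo struct {
	Name string
	Age  int
}

// Before: every error path instantiates the zero value explicitly.
func lookupUserOld(id string) (UserInfo, error) {
	if id == "" {
		return UserInfo{}, errors.New("empty id")
	}
	return UserInfo{Name: "alice", Age: 30}, nil
}

// After: the named return value ui is implicitly zero-initialized,
// so error paths simply return it.
func lookupUser(id string) (ui UserInfo, e error) {
	if id == "" {
		return ui, errors.New("empty id")
	}
	return UserInfo{Name: "alice", Age: 30}, nil
}

func main() {
	fmt.Println(lookupUserOld(""))
	fmt.Println(lookupUser("u1"))
}

For a single short struct the saving is small, but for the long signatures below (ListMultipartsInfo, ObjectInfo, PartInfo, ...) it removes a struct literal from each of dozens of error returns.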

View File

@ -113,15 +113,15 @@ func (rc remoteAdminClient) ReInitDisks() error {
}
// ServerInfoData - Returns the server info of this server.
func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) {
func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
if globalBootTime.IsZero() {
return ServerInfoData{}, errServerNotInitialized
return sid, errServerNotInitialized
}
// Build storage info
objLayer := newObjectLayerFn()
if objLayer == nil {
return ServerInfoData{}, errServerNotInitialized
return sid, errServerNotInitialized
}
storage := objLayer.StorageInfo()
@ -145,12 +145,12 @@ func (lc localAdminClient) ServerInfoData() (ServerInfoData, error) {
}
// ServerInfo - returns the server info of the server to which the RPC call is made.
func (rc remoteAdminClient) ServerInfoData() (ServerInfoData, error) {
func (rc remoteAdminClient) ServerInfoData() (sid ServerInfoData, e error) {
args := AuthRPCArgs{}
reply := ServerInfoDataReply{}
err := rc.Call(serverInfoDataRPC, &args, &reply)
if err != nil {
return ServerInfoData{}, err
return sid, err
}
return reply.ServerInfoData, nil
@ -493,7 +493,7 @@ func getPeerConfig(peers adminPeers) ([]byte, error) {
// getValidServerConfig - finds the server config that is present on
// a quorum or more of the servers.
func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (serverConfigV13, error) {
func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (scv serverConfigV13, e error) {
// majority-based quorum
quorum := len(serverConfigs)/2 + 1
@ -566,7 +566,7 @@ func getValidServerConfig(serverConfigs []serverConfigV13, errs []error) (server
// If quorum nodes don't agree.
if maxOccurrence < quorum {
return serverConfigV13{}, errXLWriteQuorum
return scv, errXLWriteQuorum
}
return configJSON, nil
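
A standalone sketch of the majority-quorum counting idea used by getValidServerConfig, with plain strings standing in for serialized serverConfigV13 values (the mostCommon helper is illustrative, not the commit's code):

package main

import "fmt"

// mostCommon returns the most frequent value and its occurrence count.
func mostCommon(configs []string) (val string, count int) {
	seen := map[string]int{}
	for _, c := range configs {
		seen[c]++
		if seen[c] > count {
			val, count = c, seen[c]
		}
	}
	return val, count
}

func main() {
	configs := []string{"v13-a", "v13-a", "v13-b"}
	quorum := len(configs)/2 + 1 // majority-based quorum
	cfg, n := mostCommon(configs)
	if n < quorum {
		fmt.Println("no quorum") // would map to errXLWriteQuorum above
		return
	}
	fmt.Println("quorum config:", cfg)
}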

View File

@ -77,14 +77,14 @@ func (endpoint Endpoint) SetHTTP() {
}
// NewEndpoint - returns new endpoint based on given arguments.
func NewEndpoint(arg string) (Endpoint, error) {
func NewEndpoint(arg string) (ep Endpoint, e error) {
// isEmptyPath - check whether the given path is empty.
isEmptyPath := func(path string) bool {
return path == "" || path == "/" || path == `\`
}
if isEmptyPath(arg) {
return Endpoint{}, fmt.Errorf("empty or root endpoint is not supported")
return ep, fmt.Errorf("empty or root endpoint is not supported")
}
var isLocal bool
@ -96,13 +96,13 @@ func NewEndpoint(arg string) (Endpoint, error) {
// - All fields should be empty except Host and Path.
if !((u.Scheme == "http" || u.Scheme == "https") &&
u.User == nil && u.Opaque == "" && u.ForceQuery == false && u.RawQuery == "" && u.Fragment == "") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format")
return ep, fmt.Errorf("invalid URL endpoint format")
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
if !strings.Contains(err.Error(), "missing port in address") {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: %s", err)
return ep, fmt.Errorf("invalid URL endpoint format: %s", err)
}
host = u.Host
@ -110,26 +110,26 @@ func NewEndpoint(arg string) (Endpoint, error) {
var p int
p, err = strconv.Atoi(port)
if err != nil {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: invalid port number")
return ep, fmt.Errorf("invalid URL endpoint format: invalid port number")
} else if p < 1 || p > 65535 {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
return ep, fmt.Errorf("invalid URL endpoint format: port number must be between 1 to 65535")
}
}
if host == "" {
return Endpoint{}, fmt.Errorf("invalid URL endpoint format: empty host name")
return ep, fmt.Errorf("invalid URL endpoint format: empty host name")
}
// As this is a path in the URL, we should use the path package, not the filepath package.
// On MS Windows, filepath.Clean() converts into Windows path style, i.e. `/foo` becomes `\foo`
u.Path = path.Clean(u.Path)
if isEmptyPath(u.Path) {
return Endpoint{}, fmt.Errorf("empty or root path is not supported in URL endpoint")
return ep, fmt.Errorf("empty or root path is not supported in URL endpoint")
}
isLocal, err = isLocalHost(host)
if err != nil {
return Endpoint{}, err
return ep, err
}
} else {
u = &url.URL{Path: path.Clean(arg)}
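
The host/port validation above relies on net.SplitHostPort reporting "missing port in address" for bare hosts; a self-contained sketch of that flow under the same error-string check (parseHostPort is a hypothetical helper):

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

func parseHostPort(hostport string) (host string, port int, err error) {
	host, portStr, err := net.SplitHostPort(hostport)
	if err != nil {
		// A bare host like "server1" has no port; treat that as valid.
		if !strings.Contains(err.Error(), "missing port in address") {
			return "", 0, fmt.Errorf("invalid endpoint: %s", err)
		}
		return hostport, 0, nil
	}
	port, err = strconv.Atoi(portStr)
	if err != nil {
		return "", 0, fmt.Errorf("invalid port number")
	}
	if port < 1 || port > 65535 {
		return "", 0, fmt.Errorf("port number must be between 1 and 65535")
	}
	return host, port, nil
}

func main() {
	fmt.Println(parseHostPort("server1:9000"))
	fmt.Println(parseHostPort("server1"))
}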

View File

@ -159,7 +159,7 @@ func (fs fsObjects) listMultipartUploadIDs(bucketName, objectName, uploadIDMarke
}
// listMultipartUploads - lists all multipart uploads.
func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result := ListMultipartsInfo{}
recursive := true
if delimiter == slashSeparator {
@ -191,7 +191,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if uploadIDMarker != "" {
uploads, _, err = fs.listMultipartUploadIDs(bucket, keyMarker, uploadIDMarker, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
maxUploads = maxUploads - len(uploads)
}
@ -232,7 +232,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
eof = true
break
}
return ListMultipartsInfo{}, walkResult.err
return lmi, walkResult.err
}
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
@ -256,7 +256,7 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
tmpUploads, end, err = fs.listMultipartUploadIDs(bucket, entry, uploadIDMarker, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
uploads = append(uploads, tmpUploads...)
@ -311,13 +311,13 @@ func (fs fsObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// Implements S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (fs fsObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, fs); err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListMultipartsInfo{}, toObjectErr(err, bucket)
return lmi, toObjectErr(err, bucket)
}
return fs.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
@ -412,9 +412,9 @@ func partToAppend(fsMeta fsMetaV1, fsAppendMeta fsMetaV1) (part objectPartInfo,
// CopyObjectPart - similar to PutObjectPart but reads data from an existing
// object. Internally incoming data is written to '.minio.sys/tmp' location
// and safely renamed to '.minio.sys/multipart' for each part.
func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) {
func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) {
if err := checkNewMultipartArgs(srcBucket, srcObject, fs); err != nil {
return PartInfo{}, err
return pi, err
}
// Initialize pipe.
@ -431,7 +431,7 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
partInfo, err := fs.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
if err != nil {
return PartInfo{}, toObjectErr(err, dstBucket, dstObject)
return pi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@ -444,13 +444,13 @@ func (fs fsObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// an ongoing multipart transaction. Internally incoming data is
// written to '.minio.sys/tmp' location and safely renamed to
// '.minio.sys/multipart' for each part.
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, fs); err != nil {
return PartInfo{}, err
return pi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return PartInfo{}, toObjectErr(err, bucket)
return pi, toObjectErr(err, bucket)
}
// Hold the lock so that two parallel complete-multipart-uploads
@ -463,9 +463,9 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
uploadsPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object, uploadsJSONFile)
if _, err := fs.rwPool.Open(uploadsPath); err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
return PartInfo{}, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(traceError(err), bucket, object)
}
defer fs.rwPool.Close(uploadsPath)
@ -476,16 +476,16 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
rwlk, err := fs.rwPool.Write(fsMetaPath)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
return PartInfo{}, toObjectErr(traceError(err), bucket, object)
return pi, toObjectErr(traceError(err), bucket, object)
}
defer rwlk.Close()
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(rwlk)
if err != nil {
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPath)
}
partSuffix := fmt.Sprintf("object%d", partID)
@ -523,14 +523,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
bytesWritten, cErr := fsCreateFile(fsPartPath, teeReader, buf, size)
if cErr != nil {
fsRemoveFile(fsPartPath)
return PartInfo{}, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
return pi, toObjectErr(cErr, minioMetaTmpBucket, tmpPartPath)
}
// Should return IncompleteBody{} error when reader has fewer
// bytes than specified in request header.
if bytesWritten < size {
fsRemoveFile(fsPartPath)
return PartInfo{}, traceError(IncompleteBody{})
return pi, traceError(IncompleteBody{})
}
// Delete temporary part in case of failure. If
@ -541,14 +541,14 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
if md5Hex != "" {
if newMD5Hex != md5Hex {
return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
return pi, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, traceError(SHA256Mismatch{})
return pi, traceError(SHA256Mismatch{})
}
}
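
The digest checks above hash the stream while it is being written to disk; a minimal sketch of that shape with io.TeeReader and io.MultiWriter (a buffer stands in for the part file, names are illustrative):

package main

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
)

func main() {
	body := bytes.NewReader([]byte("part data"))

	md5Writer := md5.New()
	sha256Writer := sha256.New()
	// Everything read from teeReader is also fed to both hashes.
	teeReader := io.TeeReader(body, io.MultiWriter(md5Writer, sha256Writer))

	var sink bytes.Buffer // stands in for the part file on disk
	if _, err := io.Copy(&sink, teeReader); err != nil {
		fmt.Println("write failed:", err)
		return
	}

	newMD5Hex := hex.EncodeToString(md5Writer.Sum(nil))
	newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
	fmt.Println(newMD5Hex, newSHA256sum)
	// The handler compares these against the client-sent md5Hex /
	// sha256sum and fails with BadDigest / SHA256Mismatch on mismatch.
}
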
@ -561,20 +561,20 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
fsNSPartPath := pathJoin(fs.fsPath, minioMetaMultipartBucket, partPath)
if err = fsRenameFile(fsPartPath, fsNSPartPath); err != nil {
partLock.Unlock()
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
// Save the object part info in `fs.json`.
fsMeta.AddObjectPart(partID, partSuffix, newMD5Hex, size)
if _, err = fsMeta.WriteTo(rwlk); err != nil {
partLock.Unlock()
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, partSuffix)
fi, err := fsStatFile(partNamePath)
if err != nil {
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
}
// Append the part in background.
@ -599,7 +599,7 @@ func (fs fsObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// listObjectParts - wrapper scanning through
// '.minio.sys/multipart/bucket/object/UPLOADID'. Lists all the parts
// saved inside '.minio.sys/multipart/bucket/object/UPLOADID'.
func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
result := ListPartsInfo{}
uploadIDPath := pathJoin(bucket, object, uploadID)
@ -608,16 +608,16 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
// On windows oddly this is returned.
return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return lpi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ListPartsInfo{}, toObjectErr(traceError(err), bucket, object)
return lpi, toObjectErr(traceError(err), bucket, object)
}
defer fs.rwPool.Close(fsMetaPath)
fsMeta := fsMetaV1{}
_, err = fsMeta.ReadFrom(metaFile.LockedFile)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, fsMetaPath)
return lpi, toObjectErr(err, minioMetaBucket, fsMetaPath)
}
// Only parts with higher part numbers will be listed.
@ -633,7 +633,7 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
partNamePath := pathJoin(fs.fsPath, minioMetaMultipartBucket, uploadIDPath, part.Name)
fi, err = fsStatFile(partNamePath)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, partNamePath)
return lpi, toObjectErr(err, minioMetaMultipartBucket, partNamePath)
}
result.Parts = append(result.Parts, PartInfo{
PartNumber: part.Number,
@ -671,13 +671,13 @@ func (fs fsObjects) listObjectParts(bucket, object, uploadID string, partNumberM
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, fs); err != nil {
return ListPartsInfo{}, err
return lpi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListPartsInfo{}, toObjectErr(err, bucket)
return lpi, toObjectErr(err, bucket)
}
// Hold the lock so that two parallel complete-multipart-uploads
@ -688,7 +688,7 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
listPartsInfo, err := fs.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, bucket, object)
return lpi, toObjectErr(err, bucket, object)
}
// Success.
@ -701,24 +701,24 @@ func (fs fsObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) {
func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, fs); err != nil {
return ObjectInfo{}, err
return oi, err
}
// Check if an object is present as one of the parent dirs.
if fs.parentDirIsObject(bucket, pathutil.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
return oi, toObjectErr(err, bucket)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts)
if err != nil {
return ObjectInfo{}, err
return oi, err
}
uploadIDPath := pathJoin(bucket, object, uploadID)
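
getCompleteMultipartMD5 produces the S3-style multipart ETag; assuming it follows the usual S3 convention, that is the MD5 of the concatenated binary part MD5s with a "-<part count>" suffix. A hedged sketch (the real helper also validates each part's ETag):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// completeMultipartMD5 mimics the S3 multipart ETag: md5 over the
// concatenation of the decoded per-part MD5s, plus "-<part count>".
func completeMultipartMD5(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag)
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags)), nil
}

func main() {
	etag, err := completeMultipartMD5([]string{
		"5d41402abc4b2a76b9719d911017c592", // md5("hello")
		"7d793037a0760186574b0282f2f435e7", // md5("world")
	})
	fmt.Println(etag, err)
}
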
@ -733,9 +733,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
rlk, err := fs.rwPool.Open(fsMetaPathMultipart)
if err != nil {
if err == errFileNotFound || err == errFileAccessDenied {
return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return oi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
// Disallow any parallel abort or complete multipart operations.
@ -743,9 +743,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound || err == errFileAccessDenied {
return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return oi, traceError(InvalidUploadID{UploadID: uploadID})
}
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
defer rwlk.Close()
@ -754,7 +754,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
_, err = fsMeta.ReadFrom(rlk.LockedFile)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)
return oi, toObjectErr(err, minioMetaMultipartBucket, fsMetaPathMultipart)
}
// Wait for any competing PutObject() operation on bucket/object, since same namespace
@ -763,7 +763,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
metaFile, err := fs.rwPool.Create(fsMetaPath)
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
defer metaFile.Close()
@ -780,7 +780,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID)
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID)
return oi, toObjectErr(err, minioMetaTmpBucket, uploadID)
}
}
}
@ -803,18 +803,18 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := fsMeta.ObjectPartIndex(part.PartNumber)
if partIdx == -1 {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
if fsMeta.Parts[partIdx].ETag != part.ETag {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
// All parts except the last part have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(fsMeta.Parts[partIdx].Size) {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, traceError(PartTooSmall{
return oi, traceError(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: fsMeta.Parts[partIdx].Size,
PartETag: part.ETag,
@ -831,9 +831,9 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
if err == errFileNotFound {
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
return ObjectInfo{}, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix)
return oi, toObjectErr(traceError(err), minioMetaMultipartBucket, partSuffix)
}
// No need to hold a lock, this is a unique file and will be only written
@ -843,7 +843,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err != nil {
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
_, err = io.CopyBuffer(wfile, reader, buf)
@ -851,7 +851,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
wfile.Close()
reader.Close()
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
wfile.Close()
@ -860,7 +860,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
if err = fsRenameFile(fsTmpObjPath, fsNSObjPath); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, uploadID)
return oi, toObjectErr(err, minioMetaTmpBucket, uploadID)
}
}
@ -876,7 +876,7 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
// Write all the set metadata.
if _, err = fsMeta.WriteTo(metaFile); err != nil {
fs.rwPool.Close(fsMetaPathMultipart)
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Close lock held on bucket/object/uploadid/fs.json,
@ -888,17 +888,17 @@ func (fs fsObjects) CompleteMultipartUpload(bucket string, object string, upload
multipartObjectDir := pathJoin(fs.fsPath, minioMetaMultipartBucket, bucket, object)
multipartUploadIDDir := pathJoin(multipartObjectDir, uploadID)
if err = fsRemoveUploadIDPath(multipartObjectDir, multipartUploadIDDir); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Remove entry from `uploads.json`.
if err = fs.removeUploadID(bucket, object, uploadID, rwlk); err != nil {
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object))
return oi, toObjectErr(err, minioMetaMultipartBucket, pathutil.Join(bucket, object))
}
fi, err := fsStatFile(fsNSObjPath)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Return object info.

View File

@ -214,10 +214,10 @@ func (fs fsObjects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo - fetch bucket metadata info.
func (fs fsObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (fs fsObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
st, err := fs.statBucketDir(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
return bi, toObjectErr(err, bucket)
}
// As osStat() doesn't carry anything other than ModTime(), use ModTime() as CreatedTime.
@ -304,15 +304,15 @@ func (fs fsObjects) DeleteBucket(bucket string) error {
// CopyObject - copies the source object to the destination object.
// If the source and destination objects are the same we only
// update metadata.
func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) {
func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (oi ObjectInfo, e error) {
if _, err := fs.statBucketDir(srcBucket); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket)
return oi, toObjectErr(err, srcBucket)
}
// Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, srcBucket, srcObject))
if err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Check if this request is only metadata update.
@ -322,7 +322,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
var wlk *lock.LockedFile
wlk, err = fs.rwPool.Write(fsMetaPath)
if err != nil {
return ObjectInfo{}, toObjectErr(traceError(err), srcBucket, srcObject)
return oi, toObjectErr(traceError(err), srcBucket, srcObject)
}
// This close will allow for locks to be synchronized on `fs.json`.
defer wlk.Close()
@ -331,7 +331,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
fsMeta := newFSMetaV1()
fsMeta.Meta = metadata
if _, err = fsMeta.WriteTo(wlk); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Return the new object info.
@ -356,7 +356,7 @@ func (fs fsObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
objInfo, err := fs.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
if err != nil {
return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject)
return oi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@ -431,7 +431,7 @@ func (fs fsObjects) GetObject(bucket, object string, offset int64, length int64,
}
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
func (fs fsObjects) getObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
fsMeta := fsMetaV1{}
fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
@ -446,33 +446,33 @@ func (fs fsObjects) getObjectInfo(bucket, object string) (ObjectInfo, error) {
// PutObject() transaction, if we arrive at such
// a situation we just ignore and continue.
if errorCause(rerr) != io.EOF {
return ObjectInfo{}, toObjectErr(rerr, bucket, object)
return oi, toObjectErr(rerr, bucket, object)
}
}
}
// Ignore if `fs.json` is not available, this is true for pre-existing data.
if err != nil && err != errFileNotFound {
return ObjectInfo{}, toObjectErr(traceError(err), bucket, object)
return oi, toObjectErr(traceError(err), bucket, object)
}
// Stat the file to get file size.
fi, err := fsStatFile(pathJoin(fs.fsPath, bucket, object))
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
return fsMeta.ToObjectInfo(bucket, object, fi), nil
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (fs fsObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
func (fs fsObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
return oi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket)
return oi, toObjectErr(err, bucket)
}
return fs.getObjectInfo(bucket, object)
@ -759,18 +759,18 @@ func (fs fsObjects) getObjectETag(bucket, entry string) (string, error) {
// ListObjects - list all objects at prefix up to maxKeys, optionally delimited by '/'. Maintains the list pool
// state for future re-entrant list requests.
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, fs); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
if _, err := fs.statBucketDir(bucket); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
// With max keys of zero we have reached eof, return right here.
if maxKeys == 0 {
return ListObjectsInfo{}, nil
return loi, nil
}
// For delimiter and prefix as '/' we do not list anything at all
@ -779,7 +779,7 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// as '/' we don't have any entries, since all the keys are
// of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return ListObjectsInfo{}, nil
return loi, nil
}
// Overflowing count - reset to maxObjectList.
@ -860,13 +860,13 @@ func (fs fsObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
if walkResult.err != nil {
// File not found is a valid case.
if errorCause(walkResult.err) == errFileNotFound {
return ListObjectsInfo{}, nil
return loi, nil
}
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
return loi, toObjectErr(walkResult.err, bucket, prefix)
}
objInfo, err := entryToObjectInfo(walkResult.entry)
if err != nil {
return ListObjectsInfo{}, nil
return loi, nil
}
nextMarker = objInfo.Name
objInfos = append(objInfos, objInfo)
@ -908,8 +908,8 @@ func (fs fsObjects) HealBucket(bucket string) error {
}
// ListObjectsHeal - list all objects to be healed. Valid only for XL
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListBucketsHeal - list all buckets to be healed. Valid only for XL
@ -918,6 +918,6 @@ func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
}
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

View File

@ -32,12 +32,12 @@ func (a *azureObjects) HealObject(bucket, object string) (int, int, error) {
}
// ListObjectsHeal - Not relevant.
func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (a *azureObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (a *azureObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

View File

@ -179,8 +179,8 @@ func (a *azureObjects) Shutdown() error {
}
// StorageInfo - Not relevant to Azure backend.
func (a *azureObjects) StorageInfo() StorageInfo {
return StorageInfo{}
func (a *azureObjects) StorageInfo() (si StorageInfo) {
return si
}
// MakeBucketWithLocation - Create a new container on azure backend.
@ -190,13 +190,13 @@ func (a *azureObjects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo - Get bucket metadata.
func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (a *azureObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// Azure does not have an equivalent call, hence use ListContainers.
resp, err := a.client.ListContainers(storage.ListContainersParameters{
Prefix: bucket,
})
if err != nil {
return BucketInfo{}, azureToObjectError(traceError(err), bucket)
return bi, azureToObjectError(traceError(err), bucket)
}
for _, container := range resp.Containers {
if container.Name == bucket {
@ -209,7 +209,7 @@ func (a *azureObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
} // else continue
}
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets - Lists all azure containers, uses Azure equivalent ListContainers.

View File

@ -24,14 +24,14 @@ import (
)
// AnonPutObject creates a new object anonymously with the incoming data,
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
var sha256sumBytes []byte
var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
}
@ -40,14 +40,14 @@ func (l *s3Objects) AnonPutObject(bucket string, object string, size int64, data
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
}
oi, err := l.anonClient.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
@ -74,47 +74,47 @@ func (l *s3Objects) AnonGetObject(bucket string, key string, startOffset int64,
}
// AnonGetObjectInfo - Get object info anonymously
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (ObjectInfo, error) {
func (l *s3Objects) AnonGetObjectInfo(bucket string, object string) (objInfo ObjectInfo, e error) {
r := minio.NewHeadReqHeaders()
oi, err := l.anonClient.StatObject(bucket, object, r)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// AnonListObjects - List objects anonymously
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) AnonListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.anonClient.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// AnonListObjectsV2 - List objects in V2 mode, anonymously
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
func (l *s3Objects) AnonListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) {
result, err := l.anonClient.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
}
// AnonGetBucketInfo - Get bucket metadata anonymously.
func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) AnonGetBucketInfo(bucket string) (bi BucketInfo, e error) {
if exists, err := l.anonClient.BucketExists(bucket); err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
} else if !exists {
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
buckets, err := l.anonClient.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
@ -128,5 +128,5 @@ func (l *s3Objects) AnonGetBucketInfo(bucket string) (BucketInfo, error) {
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}

View File

@ -32,11 +32,11 @@ func (l *s3Objects) HealObject(bucket string, object string) (int, int, error) {
}
// ListObjectsHeal - Not relevant.
func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
return ListObjectsInfo{}, traceError(NotImplemented{})
func (l *s3Objects) ListObjectsHeal(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
return loi, traceError(NotImplemented{})
}
// ListUploadsHeal - Not relevant.
func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
return ListMultipartsInfo{}, traceError(NotImplemented{})
func (l *s3Objects) ListUploadsHeal(bucket string, prefix string, marker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, traceError(NotImplemented{})
}

View File

@ -128,8 +128,8 @@ func (l *s3Objects) Shutdown() error {
}
// StorageInfo is not relevant to S3 backend.
func (l *s3Objects) StorageInfo() StorageInfo {
return StorageInfo{}
func (l *s3Objects) StorageInfo() (si StorageInfo) {
return si
}
// MakeBucket creates a new container on S3 backend.
@ -142,10 +142,10 @@ func (l *s3Objects) MakeBucketWithLocation(bucket, location string) error {
}
// GetBucketInfo gets bucket metadata.
func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (l *s3Objects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
buckets, err := l.Client.ListBuckets()
if err != nil {
return BucketInfo{}, s3ToObjectError(traceError(err), bucket)
return bi, s3ToObjectError(traceError(err), bucket)
}
for _, bi := range buckets {
@ -159,7 +159,7 @@ func (l *s3Objects) GetBucketInfo(bucket string) (BucketInfo, error) {
}, nil
}
return BucketInfo{}, traceError(BucketNotFound{Bucket: bucket})
return bi, traceError(BucketNotFound{Bucket: bucket})
}
// ListBuckets lists all S3 buckets
@ -190,20 +190,20 @@ func (l *s3Objects) DeleteBucket(bucket string) error {
}
// ListObjects lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (l *s3Objects) ListObjects(bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
return ListObjectsInfo{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketResult(bucket, result), nil
}
// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (ListObjectsV2Info, error) {
func (l *s3Objects) ListObjectsV2(bucket, prefix, continuationToken string, fetchOwner bool, delimiter string, maxKeys int) (loi ListObjectsV2Info, e error) {
result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys)
if err != nil {
return ListObjectsV2Info{}, s3ToObjectError(traceError(err), bucket)
return loi, s3ToObjectError(traceError(err), bucket)
}
return fromMinioClientListBucketV2Result(bucket, result), nil
@ -313,14 +313,14 @@ func (l *s3Objects) GetObjectInfo(bucket string, object string) (objInfo ObjectI
}
// PutObject creates a new object with the incoming data,
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (ObjectInfo, error) {
func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.Reader, metadata map[string]string, sha256sum string) (objInfo ObjectInfo, e error) {
var sha256sumBytes []byte
var err error
if sha256sum != "" {
sha256sumBytes, err = hex.DecodeString(sha256sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
}
@ -329,29 +329,29 @@ func (l *s3Objects) PutObject(bucket string, object string, size int64, data io.
if md5sum != "" {
md5sumBytes, err = hex.DecodeString(md5sum)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
delete(metadata, "etag")
}
oi, err := l.Client.PutObject(bucket, object, size, data, md5sumBytes, sha256sumBytes, toMinioClientMetadata(metadata))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return objInfo, s3ToObjectError(traceError(err), bucket, object)
}
return fromMinioClientObjectInfo(bucket, oi), nil
}
// CopyObject copies a blob from source container to destination container.
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (ObjectInfo, error) {
func (l *s3Objects) CopyObject(srcBucket string, srcObject string, destBucket string, destObject string, metadata map[string]string) (objInfo ObjectInfo, e error) {
err := l.Client.CopyObject(destBucket, destObject, path.Join(srcBucket, srcObject), minio.CopyConditions{})
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), srcBucket, srcObject)
return objInfo, s3ToObjectError(traceError(err), srcBucket, srcObject)
}
oi, err := l.GetObjectInfo(destBucket, destObject)
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), destBucket, destObject)
return objInfo, s3ToObjectError(traceError(err), destBucket, destObject)
}
return oi, nil
@ -406,10 +406,10 @@ func fromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) Li
}
// ListMultipartUploads lists all multipart uploads.
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (l *s3Objects) ListMultipartUploads(bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
return fromMinioClientListMultipartsInfo(result), nil
@ -455,20 +455,20 @@ func fromMinioClientObjectPart(op minio.ObjectPart) PartInfo {
}
// PutObjectPart puts a part of object in bucket
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (l *s3Objects) PutObjectPart(bucket string, object string, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
md5HexBytes, err := hex.DecodeString(md5Hex)
if err != nil {
return PartInfo{}, err
return pi, err
}
sha256sumBytes, err := hex.DecodeString(sha256sum)
if err != nil {
return PartInfo{}, err
return pi, err
}
info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, size, data, md5HexBytes, sha256sumBytes)
if err != nil {
return PartInfo{}, err
return pi, err
}
return fromMinioClientObjectPart(info), nil
@ -500,10 +500,10 @@ func fromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInf
}
// ListObjectParts returns all object parts for specified object in specified bucket
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (ListPartsInfo, error) {
func (l *s3Objects) ListObjectParts(bucket string, object string, uploadID string, partNumberMarker int, maxParts int) (lpi ListPartsInfo, e error) {
result, err := l.Client.ListObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
if err != nil {
return ListPartsInfo{}, err
return lpi, err
}
return fromMinioClientListPartsInfo(result), nil
@ -532,10 +532,10 @@ func toMinioClientCompleteParts(parts []completePart) []minio.CompletePart {
}
// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (ObjectInfo, error) {
func (l *s3Objects) CompleteMultipartUpload(bucket string, object string, uploadID string, uploadedParts []completePart) (oi ObjectInfo, e error) {
err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, toMinioClientCompleteParts(uploadedParts))
if err != nil {
return ObjectInfo{}, s3ToObjectError(traceError(err), bucket, object)
return oi, s3ToObjectError(traceError(err), bucket, object)
}
return l.GetObjectInfo(bucket, object)

View File

@ -59,13 +59,13 @@ type amqpConn struct {
// dialAMQP - dials and returns an amqpConnection instance,
// for sending notifications. Returns error if amqp logger
// is not enabled.
func dialAMQP(amqpL amqpNotify) (amqpConn, error) {
func dialAMQP(amqpL amqpNotify) (ac amqpConn, e error) {
if !amqpL.Enable {
return amqpConn{}, errNotifyNotEnabled
return ac, errNotifyNotEnabled
}
conn, err := amqp.Dial(amqpL.URL)
if err != nil {
return amqpConn{}, err
return ac, err
}
return amqpConn{Connection: conn, params: amqpL}, nil
}

View File

@ -66,13 +66,13 @@ type kafkaConn struct {
topic string
}
func dialKafka(kn kafkaNotify) (kafkaConn, error) {
func dialKafka(kn kafkaNotify) (kc kafkaConn, e error) {
if !kn.Enable {
return kafkaConn{}, errNotifyNotEnabled
return kc, errNotifyNotEnabled
}
if kn.Topic == "" {
return kafkaConn{}, kkErrFunc(
return kc, kkErrFunc(
"Topic was not specified in configuration")
}
@ -85,7 +85,7 @@ func dialKafka(kn kafkaNotify) (kafkaConn, error) {
p, err := sarama.NewSyncProducer(kn.Brokers, config)
if err != nil {
return kafkaConn{}, kkErrFunc("Failed to start producer: %v", err)
return kc, kkErrFunc("Failed to start producer: %v", err)
}
return kafkaConn{p, kn.Topic}, nil

View File

@ -50,9 +50,9 @@ type mqttConn struct {
Client MQTT.Client
}
func dialMQTT(mqttL mqttNotify) (mqttConn, error) {
func dialMQTT(mqttL mqttNotify) (mc mqttConn, e error) {
if !mqttL.Enable {
return mqttConn{}, errNotifyNotEnabled
return mc, errNotifyNotEnabled
}
connOpts := &MQTT.ClientOptions{
ClientID: mqttL.ClientID,
@ -66,7 +66,7 @@ func dialMQTT(mqttL mqttNotify) (mqttConn, error) {
connOpts.AddBroker(mqttL.Broker)
client := MQTT.NewClient(connOpts)
if token := client.Connect(); token.Wait() && token.Error() != nil {
return mqttConn{}, token.Error()
return mc, token.Error()
}
return mqttConn{Client: client, params: mqttL}, nil
}

View File

@ -145,9 +145,9 @@ type mySQLConn struct {
*sql.DB
}
func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
func dialMySQL(msql mySQLNotify) (mc mySQLConn, e error) {
if !msql.Enable {
return mySQLConn{}, errNotifyNotEnabled
return mc, errNotifyNotEnabled
}
dsnStr := msql.DsnString
@ -166,7 +166,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
db, err := sql.Open("mysql", dsnStr)
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"Connection opening failure (dsnStr=%s): %v",
dsnStr, err)
}
@ -174,7 +174,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
// ping to check that server is actually reachable.
err = db.Ping()
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"Ping to server failed with: %v", err)
}
@ -190,7 +190,7 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
_, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table))
if errCreate != nil {
// failed to create the table. error out.
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"'Select' failed with %v, then 'Create Table' failed with %v",
err, errCreate,
)
@ -205,22 +205,20 @@ func dialMySQL(msql mySQLNotify) (mySQLConn, error) {
stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNSMySQL,
msql.Table))
if err != nil {
return mySQLConn{},
mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
return mc, mysqlErrFunc("create UPSERT prepared statement failed with: %v", err)
}
// delete statement
stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL,
msql.Table))
if err != nil {
return mySQLConn{},
mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
return mc, mysqlErrFunc("create DELETE prepared statement failed with: %v", err)
}
case formatAccess:
// insert statement
stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL,
msql.Table))
if err != nil {
return mySQLConn{}, mysqlErrFunc(
return mc, mysqlErrFunc(
"create INSERT prepared statement failed with: %v", err)
}

View File

@ -69,9 +69,9 @@ type natsIOConn struct {
// dialNATS - dials and returns a natsIOConn instance,
// for sending notifications. Returns error if nats logger
// is not enabled.
func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
func dialNATS(natsL natsNotify, testDial bool) (nioc natsIOConn, e error) {
if !natsL.Enable {
return natsIOConn{}, errNotifyNotEnabled
return nioc, errNotifyNotEnabled
}
// Construct natsIOConn which holds all NATS connection information
@ -105,7 +105,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
// Do the real connection to the NATS server
sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...)
if err != nil {
return natsIOConn{}, err
return nioc, err
}
// Save the created connection
conn.stanConn = sc
@ -120,7 +120,7 @@ func dialNATS(natsL natsNotify, testDial bool) (natsIOConn, error) {
// Do the real connection
nc, err := natsC.Connect()
if err != nil {
return natsIOConn{}, err
return nioc, err
}
// Save the created connection
conn.natsConn = nc

View File

@ -153,9 +153,9 @@ type pgConn struct {
*sql.DB
}
func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
func dialPostgreSQL(pgN postgreSQLNotify) (pc pgConn, e error) {
if !pgN.Enable {
return pgConn{}, errNotifyNotEnabled
return pc, errNotifyNotEnabled
}
// collect connection params
@ -179,7 +179,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
db, err := sql.Open("postgres", connStr)
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"Connection opening failure (connectionString=%s): %v",
connStr, err)
}
@ -187,7 +187,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
// ping to check that server is actually reachable.
err = db.Ping()
if err != nil {
return pgConn{}, pgErrFunc("Ping to server failed with: %v",
return pc, pgErrFunc("Ping to server failed with: %v",
err)
}
@ -203,7 +203,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
_, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table))
if errCreate != nil {
// failed to create the table. error out.
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"'Select' failed with %v, then 'Create Table' failed with %v",
err, errCreate,
)
@ -218,14 +218,14 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create UPSERT prepared statement failed with: %v", err)
}
// delete statement
stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create DELETE prepared statement failed with: %v", err)
}
case formatAccess:
@ -233,7 +233,7 @@ func dialPostgreSQL(pgN postgreSQLNotify) (pgConn, error) {
stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess,
pgN.Table))
if err != nil {
return pgConn{}, pgErrFunc(
return pc, pgErrFunc(
"create INSERT prepared statement failed with: %v", err)
}
}

View File

@ -112,7 +112,7 @@ type PostPolicyForm struct {
}
// parsePostPolicyForm - Parse JSON policy string into typed PostPolicyForm structure.
func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
func parsePostPolicyForm(policy string) (ppf PostPolicyForm, e error) {
// Convert policy into interfaces and
// perform strict type conversion using reflection.
var rawPolicy struct {
@ -122,7 +122,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
err := json.Unmarshal([]byte(policy), &rawPolicy)
if err != nil {
return PostPolicyForm{}, err
return ppf, err
}
parsedPolicy := PostPolicyForm{}
@ -130,7 +130,7 @@ func parsePostPolicyForm(policy string) (PostPolicyForm, error) {
// Parse expiry time.
parsedPolicy.Expiration, err = time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
if err != nil {
return PostPolicyForm{}, err
return ppf, err
}
parsedPolicy.Conditions.Policies = make(map[string]struct {
Operator string

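For context, the policy string handed to parsePostPolicyForm is the JSON document a browser POST upload carries; a hedged sketch with a hypothetical policy, mirroring the rawPolicy shape above:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

const policy = `{
  "expiration": "2017-12-01T12:00:00.000Z",
  "conditions": [
    {"bucket": "mybucket"},
    ["starts-with", "$key", "uploads/"],
    ["content-length-range", 1, 1048576]
  ]
}`

func main() {
	// Mirror of the rawPolicy shape above: expiration as a string,
	// conditions as free-form interfaces to be type-checked later.
	var rawPolicy struct {
		Expiration string        `json:"expiration"`
		Conditions []interface{} `json:"conditions"`
	}
	if err := json.Unmarshal([]byte(policy), &rawPolicy); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	exp, err := time.Parse(time.RFC3339Nano, rawPolicy.Expiration)
	if err != nil {
		fmt.Println("bad expiration:", err)
		return
	}
	fmt.Println(exp, len(rawPolicy.Conditions), "conditions")
}
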
View File

@ -45,20 +45,20 @@ func (c credentialHeader) getScope() string {
}
// parse credentialHeader string into its structured form.
func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode) {
func parseCredentialHeader(credElement string) (ch credentialHeader, aec APIErrorCode) {
creds := strings.Split(strings.TrimSpace(credElement), "=")
if len(creds) != 2 {
return credentialHeader{}, ErrMissingFields
return ch, ErrMissingFields
}
if creds[0] != "Credential" {
return credentialHeader{}, ErrMissingCredTag
return ch, ErrMissingCredTag
}
credElements := strings.Split(strings.TrimSpace(creds[1]), "/")
if len(credElements) != 5 {
return credentialHeader{}, ErrCredMalformed
return ch, ErrCredMalformed
}
if !isAccessKeyValid(credElements[0]) {
return credentialHeader{}, ErrInvalidAccessKeyID
return ch, ErrInvalidAccessKeyID
}
// Save access key id.
cred := credentialHeader{
@ -67,15 +67,15 @@ func parseCredentialHeader(credElement string) (credentialHeader, APIErrorCode)
var e error
cred.scope.date, e = time.Parse(yyyymmdd, credElements[1])
if e != nil {
return credentialHeader{}, ErrMalformedCredentialDate
return ch, ErrMalformedCredentialDate
}
cred.scope.region = credElements[2]
if credElements[3] != "s3" {
return credentialHeader{}, ErrInvalidService
return ch, ErrInvalidService
}
cred.scope.service = credElements[3]
if credElements[4] != "aws4_request" {
return credentialHeader{}, ErrInvalidRequestVersion
return ch, ErrInvalidRequestVersion
}
cred.scope.request = credElements[4]
return cred, ErrNone
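
The credential element parsed above has the fixed shape Credential=<accessKey>/<yyyymmdd>/<region>/s3/aws4_request; a standalone sketch of the same checks, with plain errors standing in for the APIErrorCode constants:

package main

import (
	"fmt"
	"strings"
	"time"
)

const yyyymmdd = "20060102"

func parseCredential(credElement string) (accessKey string, scope []string, e error) {
	creds := strings.Split(strings.TrimSpace(credElement), "=")
	if len(creds) != 2 {
		return "", nil, fmt.Errorf("missing fields")
	}
	if creds[0] != "Credential" {
		return "", nil, fmt.Errorf("missing Credential tag")
	}
	el := strings.Split(strings.TrimSpace(creds[1]), "/")
	if len(el) != 5 {
		return "", nil, fmt.Errorf("credential malformed")
	}
	if _, err := time.Parse(yyyymmdd, el[1]); err != nil {
		return "", nil, fmt.Errorf("malformed credential date")
	}
	if el[3] != "s3" || el[4] != "aws4_request" {
		return "", nil, fmt.Errorf("invalid service or request version")
	}
	return el[0], el[1:], nil
}

func main() {
	fmt.Println(parseCredential(
		"Credential=AKIAIOSFODNN7EXAMPLE/20170621/us-east-1/s3/aws4_request"))
}
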
@ -148,17 +148,17 @@ func doesV4PresignParamsExist(query url.Values) APIErrorCode {
}
// Parses all the presigned signature values into separate elements.
func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
func parsePreSignV4(query url.Values) (psv preSignValues, aec APIErrorCode) {
var err APIErrorCode
// verify whether the required query params exist.
err = doesV4PresignParamsExist(query)
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Verify if the query algorithm is supported or not.
if query.Get("X-Amz-Algorithm") != signV4Algorithm {
return preSignValues{}, ErrInvalidQuerySignatureAlgo
return psv, ErrInvalidQuerySignatureAlgo
}
// Initialize signature version '4' structured header.
@ -167,35 +167,35 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
// Save credential.
preSignV4Values.Credential, err = parseCredentialHeader("Credential=" + query.Get("X-Amz-Credential"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
var e error
// Save date in native time.Time.
preSignV4Values.Date, e = time.Parse(iso8601Format, query.Get("X-Amz-Date"))
if e != nil {
return preSignValues{}, ErrMalformedPresignedDate
return psv, ErrMalformedPresignedDate
}
// Save expires in native time.Duration.
preSignV4Values.Expires, e = time.ParseDuration(query.Get("X-Amz-Expires") + "s")
if e != nil {
return preSignValues{}, ErrMalformedExpires
return psv, ErrMalformedExpires
}
if preSignV4Values.Expires < 0 {
return preSignValues{}, ErrNegativeExpires
return psv, ErrNegativeExpires
}
// Save signed headers.
preSignV4Values.SignedHeaders, err = parseSignedHeader("SignedHeaders=" + query.Get("X-Amz-SignedHeaders"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Save signature.
preSignV4Values.Signature, err = parseSignature("Signature=" + query.Get("X-Amz-Signature"))
if err != ErrNone {
return preSignValues{}, err
return psv, err
}
// Return structured form of signature query string.
@ -207,25 +207,25 @@ func parsePreSignV4(query url.Values) (preSignValues, APIErrorCode) {
// Authorization: algorithm Credential=accessKeyID/credScope, \
// SignedHeaders=signedHeaders, Signature=signature
//
func parseSignV4(v4Auth string) (signValues, APIErrorCode) {
func parseSignV4(v4Auth string) (sv signValues, aec APIErrorCode) {
// Replace all spaced strings, some clients can send spaced
// parameters and some won't. So we pro-actively remove any spaces
// to make parsing easier.
v4Auth = strings.Replace(v4Auth, " ", "", -1)
if v4Auth == "" {
return signValues{}, ErrAuthHeaderEmpty
return sv, ErrAuthHeaderEmpty
}
// Verify if the header algorithm is supported or not.
if !strings.HasPrefix(v4Auth, signV4Algorithm) {
return signValues{}, ErrSignatureVersionNotSupported
return sv, ErrSignatureVersionNotSupported
}
// Strip off the Algorithm prefix.
v4Auth = strings.TrimPrefix(v4Auth, signV4Algorithm)
authFields := strings.Split(strings.TrimSpace(v4Auth), ",")
if len(authFields) != 3 {
return signValues{}, ErrMissingFields
return sv, ErrMissingFields
}
// Initialize signature version '4' structured header.
@@ -235,19 +235,19 @@ func parseSignV4(v4Auth string) (signValues, APIErrorCode) {
// Save credential values.
signV4Values.Credential, err = parseCredentialHeader(authFields[0])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Save signed headers.
signV4Values.SignedHeaders, err = parseSignedHeader(authFields[1])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Save signature.
signV4Values.Signature, err = parseSignature(authFields[2])
if err != ErrNone {
return signValues{}, err
return sv, err
}
// Return the structure here.
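
Taken together, these signature-parsing hunks show the whole pattern of the commit: a named, zero-valued result replaces the composite literal that every early return used to instantiate. Below is a minimal, self-contained sketch of the before/after shape; the header type and the parseOld/parseNew helpers are hypothetical stand-ins, not code from this repository.

package main

import (
	"errors"
	"fmt"
)

// header is a hypothetical stand-in for credentialHeader, preSignValues
// and signValues from the hunks above.
type header struct{ Algorithm string }

// Before: each failure path constructs a fresh composite literal.
func parseOld(s string) (header, error) {
	if s == "" {
		return header{}, errors.New("auth header empty")
	}
	return header{Algorithm: s}, nil
}

// After: the named result h is zero-valued on entry, so failure paths
// simply reference it instead of instantiating a new value per return.
func parseNew(s string) (h header, e error) {
	if s == "" {
		return h, errors.New("auth header empty")
	}
	return header{Algorithm: s}, nil
}

func main() {
	fmt.Println(parseOld(""))                 // {} auth header empty
	fmt.Println(parseNew("AWS4-HMAC-SHA256")) // {AWS4-HMAC-SHA256} <nil>
}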


@@ -144,15 +144,15 @@ func (xl xlObjects) getBucketInfo(bucketName string) (bucketInfo BucketInfo, err
}
// GetBucketInfo - returns BucketInfo for a bucket.
func (xl xlObjects) GetBucketInfo(bucket string) (BucketInfo, error) {
func (xl xlObjects) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
// Verify if bucket is valid.
if !IsValidBucketName(bucket) {
return BucketInfo{}, BucketNameInvalid{Bucket: bucket}
return bi, BucketNameInvalid{Bucket: bucket}
}
bucketInfo, err := xl.getBucketInfo(bucket)
if err != nil {
return BucketInfo{}, toObjectErr(err, bucket)
return bi, toObjectErr(err, bucket)
}
return bucketInfo, nil
}
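
Note that naming the results changes only how the body refers to them, not the function's type, so a method like GetBucketInfo keeps satisfying the object-layer interface exactly as before. A short sketch under that assumption; the bucketGetter interface and impl type are hypothetical, trimmed stand-ins.

package main

import "fmt"

// BucketInfo is a hypothetical stand-in for the struct used above.
type BucketInfo struct{ Name string }

// bucketGetter declares the results unnamed, as interfaces typically do.
type bucketGetter interface {
	GetBucketInfo(bucket string) (BucketInfo, error)
}

type impl struct{}

// Naming the results (bi BucketInfo, e error) leaves the method type
// identical, so impl still satisfies bucketGetter unchanged.
func (impl) GetBucketInfo(bucket string) (bi BucketInfo, e error) {
	if bucket == "" {
		return bi, fmt.Errorf("bucket name invalid")
	}
	return BucketInfo{Name: bucket}, nil
}

func main() {
	var g bucketGetter = impl{}
	fmt.Println(g.GetBucketInfo("photos"))
}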


@@ -69,7 +69,7 @@ func listDirHealFactory(isLeaf isLeafFunc, disks ...StorageAPI) listDirFunc {
}
// listObjectsHeal - wrapper function implemented over file tree walk.
func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
// Default is recursive; if a delimiter is set, list non-recursively.
recursive := true
if delimiter == slashSeparator {
@@ -98,7 +98,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
}
// For any walk error return right away.
if walkResult.err != nil {
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
return loi, toObjectErr(walkResult.err, bucket, prefix)
}
entry := walkResult.entry
var objInfo ObjectInfo
@@ -115,7 +115,7 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
if errorCause(err) == errFileNotFound {
continue
}
return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
return loi, toObjectErr(err, bucket, prefix)
}
}
nextMarker = objInfo.Name
@@ -160,14 +160,14 @@ func (xl xlObjects) listObjectsHeal(bucket, prefix, marker, delimiter string, ma
}
// ListObjectsHeal - list all objects at prefix that need healing, delimited by '/'.
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
// With max keys of zero we have reached eof, return right here.
if maxKeys == 0 {
return ListObjectsInfo{}, nil
return loi, nil
}
// For delimiter and prefix as '/' we do not list anything at all
@@ -175,7 +175,7 @@ func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma
// with the prefix. On a flat namespace with 'prefix' as '/'
// we don't have any entries, since all the keys are of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return ListObjectsInfo{}, nil
return loi, nil
}
// Over flowing count - reset to maxObjectList.
@@ -191,27 +191,27 @@ func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma
}
// Return error at the end.
return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
return loi, toObjectErr(err, bucket, prefix)
}
// ListUploadsHeal - lists ongoing multipart uploads that require
// healing in one or more disks.
func (xl xlObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error) {
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
// For delimiter and prefix as '/' we do not list anything at all
// since according to s3 spec we stop at the 'delimiter' along
// with the prefix. On a flat namespace with 'prefix' as '/'
// we don't have any entries, since all the keys are of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return ListMultipartsInfo{}, nil
return lmi, nil
}
// Initiate a list operation.
listMultipartInfo, err := xl.listMultipartUploadsHeal(bucket, prefix,
marker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return ListMultipartsInfo{}, toObjectErr(err, bucket, prefix)
return lmi, toObjectErr(err, bucket, prefix)
}
// Got the entries successfully; return them.
@@ -245,7 +245,7 @@ func fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string,
// listMultipartUploadsHeal - Returns a list of incomplete multipart
// uploads that need to be healed.
func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result := ListMultipartsInfo{
IsTruncated: true,
@@ -265,7 +265,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
uploads, _, err = fetchMultipartUploadIDs(bucket, keyMarker,
uploadIDMarker, maxUploads, xl.getLoadBalancedDisks())
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
maxUploads = maxUploads - len(uploads)
}
@@ -321,7 +321,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
}
// For any error during tree walk, we should return right away.
if walkResult.err != nil {
return ListMultipartsInfo{}, walkResult.err
return lmi, walkResult.err
}
entry := strings.TrimPrefix(walkResult.entry,
@@ -346,7 +346,7 @@ func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
newUploads, end, err = fetchMultipartUploadIDs(bucket, entry, uploadIDMarker,
uploadsLeft, xl.getLoadBalancedDisks())
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
uploads = append(uploads, newUploads...)
uploadsLeft -= len(newUploads)


@@ -46,7 +46,7 @@ func listDirFactory(isLeaf isLeafFunc, treeWalkIgnoredErrs []error, disks ...Sto
}
// listObjects - wrapper function implemented over file tree walk.
func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
// Default is recursive; if a delimiter is set, list non-recursively.
recursive := true
if delimiter == slashSeparator {
@@ -74,7 +74,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
}
// For any walk error return right away.
if walkResult.err != nil {
return ListObjectsInfo{}, toObjectErr(walkResult.err, bucket, prefix)
return loi, toObjectErr(walkResult.err, bucket, prefix)
}
entry := walkResult.entry
var objInfo ObjectInfo
@@ -92,7 +92,7 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
if errorCause(err) == errFileNotFound {
continue
}
return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
return loi, toObjectErr(err, bucket, prefix)
}
}
nextMarker = objInfo.Name
@@ -122,14 +122,14 @@ func (xl xlObjects) listObjects(bucket, prefix, marker, delimiter string, maxKey
}
// ListObjects - list all objects at prefix, delimited by '/'.
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) {
func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) {
if err := checkListObjsArgs(bucket, prefix, marker, delimiter, xl); err != nil {
return ListObjectsInfo{}, err
return loi, err
}
// With max keys of zero we have reached eof, return right here.
if maxKeys == 0 {
return ListObjectsInfo{}, nil
return loi, nil
}
// For delimiter and prefix as '/' we do not list anything at all
@@ -137,7 +137,7 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
// with the prefix. On a flat namespace with 'prefix' as '/'
// we don't have any entries, since all the keys are of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return ListObjectsInfo{}, nil
return loi, nil
}
// Over flowing count - reset to maxObjectList.
@@ -153,5 +153,5 @@ func (xl xlObjects) ListObjects(bucket, prefix, marker, delimiter string, maxKey
}
// Return error at the end.
return ListObjectsInfo{}, toObjectErr(err, bucket, prefix)
return loi, toObjectErr(err, bucket, prefix)
}


@@ -282,14 +282,14 @@ func (m xlMetaV1) ObjectToPartOffset(offset int64) (partIndex int, partOffset in
// pickValidXLMeta - picks one valid xlMeta from a slice of xlMeta
// contents. If no valid metadata is found, an error is returned.
func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xlMetaV1, error) {
func pickValidXLMeta(metaArr []xlMetaV1, modTime time.Time) (xmv xlMetaV1, e error) {
// Pick latest valid metadata.
for _, meta := range metaArr {
if meta.IsValid() && meta.Stat.ModTime.Equal(modTime) {
return meta, nil
}
}
return xlMetaV1{}, traceError(errors.New("No valid xl.json present"))
return xmv, traceError(errors.New("No valid xl.json present"))
}
// list of all errors that can be ignored in a metadata operation.
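
One caveat these hunks sidestep: with named results a bare return becomes legal and silently hands back whatever the named variables currently hold. The hunks always spell out the operands (return xmv, ..., return meta, nil), which keeps every exit site explicit. A sketch of the difference, using a trimmed hypothetical stand-in for xlMetaV1:

package main

import (
	"errors"
	"fmt"
)

// xlMetaV1 here is a trimmed, hypothetical stand-in.
type xlMetaV1 struct{ Version string }

func pickValid(metas []xlMetaV1) (xmv xlMetaV1, e error) {
	for _, meta := range metas {
		if meta.Version != "" {
			return meta, nil // explicit operands, as in the diff above
		}
	}
	e = errors.New("No valid xl.json present")
	return // bare return: equivalent to `return xmv, e`
}

func main() {
	fmt.Println(pickValid(nil))
	fmt.Println(pickValid([]xlMetaV1{{Version: "1.0.0"}}))
}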


@@ -274,7 +274,7 @@ func commitXLMetadata(disks []StorageAPI, srcBucket, srcPrefix, dstBucket, dstPr
}
// listMultipartUploads - lists all multipart uploads.
func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
result := ListMultipartsInfo{
IsTruncated: true,
MaxUploads: maxUploads,
@@ -324,7 +324,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
}
keyMarkerLock.RUnlock()
if err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
maxUploads = maxUploads - len(uploads)
}
@@ -350,7 +350,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
}
// For any walk error return right away.
if walkResult.err != nil {
return ListMultipartsInfo{}, walkResult.err
return lmi, walkResult.err
}
entry := strings.TrimPrefix(walkResult.entry, retainSlash(bucket))
// For an entry looking like a directory, store and
@@ -394,7 +394,7 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
if isErrIgnored(err, xlTreeWalkIgnoredErrs...) {
continue
}
return ListMultipartsInfo{}, err
return lmi, err
}
uploads = append(uploads, newUploads...)
maxUploads -= len(newUploads)
@@ -446,9 +446,9 @@ func (xl xlObjects) listMultipartUploads(bucket, prefix, keyMarker, uploadIDMark
// Implements S3 compatible ListMultipartUploads API. The resulting
// ListMultipartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
func (xl xlObjects) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
if err := checkListMultipartArgs(bucket, prefix, keyMarker, uploadIDMarker, delimiter, xl); err != nil {
return ListMultipartsInfo{}, err
return lmi, err
}
return xl.listMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads)
@@ -534,9 +534,9 @@ func (xl xlObjects) NewMultipartUpload(bucket, object string, meta map[string]st
// data is read from an existing object.
//
// Implements S3 compatible Upload Part Copy API.
func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (PartInfo, error) {
func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64) (pi PartInfo, e error) {
if err := checkNewMultipartArgs(srcBucket, srcObject, xl); err != nil {
return PartInfo{}, err
return pi, err
}
// Initialize pipe.
@@ -553,7 +553,7 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
partInfo, err := xl.PutObjectPart(dstBucket, dstObject, uploadID, partID, length, pipeReader, "", "")
if err != nil {
return PartInfo{}, toObjectErr(err, dstBucket, dstObject)
return pi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@@ -568,9 +568,9 @@ func (xl xlObjects) CopyObjectPart(srcBucket, srcObject, dstBucket, dstObject, u
// of the multipart transaction.
//
// Implements S3 compatible Upload Part API.
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (PartInfo, error) {
func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Hex string, sha256sum string) (pi PartInfo, e error) {
if err := checkPutObjectPartArgs(bucket, object, xl); err != nil {
return PartInfo{}, err
return pi, err
}
var partsMetadata []xlMetaV1
@@ -583,7 +583,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Validates if upload ID exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
preUploadIDLock.RUnlock()
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
// Read metadata associated with the object from all disks.
@@ -592,7 +592,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
preUploadIDLock.RUnlock()
return PartInfo{}, toObjectErr(reducedErr, bucket, object)
return pi, toObjectErr(reducedErr, bucket, object)
}
preUploadIDLock.RUnlock()
@@ -602,7 +602,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(partsMetadata, modTime)
if err != nil {
return PartInfo{}, err
return pi, err
}
onlineDisks = shuffleDisks(onlineDisks, xlMeta.Erasure.Distribution)
@@ -646,7 +646,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
if size > 0 {
if pErr := xl.prepareFile(minioMetaTmpBucket, tmpPartPath, size, onlineDisks, xlMeta.Erasure.BlockSize, xlMeta.Erasure.DataBlocks); pErr != nil {
return PartInfo{}, toObjectErr(pErr, bucket, object)
return pi, toObjectErr(pErr, bucket, object)
}
}
@@ -657,13 +657,13 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Erasure code data and write across all disks.
onlineDisks, sizeWritten, checkSums, err := erasureCreateFile(onlineDisks, minioMetaTmpBucket, tmpPartPath, teeReader, allowEmpty, xlMeta.Erasure.BlockSize, xl.dataBlocks, xl.parityBlocks, bitRotAlgo, xl.writeQuorum)
if err != nil {
return PartInfo{}, toObjectErr(err, bucket, object)
return pi, toObjectErr(err, bucket, object)
}
// Should return IncompleteBody{} error when reader has fewer bytes
// than specified in request header.
if sizeWritten < size {
return PartInfo{}, traceError(IncompleteBody{})
return pi, traceError(IncompleteBody{})
}
// For size == -1, perhaps client is sending in chunked encoding
@@ -677,14 +677,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
if md5Hex != "" {
if newMD5Hex != md5Hex {
// Returns md5 mismatch.
return PartInfo{}, traceError(BadDigest{md5Hex, newMD5Hex})
return pi, traceError(BadDigest{md5Hex, newMD5Hex})
}
}
if sha256sum != "" {
newSHA256sum := hex.EncodeToString(sha256Writer.Sum(nil))
if newSHA256sum != sha256sum {
return PartInfo{}, traceError(SHA256Mismatch{})
return pi, traceError(SHA256Mismatch{})
}
}
@@ -695,21 +695,21 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Validate again if upload ID still exists.
if !xl.isUploadIDExists(bucket, object, uploadID) {
return PartInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return pi, traceError(InvalidUploadID{UploadID: uploadID})
}
// Rename temporary part file to its final location.
partPath := path.Join(uploadIDPath, partSuffix)
onlineDisks, err = renamePart(onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, xl.writeQuorum)
if err != nil {
return PartInfo{}, toObjectErr(err, minioMetaMultipartBucket, partPath)
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
}
// Read metadata again because it might have been updated by a parallel upload of another part.
partsMetadata, errs = readAllXLMetadata(onlineDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr = reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
return PartInfo{}, toObjectErr(reducedErr, bucket, object)
return pi, toObjectErr(reducedErr, bucket, object)
}
// Get current highest version based on re-read partsMetadata.
@@ -718,7 +718,7 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Pick one from the first valid metadata.
xlMeta, err = pickValidXLMeta(partsMetadata, modTime)
if err != nil {
return PartInfo{}, err
return pi, err
}
// Once part is successfully committed, proceed with updating XL metadata.
@@ -745,17 +745,17 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// Writes a unique `xl.json` to each disk carrying new checksum-related information.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, partsMetadata, xl.writeQuorum); err != nil {
return PartInfo{}, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
return pi, toObjectErr(err, minioMetaTmpBucket, tempXLMetaPath)
}
var rErr error
onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempXLMetaPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum)
if rErr != nil {
return PartInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
return pi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
fi, err := xl.statPart(bucket, object, uploadID, partSuffix)
if err != nil {
return PartInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, partSuffix)
return pi, toObjectErr(err, minioMetaMultipartBucket, partSuffix)
}
// Return success.
@@ -769,14 +769,14 @@ func (xl xlObjects) PutObjectPart(bucket, object, uploadID string, partID int, s
// listObjectParts - wrapper reading `xl.json` for a given object and
// uploadID. Lists all the parts captured inside `xl.json` content.
func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
result := ListPartsInfo{}
uploadIDPath := path.Join(bucket, object, uploadID)
xlParts, err := xl.readXLMetaParts(minioMetaMultipartBucket, uploadIDPath)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
return lpi, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
}
// Populate the result stub.
@@ -806,7 +806,7 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
var fi FileInfo
fi, err = xl.statPart(bucket, object, uploadID, part.Name)
if err != nil {
return ListPartsInfo{}, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name))
return lpi, toObjectErr(err, minioMetaBucket, path.Join(uploadID, part.Name))
}
result.Parts = append(result.Parts, PartInfo{
PartNumber: part.Number,
@@ -837,9 +837,9 @@ func (xl xlObjects) listObjectParts(bucket, object, uploadID string, partNumberM
// Implements S3 compatible ListObjectParts API. The resulting
// ListPartsInfo structure is marshalled directly into XML and
// replied back to the client.
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (ListPartsInfo, error) {
func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberMarker, maxParts int) (lpi ListPartsInfo, e error) {
if err := checkListPartsArgs(bucket, object, xl); err != nil {
return ListPartsInfo{}, err
return lpi, err
}
// Hold lock so that there is no competing
@@ -850,7 +850,7 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return ListPartsInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return lpi, traceError(InvalidUploadID{UploadID: uploadID})
}
result, err := xl.listObjectParts(bucket, object, uploadID, partNumberMarker, maxParts)
return result, err
@@ -862,9 +862,9 @@ func (xl xlObjects) ListObjectParts(bucket, object, uploadID string, partNumberM
// md5sums of all the parts.
//
// Implements S3 compatible Complete multipart API.
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (ObjectInfo, error) {
func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, uploadID string, parts []completePart) (oi ObjectInfo, e error) {
if err := checkCompleteMultipartArgs(bucket, object, xl); err != nil {
return ObjectInfo{}, err
return oi, err
}
// Hold lock so that
@@ -879,19 +879,19 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
defer uploadIDLock.Unlock()
if !xl.isUploadIDExists(bucket, object, uploadID) {
return ObjectInfo{}, traceError(InvalidUploadID{UploadID: uploadID})
return oi, traceError(InvalidUploadID{UploadID: uploadID})
}
// Check if an object is present as one of the parent dirs.
// -- FIXME. (needs a new kind of lock).
if xl.parentDirIsObject(bucket, path.Dir(object)) {
return ObjectInfo{}, toObjectErr(traceError(errFileAccessDenied), bucket, object)
return oi, toObjectErr(traceError(errFileAccessDenied), bucket, object)
}
// Calculate s3 compatible md5sum for complete multipart.
s3MD5, err := getCompleteMultipartMD5(parts)
if err != nil {
return ObjectInfo{}, err
return oi, err
}
uploadIDPath := pathJoin(bucket, object, uploadID)
@@ -900,7 +900,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
partsMetadata, errs := readAllXLMetadata(xl.storageDisks, minioMetaMultipartBucket, uploadIDPath)
reducedErr := reduceWriteQuorumErrs(errs, objectOpIgnoredErrs, xl.writeQuorum)
if errorCause(reducedErr) == errXLWriteQuorum {
return ObjectInfo{}, toObjectErr(reducedErr, bucket, object)
return oi, toObjectErr(reducedErr, bucket, object)
}
onlineDisks, modTime := listOnlineDisks(xl.storageDisks, partsMetadata, errs)
@@ -911,7 +911,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Pick one from the first valid metadata.
xlMeta, err := pickValidXLMeta(partsMetadata, modTime)
if err != nil {
return ObjectInfo{}, err
return oi, err
}
// Order online disks in accordance with distribution order.
@@ -931,17 +931,17 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
partIdx := objectPartIndex(currentXLMeta.Parts, part.PartNumber)
// The requested part number must match a previously uploaded part.
if partIdx == -1 {
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
// All parts should have same ETag as previously generated.
if currentXLMeta.Parts[partIdx].ETag != part.ETag {
return ObjectInfo{}, traceError(InvalidPart{})
return oi, traceError(InvalidPart{})
}
// All parts except the last have to be at least 5MB.
if (i < len(parts)-1) && !isMinAllowedPartSize(currentXLMeta.Parts[partIdx].Size) {
return ObjectInfo{}, traceError(PartTooSmall{
return oi, traceError(PartTooSmall{
PartNumber: part.PartNumber,
PartSize: currentXLMeta.Parts[partIdx].Size,
PartETag: part.ETag,
@@ -986,13 +986,13 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, partsMetadata, xl.writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
return oi, toObjectErr(err, minioMetaTmpBucket, tempUploadIDPath)
}
var rErr error
onlineDisks, rErr = commitXLMetadata(onlineDisks, minioMetaTmpBucket, tempUploadIDPath, minioMetaMultipartBucket, uploadIDPath, xl.writeQuorum)
if rErr != nil {
return ObjectInfo{}, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
return oi, toObjectErr(rErr, minioMetaMultipartBucket, uploadIDPath)
}
defer func() {
@@ -1020,7 +1020,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// regardless of `xl.json` status and rolled back in case of errors.
_, err = renameObject(xl.storageDisks, bucket, object, minioMetaTmpBucket, newUniqueID, xl.writeQuorum)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
}
@@ -1039,7 +1039,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// Rename the multipart object to final location.
if onlineDisks, err = renameObject(onlineDisks, minioMetaMultipartBucket, uploadIDPath, bucket, object, xl.writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
// Hold the lock so that two parallel
@@ -1052,7 +1052,7 @@ func (xl xlObjects) CompleteMultipartUpload(bucket string, object string, upload
// remove entry from uploads.json with quorum
if err = xl.removeUploadID(bucket, object, uploadID); err != nil {
return ObjectInfo{}, toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object))
return oi, toObjectErr(err, minioMetaMultipartBucket, path.Join(bucket, object))
}
objInfo := ObjectInfo{
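
Throughout the multipart changes above, the named results (pi PartInfo, oi ObjectInfo, e error) are never assigned in the body: locals named err carry the intermediate errors, and the named struct is only referenced as a ready-made zero value on the failure paths. Go permits a named result to go entirely unused, unlike an unused local variable. A self-contained sketch; the PartInfo type and loadMeta helper below are hypothetical.

package main

import (
	"errors"
	"fmt"
)

// PartInfo is a hypothetical stand-in for the struct used above.
type PartInfo struct{ PartNumber int }

func loadMeta(ok bool) (string, error) {
	if !ok {
		return "", errors.New("write quorum not met")
	}
	return "xl.json", nil
}

// The named results pi and e stay untouched: the body keeps its own
// err local, and pi is returned as the zero PartInfo on failure.
func putPart(ok bool) (pi PartInfo, e error) {
	meta, err := loadMeta(ok) // local err, independent of the named e
	if err != nil {
		return pi, err
	}
	_ = meta // the metadata would be consumed here
	return PartInfo{PartNumber: 1}, nil
}

func main() {
	fmt.Println(putPart(false))
	fmt.Println(putPart(true))
}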


@@ -59,11 +59,11 @@ func (xl xlObjects) prepareFile(bucket, object string, size int64, onlineDisks [
// CopyObject - copies the source object to the destination object.
// If the source and destination objects are the same, we only
// update metadata.
func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (ObjectInfo, error) {
func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string, metadata map[string]string) (oi ObjectInfo, e error) {
// Read metadata associated with the object from all disks.
metaArr, errs := readAllXLMetadata(xl.storageDisks, srcBucket, srcObject)
if reducedErr := reduceReadQuorumErrs(errs, objectOpIgnoredErrs, xl.readQuorum); reducedErr != nil {
return ObjectInfo{}, toObjectErr(reducedErr, srcBucket, srcObject)
return oi, toObjectErr(reducedErr, srcBucket, srcObject)
}
// List all online disks.
@@ -72,7 +72,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
// Pick latest valid metadata.
xlMeta, err := pickValidXLMeta(metaArr, modTime)
if err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Reorder online disks based on erasure distribution order.
@@ -95,11 +95,11 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
// Write unique `xl.json` for each disk.
if onlineDisks, err = writeUniqueXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, xl.writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
// Rename atomically `xl.json` from tmp location to destination for each disk.
if onlineDisks, err = renameXLMetadata(onlineDisks, minioMetaTmpBucket, tempObj, srcBucket, srcObject, xl.writeQuorum); err != nil {
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
return oi, toObjectErr(err, srcBucket, srcObject)
}
return xlMeta.ToObjectInfo(srcBucket, srcObject), nil
}
@@ -119,7 +119,7 @@ func (xl xlObjects) CopyObject(srcBucket, srcObject, dstBucket, dstObject string
objInfo, err := xl.PutObject(dstBucket, dstObject, length, pipeReader, metadata, "")
if err != nil {
return ObjectInfo{}, toObjectErr(err, dstBucket, dstObject)
return oi, toObjectErr(err, dstBucket, dstObject)
}
// Explicitly close the reader.
@@ -303,14 +303,14 @@ func (xl xlObjects) GetObject(bucket, object string, startOffset int64, length i
}
// GetObjectInfo - reads object metadata and replies back ObjectInfo.
func (xl xlObjects) GetObjectInfo(bucket, object string) (ObjectInfo, error) {
func (xl xlObjects) GetObjectInfo(bucket, object string) (oi ObjectInfo, e error) {
if err := checkGetObjArgs(bucket, object); err != nil {
return ObjectInfo{}, err
return oi, err
}
info, err := xl.getObjectInfo(bucket, object)
if err != nil {
return ObjectInfo{}, toObjectErr(err, bucket, object)
return oi, toObjectErr(err, bucket, object)
}
return info, nil
}


@@ -123,13 +123,13 @@ func hashOrder(key string, cardinality int) []int {
return nums
}
func parseXLStat(xlMetaBuf []byte) (statInfo, error) {
func parseXLStat(xlMetaBuf []byte) (si statInfo, e error) {
// obtain stat info.
stat := statInfo{}
// fetching modTime.
modTime, err := time.Parse(time.RFC3339, gjson.GetBytes(xlMetaBuf, "stat.modTime").String())
if err != nil {
return statInfo{}, err
return si, err
}
stat.ModTime = modTime
// obtain Stat.Size.
@@ -207,7 +207,7 @@ func parseXLMetaMap(xlMetaBuf []byte) map[string]string {
}
// Constructs XLMetaV1 using `gjson` lib to retrieve each field.
func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMetaV1, error) {
func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xmv xlMetaV1, e error) {
xlMeta := xlMetaV1{}
// obtain version.
xlMeta.Version = parseXLVersion(xlMetaBuf)
@@ -216,7 +216,7 @@ func xlMetaV1UnmarshalJSON(xlMetaBuf []byte) (xlMetaV1, error) {
// Parse xlMetaV1.Stat.
stat, err := parseXLStat(xlMetaBuf)
if err != nil {
return xlMetaV1{}, err
return xmv, err
}
xlMeta.Stat = stat
@@ -247,11 +247,11 @@ func readXLMetaParts(disk StorageAPI, bucket string, object string) ([]objectPar
}
// read xl.json from the given disk and parse xlV1Meta.Stat and xlV1Meta.Meta using gjson.
func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, map[string]string, error) {
func readXLMetaStat(disk StorageAPI, bucket string, object string) (si statInfo, mp map[string]string, e error) {
// Reads entire `xl.json`.
xlMetaBuf, err := disk.ReadAll(bucket, path.Join(object, xlMetaJSONFile))
if err != nil {
return statInfo{}, nil, traceError(err)
return si, nil, traceError(err)
}
// obtain version.
@@ -263,7 +263,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, ma
// Validate if the xl.json we read is sane, return corrupted format.
if !isXLMetaValid(xlVersion, xlFormat) {
// For version mismatches and unrecognized formats, return corrupted format.
return statInfo{}, nil, traceError(errCorruptedFormat)
return si, nil, traceError(errCorruptedFormat)
}
// obtain xlMetaV1{}.Meta using `github.com/tidwall/gjson`.
@@ -272,7 +272,7 @@ func readXLMetaStat(disk StorageAPI, bucket string, object string) (statInfo, ma
// obtain xlMetaV1{}.Stat using `github.com/tidwall/gjson`.
xlStat, err := parseXLStat(xlMetaBuf)
if err != nil {
return statInfo{}, nil, traceError(err)
return si, nil, traceError(err)
}
// Return structured `xl.json`.